2022-12-30 20:52:41 +08:00
parent f933400535
commit d3a950b54d
47 changed files with 10444 additions and 2 deletions

82
Cargo.toml Normal file

@@ -0,0 +1,82 @@
[package]
name = "webdav-handler"
# When releasing to crates.io:
# - Update html_root_url in src/lib.rs
# - Update CHANGELOG.md.
# - Run ./generate-readme
# - Create git tag v0.x.y
version = "0.2.0"
readme = "README.md"
description = "handler for the HTTP and Webdav protocols with filesystem backend"
documentation = "https://docs.rs/webdav-handler"
repository = "https://github.com/miquels/webdav-handler-rs"
homepage = "https://github.com/miquels/webdav-handler-rs"
authors = ["Miquel van Smoorenburg <mike@langeraar.net>"]
edition = "2018"
license = "Apache-2.0"
keywords = ["webdav"]
categories = ["web-programming"]
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[package.metadata.playground]
features = ["full"]
[lib]
name = "webdav_handler"
path = "src/lib.rs"
[features]
default = []
actix-compat = [ "actix-web" ]
warp-compat = [ "warp", "hyper" ]
all = [ "actix-compat", "warp-compat" ]
[[example]]
name = "actix"
required-features = [ "actix-compat" ]
[[example]]
name = "warp"
required-features = [ "warp-compat" ]
[dependencies]
bytes = "1.0.1"
futures = "0.3.9"
handlebars = "3.5.0"
headers = "0.3.0"
htmlescape = "0.3.1"
http = "0.2.3"
http-body = "0.4.0"
lazy_static = "1.4.0"
libc = "0.2.0"
log = "0.4.0"
lru = "0.6.0"
mime_guess = "2.0.0"
parking_lot = "0.11.1"
percent-encoding = "1.0.1"
pin-project = "1.0.4"
pin-utils = "0.1.0"
regex = "1.4.0"
tokio = { version = "1.3.0", features = [ "rt-multi-thread", "io-util", "net", "time", "sync" ] }
time = { version = "0.2.24", default-features = false }
url = "2.2.0"
uuid = { version = "0.8.0", features = ["v4"] }
xml-rs = "0.8.0"
xmltree = "0.10.0"
hyper = {version = "0.14.0", optional = true }
warp = { version = "0.3.0", optional = true }
#actix-web = { version = "3.3.2", optional = true }
actix-web = { version = "4.0.0-beta.6", optional = true }
[dev-dependencies]
clap = "2.33.0"
env_logger = "0.8.0"
hyper = { version = "0.14.0", features = [ "http1", "http2", "server", "stream", "runtime" ] }
tokio = { version = "1.3.0", features = ["full"] }
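The framework adapters are gated behind Cargo features, and the `actix` and `warp` examples declare `required-features`, so they have to be enabled explicitly. A couple of illustrative invocations (these command lines are an editorial sketch based on the manifest above, not part of the commit):
```
# build with both compatibility layers enabled
cargo build --features all

# run the framework examples with the features they require
cargo run --example actix --features actix-compat
cargo run --example warp --features warp-compat
```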

149
README.litmus-test.md Normal file

@@ -0,0 +1,149 @@
# Webdav protocol compliance.
The de facto standard for WebDAV compliance testing is [`litmus`](http://www.webdav.org/neon/litmus/),
available at [http://www.webdav.org/neon/litmus/](http://www.webdav.org/neon/litmus/).
Building it:
```
curl -O http://www.webdav.org/neon/litmus/litmus-0.13.tar.gz
tar xf litmus-0.13.tar.gz
cd litmus-0.13
./configure
make
```
Then run the test server (`sample-litmus-server`). For some tests, `litmus`
assumes that it is using basic authentication, so you must run the server
with the `--auth` flag.
```
cd webdav-handler-rs
cargo run --example sample-litmus-server -- --memfs --auth
```
You do not have to install the litmus binary; you can run the tests
straight from the unpacked and compiled litmus directory (`someuser` and
`somepass` are literal, you do not have to put a real username/password there):
```
$ cd litmus-0.13
$ TESTS="http basic copymove locks props" HTDOCS=htdocs TESTROOT=. ./litmus http://localhost:4918/ someuser somepass
-> running `http':
0. init.................. pass
1. begin................. pass
2. expect100............. pass
3. finish................ pass
<- summary for `http': of 4 tests run: 4 passed, 0 failed. 100.0%
-> running `basic':
0. init.................. pass
1. begin................. pass
2. options............... pass
3. put_get............... pass
4. put_get_utf8_segment.. pass
5. put_no_parent......... pass
6. mkcol_over_plain...... pass
7. delete................ pass
8. delete_null........... pass
9. delete_fragment....... WARNING: DELETE removed collection resource with Request-URI including fragment; unsafe
...................... pass (with 1 warning)
10. mkcol................. pass
11. mkcol_again........... pass
12. delete_coll........... pass
13. mkcol_no_parent....... pass
14. mkcol_with_body....... pass
15. finish................ pass
<- summary for `basic': of 16 tests run: 16 passed, 0 failed. 100.0%
-> 1 warning was issued.
-> running `copymove':
0. init.................. pass
1. begin................. pass
2. copy_init............. pass
3. copy_simple........... pass
4. copy_overwrite........ pass
5. copy_nodestcoll....... pass
6. copy_cleanup.......... pass
7. copy_coll............. pass
8. copy_shallow.......... pass
9. move.................. pass
10. move_coll............. pass
11. move_cleanup.......... pass
12. finish................ pass
<- summary for `copymove': of 13 tests run: 13 passed, 0 failed. 100.0%
-> running `locks':
0. init.................. pass
1. begin................. pass
2. options............... pass
3. precond............... pass
4. init_locks............ pass
5. put................... pass
6. lock_excl............. pass
7. discover.............. pass
8. refresh............... pass
9. notowner_modify....... pass
10. notowner_lock......... pass
11. owner_modify.......... pass
12. notowner_modify....... pass
13. notowner_lock......... pass
14. copy.................. pass
15. cond_put.............. pass
16. fail_cond_put......... pass
17. cond_put_with_not..... pass
18. cond_put_corrupt_token pass
19. complex_cond_put...... pass
20. fail_complex_cond_put. pass
21. unlock................ pass
22. fail_cond_put_unlocked pass
23. lock_shared........... pass
24. notowner_modify....... pass
25. notowner_lock......... pass
26. owner_modify.......... pass
27. double_sharedlock..... pass
28. notowner_modify....... pass
29. notowner_lock......... pass
30. unlock................ pass
31. prep_collection....... pass
32. lock_collection....... pass
33. owner_modify.......... pass
34. notowner_modify....... pass
35. refresh............... pass
36. indirect_refresh...... pass
37. unlock................ pass
38. unmapped_lock......... pass
39. unlock................ pass
40. finish................ pass
<- summary for `locks': of 41 tests run: 41 passed, 0 failed. 100.0%
-> running `props':
0. init.................. pass
1. begin................. pass
2. propfind_invalid...... pass
3. propfind_invalid2..... pass
4. propfind_d0........... pass
5. propinit.............. pass
6. propset............... pass
7. propget............... pass
8. propextended.......... pass
9. propmove.............. pass
10. propget............... pass
11. propdeletes........... pass
12. propget............... pass
13. propreplace........... pass
14. propget............... pass
15. propnullns............ pass
16. propget............... pass
17. prophighunicode....... pass
18. propget............... pass
19. propremoveset......... pass
20. propget............... pass
21. propsetremove......... pass
22. propget............... pass
23. propvalnspace......... pass
24. propwformed........... pass
25. propinit.............. pass
26. propmanyns............ pass
27. propget............... pass
28. propcleanup........... pass
29. finish................ pass
<- summary for `props': of 30 tests run: 30 passed, 0 failed. 100.0%
```

149
README.md

@@ -1,3 +1,148 @@
# webdav-handler
[![Apache-2.0 licensed](https://img.shields.io/badge/license-Apache2.0-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0.txt)
[![crates.io](https://meritbadge.herokuapp.com/webdav-handler)](https://crates.io/crates/webdav-handler)
[![Released API docs](https://docs.rs/webdav-handler/badge.svg)](https://docs.rs/webdav-handler)
### Generic async HTTP/Webdav handler
[`Webdav`] (RFC4918) is defined as
HTTP (GET/HEAD/PUT/DELETE) plus a bunch of extension methods (PROPFIND, etc).
These extension methods are used to manage collections (like unix directories),
get information on collections (like unix `ls` or `readdir`), rename and
copy items, lock/unlock items, etc.
A `handler` is a piece of code that takes a `http::Request`, processes it in some
way, and then generates a `http::Response`. This library is a `handler` that maps
the HTTP/Webdav protocol to the filesystem. Or actually, "a" filesystem. Included
is an adapter for the local filesystem (`localfs`), and an adapter for an
in-memory filesystem (`memfs`).
So this library can be used as a handler with HTTP servers like [hyper],
[warp], [actix-web], etc. Either as a correct and complete HTTP handler for
files (GET/HEAD) or as a handler for the entire Webdav protocol. In the latter case, you can
mount it as a remote filesystem: Linux, Windows, macOS can all mount Webdav filesystems.
### Backend interfaces.
The backend interfaces are similar to the ones from the Go `x/net/webdav` package:
- the library contains a [HTTP handler][DavHandler].
- you supply a [filesystem][DavFileSystem] for backend storage, which can optionally
implement reading/writing [DAV properties][DavProp].
- you can supply a [locksystem][DavLockSystem] that handles webdav locks.
The handler in this library works with the standard http types
from the `http` and `http_body` crates. That means that you can use it
straight away with http libraries / frameworks that also work with
those types, like hyper. Compatibility modules for [actix-web][actix-compat]
and [warp][warp-compat] are also provided.
### Implemented standards.
Currently [passes the "basic", "copymove", "props", "locks" and "http"
checks][README_litmus] of the Webdav Litmus Test testsuite. That's all of the base
[RFC4918] webdav specification.
The litmus test suite also has tests for RFC3744 "acl" and "principal",
RFC5842 "bind", and RFC3253 "versioning". Those we do not support right now.
The relevant parts of the HTTP RFCs are also implemented, such as the
preconditions (If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since,
If-Range) and partial transfers (Range).
Partial `PUT` is also implemented. There are currently two non-standard ways
to do it: [`PUT` with the `Content-Range` header][PUT], which is what Apache's
`mod_dav` implements, and [`PATCH` with the `X-Update-Range` header][PATCH]
from `SabreDAV`.
### Backends.
Included are two filesystems:
- [`LocalFs`]: serves a directory on the local filesystem
- [`MemFs`]: ephemeral in-memory filesystem; supports DAV properties.
Also included are two locksystems:
- [`MemLs`]: ephemeral in-memory locksystem.
- [`FakeLs`]: fake locksystem; just enough LOCK/UNLOCK support for macOS/Windows.
### Example.
Example server using [hyper] that serves the /tmp directory in r/w mode. You should be
able to mount this network share from Linux, macOS and Windows. [Examples][examples]
for other frameworks are also available.
```rust
use std::convert::Infallible;
use webdav_handler::{fakels::FakeLs, localfs::LocalFs, DavHandler};
#[tokio::main]
async fn main() {
let dir = "/tmp";
let addr = ([127, 0, 0, 1], 4918).into();
let dav_server = DavHandler::builder()
.filesystem(LocalFs::new(dir, false, false, false))
.locksystem(FakeLs::new())
.build_handler();
let make_service = hyper::service::make_service_fn(move |_| {
let dav_server = dav_server.clone();
async move {
let func = move |req| {
let dav_server = dav_server.clone();
async move {
Ok::<_, Infallible>(dav_server.handle(req).await)
}
};
Ok::<_, Infallible>(hyper::service::service_fn(func))
}
});
println!("Serving {} on {}", dir, addr);
let _ = hyper::Server::bind(&addr)
.serve(make_service)
.await
.map_err(|e| eprintln!("server error: {}", e));
}
```
[DavHandler]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/struct.DavHandler.html
[DavFileSystem]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/fs/index.html
[DavLockSystem]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/ls/index.html
[DavProp]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/fs/struct.DavProp.html
[`Webdav`]: https://tools.ietf.org/html/rfc4918
[RFC4918]: https://tools.ietf.org/html/rfc4918
[`MemLs`]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/memls/index.html
[`MemFs`]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/memfs/index.html
[`LocalFs`]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/localfs/index.html
[`FakeLs`]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/fakels/index.html
[actix-compat]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/actix/index.html
[warp-compat]: https://docs.rs/webdav-handler/0.2.0/webdav_handler/warp/index.html
[README_litmus]: https://github.com/miquels/webdav-handler-rs/blob/master/README.litmus-test.md
[examples]: https://github.com/miquels/webdav-handler-rs/tree/master/examples/
[PUT]: https://github.com/miquels/webdav-handler-rs/tree/master/doc/Apache-PUT-with-Content-Range.md
[PATCH]: https://github.com/miquels/webdav-handler-rs/tree/master/doc/SABREDAV-partialupdate.md
[hyper]: https://hyper.rs/
[warp]: https://crates.io/crates/warp
[actix-web]: https://actix.rs/
### Building.
This crate uses std::future::Future and async/await, so it only works with Rust 1.39 and up.
### Testing.
```
RUST_LOG=webdav_handler=debug cargo run --example sample-litmus-server
```
This will start a server on port 4918, serving an in-memory filesystem.
For other options, run `cargo run --example sample-litmus-server -- --help`
### Copyright and License.
* © 2018, 2019, 2020 XS4ALL Internet bv
* © 2018, 2019, 2020 Miquel van Smoorenburg
* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)

27
README.tpl Normal file

@@ -0,0 +1,27 @@
# {{crate}}
[![Apache-2.0 licensed](https://img.shields.io/badge/license-Apache2.0-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0.txt)
[![crates.io](https://meritbadge.herokuapp.com/webdav-handler)](https://crates.io/crates/webdav-handler)
[![Released API docs](https://docs.rs/webdav-handler/badge.svg)](https://docs.rs/webdav-handler)
{{readme}}
### Building.
This crate uses std::future::Future and async/await, so it only works with Rust 1.39 and up.
### Testing.
```
RUST_LOG=webdav_handler=debug cargo run --example sample-litmus-server
```
This will start a server on port 4918, serving an in-memory filesystem.
For other options, run `cargo run --example sample-litmus-server -- --help`
### Copyright and License.
* © 2018, 2019, 2020 XS4ALL Internet bv
* © 2018, 2019, 2020 Miquel van Smoorenburg
* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)

108
TODO.md Normal file

@@ -0,0 +1,108 @@
# TODO list
## Protocol compliance
### Apply all headers
The RFC says that for COPY/MOVE/DELETE with Depth: Infinity all headers
must be applied to all resources. For example, in RFC4918 9.6.1:
```
Any headers included with DELETE MUST be applied in processing every resource to be deleted.
```
Currently we do not do this: we apply the If-Match, If-None-Match, If-Modified-Since,
If-Unmodified-Since, and If headers to the request URL, but not recursively.
### Props on symbolic links
We should probably disallow that.
### In MOVE/DELETE, test locks separately per resource
Right now we check if we hold the locks (if any) for the request URL, and for paths
below it for Depth: Infinity requests. If we don't, the entire request fails. We
should really check that for every resource to be MOVEd/DELETEd separately,
and only fail those resources.
This does mean that we cannot MOVE a collection by doing a simple rename, we must
do it resource-per-resource, like COPY.
## Race conditions
During long-running requests like MOVE/COPY/DELETE we should really LOCK the resource
so that no other request can race us.
Actually, check if this is true. Isn't the webdav client responsible for this?
Anyway:
- if the resource is locked exclusively and we hold the lock- great, nothing to do
- otherwise:
- lock the request URL exclusively (unless already locked exclusively), Depth: infinite,
_without checking if any other locks already exist_. This is a temporary lock.
- now check if we actually can lock the request URL and paths below
- if not, unlock, error
- go ahead and do the work
- unlock
The temporary lock should probably have a timeout of, say, 10 seconds, and we
refresh it every 5 seconds or so, so that a stale lock doesn't hang around
too long if something goes catastrophically wrong. That should only matter when
the lock database is separate from the webdav server.
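A minimal sketch of that temporary-lock flow, written against a hypothetical locking interface (the trait and method names below are illustrative; they are not the crate's actual `DavLockSystem` API):

```rust
use std::time::Duration;

/// Hypothetical locking interface, for illustration only.
trait TempLock {
    /// Take an exclusive, Depth: infinity lock *without* checking existing locks.
    fn force_lock(&self, path: &str, timeout: Duration) -> Result<String, ()>;
    /// Would a normal exclusive lock on `path` (and everything below it) be granted?
    fn can_lock(&self, path: &str) -> bool;
    fn unlock(&self, token: &str);
}

/// Run a long-running MOVE/COPY/DELETE guarded by a short-lived temporary lock.
fn with_temp_lock<T>(ls: &dyn TempLock, path: &str, work: impl FnOnce() -> T) -> Result<T, ()> {
    // 1. temporary exclusive lock, bypassing conflict checks.
    let token = ls.force_lock(path, Duration::from_secs(10))?;
    // 2. now check whether we could have locked the URL and the paths below it.
    if !ls.can_lock(path) {
        ls.unlock(&token);
        return Err(());
    }
    // 3. do the actual work; a real implementation would refresh the
    //    temporary lock every ~5 seconds while this runs.
    let result = work();
    // 4. drop the temporary lock again.
    ls.unlock(&token);
    Ok(result)
}
```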
## Improvements:
- Do fake locking only for these user-agents:
  - /WebDAVFS/ // Apple
  - /Microsoft Office OneNote 2013/ // MS
  - /^Microsoft-WebDAV/ // MS
  This is the list that NextCloud uses for fake locking;
  probably (WebDAVFS|Microsoft) would do the trick (see the sketch after this list).
- API: perhaps move filesystem interface to Path/PathBuf or similar and hide WebPath
- add documentation
- add tests, tests ...
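A minimal sketch of the user-agent check, driving the handler through `handle_with` the way the hyper example drives `handle` (the regex and the per-request `FakeLs` override are illustrative, not code that exists in this commit):

```rust
use std::convert::Infallible;

use regex::Regex;
use webdav_handler::{body::Body, fakels::FakeLs, DavConfig, DavHandler};

// Pattern suggested above: enough to catch the macOS and Windows webdav clients.
fn wants_fake_locks(req: &hyper::Request<hyper::Body>) -> bool {
    let re = Regex::new(r"WebDAVFS|Microsoft").unwrap();
    req.headers()
        .get(http::header::USER_AGENT)
        .and_then(|v| v.to_str().ok())
        .map(|ua| re.is_match(ua))
        .unwrap_or(false)
}

async fn handle(
    dav: &DavHandler,
    req: hyper::Request<hyper::Body>,
) -> Result<hyper::Response<Body>, Infallible> {
    let resp = if wants_fake_locks(&req) {
        // Per-request override: pretend to support LOCK/UNLOCK for this client.
        let config = DavConfig::new().locksystem(FakeLs::new());
        dav.handle_with(config, req).await
    } else {
        dav.handle(req).await
    };
    Ok(resp)
}
```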
## Project ideas:
- Add support for properties to localfs.rs on XFS. XFS has unlimited and
  scalable extended attributes; ext2/3/4 can store at most 4 KB. On XFS we can
  then also store the creation date in an attribute.
- Add support for changing live props like mtime/atime
- atime could be done with Win32LastAccessTime
- allow setting apache "executable" prop
- it appears that there are webdav implementations that allow
you to set "DAV:getcontentlength".
- we could support (at least return) some Win32FileAttributes:
- readonly: 00000001 (unix mode)
- hidden: 00000002 (if the file name starts with a ".")
- dir: 00000010
- file: 00000020
Readonly on dirs means "all files in the directory", so that
is best not implemented.
- allow setting of some windows live props:
- readonly (on files, via chmod)
- Win32LastAccessTime, Win32LastModifiedTime
- implement [RFC4437 Webdav Redirectref](https://tools.ietf.org/html/rfc4437) -- basically support for symbolic links
- implement [RFC3744 Webdav ACL](https://tools.ietf.org/html/rfc3744)
## Things I thought of but aren't going to work:
### Compression
- support for compressing responses, at least PROPFIND.
- support for compressed PUT requests
Nice, but no webdav client that I know of uses compression.

9
attic/README.md Normal file

@@ -0,0 +1,9 @@
ATTIC
=====
Temporarily moved the actix-web example here. It's broken.
Most things have been fixed; the only thing that remains is that we need
to find a way to spawn the I/O requests on Actix's threadpool.

46
doc/APPLE-Finder-hints.md Normal file

@@ -0,0 +1,46 @@
# APPLE-FINDER-HINTS
The Apple Finder (and other subsystems) seems to probe for a few
files at the root of a filesystem to get a hint about how it
should treat that filesystem.
It also looks for files with extra localization information in
every directory, and for resource fork data (the `._` files).
## FILES
- `.metadata_never_index`
prevents the system from indexing all of the data
- `.ql_disablethumbnails`
prevents the system from downloading all files that look like an
image or a video to create a thumbnail
- `.ql_disablecache`
not really sure but it sounds useful
The `.ql_` files are configuration for the "QuickLook" functionality
of the Finder.
The `.metadata_never_index` file appears to be a hint for the
Spotlight indexing system.
Additionally, the Finder probes for a `.localized` file in every
directory it encounters, and it does a PROPFIND for the `._`-prefixed
counterpart of every file in the directory.
## OPTIMIZATIONS
For a macOS client we return the metadata for a zero-sized file if it
does a PROPFIND of `/.metadata_never_index` or `/.ql_disablethumbnails`.
We always return a 404 Not Found for a PROPFIND of any `.localized` file.
Furthermore, we disallow moving, removing, etc. of those files. The files
do not show up in a PROPFIND of the root directory.
If a PROPFIND with `Depth: 1` is done on a directory, we add the
directory pathname to an LRU cache, together with the name of each file
that starts with `._`. Since we then know which `._` files
exist, it is easy to return a fast 404 for PROPFIND requests for `._`
files that do not exist. The cache is kept consistent by checking
the timestamp on the parent directory, and by a timeout.
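A rough sketch of such a cache, using the `lru` crate that is already a dependency (the structure and lookup logic are an editorial illustration, not the crate's actual implementation):

```rust
use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};

use lru::LruCache;

/// What we remember per directory: when we listed it, and which `._` names it contained.
struct DirEntry {
    listed_at:      Instant,
    dot_underscore: HashSet<String>,
}

pub struct DotFileCache {
    cache:   LruCache<PathBuf, DirEntry>,
    max_age: Duration,
}

impl DotFileCache {
    pub fn new(capacity: usize, max_age: Duration) -> Self {
        DotFileCache { cache: LruCache::new(capacity), max_age }
    }

    /// Record the result of a `Depth: 1` PROPFIND on `dir`.
    pub fn insert(&mut self, dir: PathBuf, dot_underscore: HashSet<String>) {
        self.cache.put(dir, DirEntry { listed_at: Instant::now(), dot_underscore });
    }

    /// Can we answer "404 Not Found" for `path` without touching the filesystem?
    /// (A full version would also compare the parent directory's timestamp.)
    pub fn known_absent(&mut self, path: &Path) -> bool {
        let name = match path.file_name().and_then(|n| n.to_str()) {
            Some(n) if n.starts_with("._") => n.to_string(),
            _ => return false,
        };
        let dir = match path.parent() {
            Some(d) => d.to_path_buf(),
            None => return false,
        };
        match self.cache.get(&dir) {
            // Fresh entry and the name was not in the listing: fast 404.
            Some(e) if e.listed_at.elapsed() < self.max_age => !e.dot_underscore.contains(&name),
            _ => false,
        }
    }
}
```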

57
doc/APPLE-doubleinfo.md Normal file

@@ -0,0 +1,57 @@
# APPLEDOUBLEINFO
Normally, after asking for a directory listing (using PROPFIND with Depth: 1)
the macOS Finder will send a PROPFIND request for every file in the
directory, prefixed with ".\_", even though it just got a complete directory
listing which doesn't list those files.
An optimization the Apple iDisk service makes is that it sometimes
synthesizes those info files ahead of time. It then lists those synthesized
files in the PROPFIND response together with the <appledoubleheader> property,
which is the contents the ".\_" file would have (if it were present), in base64.
It appears to only do this when the appledoubleinfo data is completely
basic and 82 bytes in size.
This prevents the webdav clients from launching an additional PROPFIND
request for every file prefixed with ".\_".
Note that you cannot add an <appledoubleheader> property to the PROPSTAT
element of a "file" itself; that's ignored, alas. macOS only accepts
it on ".\_" files.
There is not much information about this, but an Apple engineer mentioned it in
https://lists.apple.com/archives/filesystem-dev/2009/Feb/msg00013.html
There is a default "empty"-like response for a file that I found at
https://github.com/DanRohde/webdavcgi/blob/master/lib/perl/WebDAV/Properties.pm
So, what we _could_ do (but don't, yet) to optimize for the macOS webdav client,
when we reply to PROPFIND:
- for each file that does NOT have a ".\_file" present:
  - we synthesize a virtual response
  - for a virtual file with the name ".\_file"
  - with size: 82 bytes
  - that contains:
<DAV:prop xmlns:S="http://www.apple.com/webdav\_fs/props/">
<S:appledoubleheader>
AAUWBwACAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAACAAAAJgAAACwAAAAJAAAAMgAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==
</S:appledoubleheader>
</DAV:prop>
The contents of this base64 string are explained at
https://github.com/DanRohde/webdavcgi/blob/master/lib/perl/WebDAV/Properties.pm
... and they are:
```
appledoubleheader: Magic(4) Version(4) Filler(16) EntryCount(2)
                   EntryDescriptor(id:4 (2: resource fork), offset:4, length:4)
                   EntryDescriptor(id:9 finder) ... Finder Info(16+16)
namespace: http://www.apple.com/webdav_fs/props/
content:   MIME::Base64(pack('H*', '00051607' . '00020000' . ('00' x 16) .
           '0002' . '00000002' . '00000026' . '0000002C' . '00000009' . '00000032' . '00000020' .
           ('00' x 32) ))
```
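For reference, a tiny sketch that builds those 82 bytes in Rust and base64-encodes them; it assumes the external `base64` crate (not a dependency of this crate) and simply restates the constants listed above:

```rust
// Editorial sketch; assumes the external `base64` crate for the final encoding step.
fn appledouble_header_b64() -> String {
    let mut buf: Vec<u8> = Vec::with_capacity(82);
    buf.extend_from_slice(&[0x00, 0x05, 0x16, 0x07]); // magic
    buf.extend_from_slice(&[0x00, 0x02, 0x00, 0x00]); // version
    buf.extend_from_slice(&[0u8; 16]);                // filler
    buf.extend_from_slice(&[0x00, 0x02]);             // entry count: 2
    // entry descriptor (id 2, resource fork): 00000002 00000026 0000002C
    buf.extend_from_slice(&[0, 0, 0, 0x02, 0, 0, 0, 0x26, 0, 0, 0, 0x2C]);
    // entry descriptor (id 9, Finder Info):   00000009 00000032 00000020
    buf.extend_from_slice(&[0, 0, 0, 0x09, 0, 0, 0, 0x32, 0, 0, 0, 0x20]);
    buf.extend_from_slice(&[0u8; 32]);                // Finder Info (16 + 16), all zeroes
    assert_eq!(buf.len(), 82);
    base64::encode(&buf) // should reproduce the base64 string quoted above
}
```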

85
doc/Apache-PUT-with-Content-Range.md Normal file

@@ -0,0 +1,85 @@
# HTTP PUT-with-Content-Range support.
The [mod_dav](https://httpd.apache.org/docs/2.4/mod/mod_dav.html) module of
the [Apache web server](https://httpd.apache.org/) was one of the first
implementations of [Webdav](https://tools.ietf.org/html/rfc4918). Ever since
the first released version, it has had support for partial uploads using
the Content-Range header with PUT requests.
## A sample request
```text
PUT /file.txt
Content-Length: 4
Content-Range: bytes 3-6/*
ABCD
```
This request updates 'file.txt', specifically the bytes 3-6 (inclusive) to
`ABCD`.
There is no explicit support for appending to a file; that is simply done
by writing just past the end of the file. For example, if a file has size
1000, and you want to append 4 bytes:
```text
PUT /file.txt
Content-Length: 4
Content-Range: bytes 1000-1003/*
1234
```
## Apache `mod_dav` behaviour:
- The `Content-Range` header is required, and the syntax is `bytes START-END/LENGTH`.
- END must be bigger than or equal to START.
- LENGTH is parsed by Apache mod_dav, and it must either be a valid number
or a `*` (star), but mod_dav otherwise ignores it. Since it is not clearly
defined what LENGTH should be, always use `*`.
- Neither the start, nor the end-byte have to be within the file's current size.
- If the start-byte is beyond the file's current length, the space in between
will be filled with NULL bytes (`0x00`).
## Notes
- `bytes<space>`, _not_ `bytes=`.
- The `Content-Length` header is not required by the original Apache mod_dav
implementation. The body must either have a valid Content-Length, or it must
use the `Chunked` transfer encoding. It is *strongly encouraged* though to
include Content-Length, so that it can be validated against the range before
accepting the PUT request.
- If the `Content-Length` header is present, its value must be equal
to `END - START + 1`.
## Status codes
### The following status codes are used:
Status code | Reason
----------- | ------
200 or 204 | When the operation was successful
400 | Invalid `Content-Range` header
416 | If there was something wrong with the bytes, such as a `Content-Length` not matching with what was sent as the start and end bytes, or an end byte being lower than the start byte.
501 | Content-Range header present, but not supported.
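A minimal sketch of parsing and validating such a header on the server side, reflecting the rules and status codes described above (standalone illustration, not code from this crate):

```rust
/// Outcome of validating a `Content-Range: bytes START-END/LENGTH` PUT.
enum RangePut {
    Valid { start: u64, end: u64 }, // inclusive byte range to write
    BadRequest,                     // 400: malformed Content-Range header
    Unsatisfiable,                  // 416: inconsistent range / length
}

fn check_put_range(content_range: &str, content_length: Option<u64>) -> RangePut {
    // Note: `bytes<space>`, not `bytes=`.
    let rest = match content_range.strip_prefix("bytes ") {
        Some(r) => r,
        None => return RangePut::BadRequest,
    };
    // Split off "/LENGTH"; LENGTH must be a number or "*", but is otherwise ignored.
    let (range, length) = match rest.split_once('/') {
        Some(t) => t,
        None => return RangePut::BadRequest,
    };
    if length != "*" && length.parse::<u64>().is_err() {
        return RangePut::BadRequest;
    }
    let (start, end) = match range.split_once('-') {
        Some((s, e)) => match (s.parse::<u64>(), e.parse::<u64>()) {
            (Ok(s), Ok(e)) => (s, e),
            _ => return RangePut::BadRequest,
        },
        None => return RangePut::BadRequest,
    };
    // END must be greater than or equal to START.
    if end < start {
        return RangePut::Unsatisfiable;
    }
    // If Content-Length is present, it must be exactly END - START + 1.
    if let Some(len) = content_length {
        if len != end - start + 1 {
            return RangePut::Unsatisfiable;
        }
    }
    RangePut::Valid { start, end }
}
```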
## RECOGNIZING PUT-with-Content-Range support (client).
There is no official way to know if PUT-with-content-range is supported by
a webserver. For a client it's probably best to do an OPTIONS request,
and then check two things:
- the `Server` header must contain the word `Apache`
- the `DAV` header must contain `<http://apache.org/dav/propset/fs/1>`.
In that case, you can be sure you are talking to an Apache webserver with mod_dav enabled.
## IMPLEMENTING PUT-with-Content-Range support (server).
Don't. Implement [sabredav-partialupdate](SABREDAV-partialupdate.md).
## MORE INFORMATION.
https://blog.sphere.chronosempire.org.uk/2012/11/21/webdav-and-the-http-patch-nightmare

108
doc/SABREDAV-partialupdate.md Normal file

@@ -0,0 +1,108 @@
# HTTP PATCH support
This is a markdown translation of the document at
[http://sabre.io/dav/http-patch/](http://sabre.io/dav/http-patch/)
[© 2018 fruux GmbH](https://fruux.com/)
The `Sabre\\DAV\\PartialUpdate\\Plugin` from the Sabre DAV library provides
support for the HTTP PATCH method [RFC5789](http://tools.ietf.org/html/rfc5789).
This allows you to update just a portion of a file, or append to a file.
This document can be used as a spec for other implementors. There is some
DAV-specific stuff in this document, but only in relation to the OPTIONS
request.
## A sample request
```
PATCH /file.txt
Content-Length: 4
Content-Type: application/x-sabredav-partialupdate
X-Update-Range: bytes=3-6
ABCD
```
This request updates 'file.txt', specifically the bytes 3-6 (inclusive) to
`ABCD`.
If you just want to append to an existing file, use the following syntax:
```
PATCH /file.txt
Content-Length: 4
Content-Type: application/x-sabredav-partialupdate
X-Update-Range: append
1234
```
The last request appends 4 bytes to the end of the file.
## The rules
- The `Content-Length` header is required.
- `X-Update-Range` is also required.
- The `bytes` value has exactly the same format as the HTTP Range header. The two numbers
  are inclusive (so `3-6` means that bytes 3, 4, 5 and 6 will be updated).
- Just like the HTTP Range header, the specified byte positions are 0-based.
- The `application/x-sabredav-partialupdate` content type must also be specified.
- The end-byte is optional.
- The start-byte cannot be omitted.
- If the start byte is negative, it's calculated from the end of the file. So
`-1` will update the last byte in the file.
- Use `X-Update-Range: append` to add to the end of the file.
- Neither the start, nor the end-byte have to be within the file's current size.
- If the start-byte is beyond the file's current length, the space in between
will be filled with NULL bytes (`0x00`).
- The specification currently does not support multiple ranges.
- If both start and end offsets are given, then both must be non-negative, and
  the end offset must be greater than or equal to the start offset.
## More examples
The following table illustrates most types of requests and what the end-result
of them will be.
It is assumed that the input file contains `1234567890`, and the request body
always contains 4 dashes (`----`).
X-Update-Range header | Result
--------------------- | -------
`bytes=0-3` | `----567890`
`bytes=1-4` | `1----67890`
`bytes=0-` | `----567890`
`bytes=-4` | `123456----`
`bytes=-2` | `12345678----`
`bytes=2-` | `12----7890`
`bytes=12-` | `1234567890..----`
`append` | `1234567890----`
Please note that in the `bytes=12-` example, we used dots (`.`) to represent
what are actually `NULL` bytes (so `0x00`). The null byte is not printable.
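A small sketch of applying an `X-Update-Range` value to an in-memory buffer; running it against `1234567890` with a body of four dashes reproduces the rows of the table above (standalone illustration, not SabreDAV's or this crate's code):

```rust
/// Apply a partial update to an in-memory "file". Returns `false` for a header
/// this sketch considers invalid (a real server would answer 400 or 416).
fn apply_update_range(file: &mut Vec<u8>, header: &str, body: &[u8]) -> bool {
    let start = if header == "append" {
        // `append` writes at the current end of the file.
        file.len()
    } else if let Some(spec) = header.strip_prefix("bytes=") {
        if let Some(neg) = spec.strip_prefix('-') {
            // Negative start offset: counted from the end of the file.
            match neg.parse::<usize>() {
                Ok(n) => file.len().saturating_sub(n),
                Err(_) => return false,
            }
        } else {
            let (s, e) = match spec.split_once('-') {
                Some(t) => t,
                None => return false,
            };
            let start = match s.parse::<usize>() {
                Ok(n) => n,
                Err(_) => return false,
            };
            // The end byte is optional; when given it must not be smaller than
            // the start, and the body must be exactly end - start + 1 bytes.
            if let Ok(end) = e.parse::<usize>() {
                if end < start || end - start + 1 != body.len() {
                    return false;
                }
            }
            start
        }
    } else {
        return false;
    };
    // Writing past the current end of the file fills the gap with NULL bytes.
    if file.len() < start + body.len() {
        file.resize(start + body.len(), 0);
    }
    file[start..start + body.len()].copy_from_slice(body);
    true
}
```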
## Status codes
### The following status codes should be used:
Status code | Reason
----------- | ------
200 or 204 | When the operation was successful
400 | Invalid `X-Update-Range` header
411 | `Content-Length` header was not provided
415 | Unrecognized content-type, should be `application/x-sabredav-partialupdate`
416 | If there was something wrong with the bytes, such as a `Content-Length` not matching with what was sent as the start and end bytes, or an end byte being lower than the start byte.
## OPTIONS
If you want to be compliant with SabreDAV's implementation of PATCH, you must
also return 'sabredav-partialupdate' in the 'DAV:' header:
```
HTTP/1.1 204 No Content
DAV: 1, 2, 3, sabredav-partialupdate, extended-mkcol
```
This is only required if you are adding this feature to a DAV server. For
non-webdav implementations such as REST services this is optional.

37
examples/actix.rs Normal file

@@ -0,0 +1,37 @@
use std::io;
use actix_web::{web, App, HttpServer};
use webdav_handler::actix::*;
use webdav_handler::{fakels::FakeLs, localfs::LocalFs, DavConfig, DavHandler};
pub async fn dav_handler(req: DavRequest, davhandler: web::Data<DavHandler>) -> DavResponse {
if let Some(prefix) = req.prefix() {
let config = DavConfig::new().strip_prefix(prefix);
davhandler.handle_with(config, req.request).await.into()
} else {
davhandler.handle(req.request).await.into()
}
}
#[actix_web::main]
async fn main() -> io::Result<()> {
env_logger::init();
let addr = "127.0.0.1:4918";
let dir = "/tmp";
let dav_server = DavHandler::builder()
.filesystem(LocalFs::new(dir, false, false, false))
.locksystem(FakeLs::new())
.build_handler();
println!("actix-web example: listening on {} serving {}", addr, dir);
HttpServer::new(move || {
App::new()
.data(dav_server.clone())
.service(web::resource("/{tail:.*}").to(dav_handler))
})
.bind(addr)?
.run()
.await
}

31
examples/hyper.rs Normal file

@@ -0,0 +1,31 @@
use std::convert::Infallible;
use webdav_handler::{fakels::FakeLs, localfs::LocalFs, DavHandler};
#[tokio::main]
async fn main() {
env_logger::init();
let dir = "/tmp";
let addr = ([127, 0, 0, 1], 4918).into();
let dav_server = DavHandler::builder()
.filesystem(LocalFs::new(dir, false, false, false))
.locksystem(FakeLs::new())
.build_handler();
let make_service = hyper::service::make_service_fn(move |_| {
let dav_server = dav_server.clone();
async move {
let func = move |req| {
let dav_server = dav_server.clone();
async move { Ok::<_, Infallible>(dav_server.handle(req).await) }
};
Ok::<_, Infallible>(hyper::service::service_fn(func))
}
});
println!("hyper example: listening on {:?} serving {}", addr, dir);
let _ = hyper::Server::bind(&addr)
.serve(make_service)
.await
.map_err(|e| eprintln!("server error: {}", e));
}

125
examples/sample-litmus-server.rs Normal file

@@ -0,0 +1,125 @@
//
// Sample application.
//
// Listens on localhost:4918, plain http, no ssl.
// Connect to http://localhost:4918/
//
use std::convert::Infallible;
use std::error::Error;
use std::net::SocketAddr;
use std::str::FromStr;
#[macro_use]
extern crate clap;
use env_logger;
use futures::future::TryFutureExt;
use hyper;
use headers::{authorization::Basic, Authorization, HeaderMapExt};
use webdav_handler::{body::Body, fakels, localfs, memfs, memls, DavConfig, DavHandler};
#[derive(Clone)]
struct Server {
dh: DavHandler,
auth: bool,
}
impl Server {
pub fn new(directory: String, memls: bool, fakels: bool, auth: bool) -> Self {
let mut config = DavHandler::builder();
if directory != "" {
config = config.filesystem(localfs::LocalFs::new(directory, true, true, true));
} else {
config = config.filesystem(memfs::MemFs::new());
};
if fakels {
config = config.locksystem(fakels::FakeLs::new());
}
if memls {
config = config.locksystem(memls::MemLs::new());
}
Server {
dh: config.build_handler(),
auth,
}
}
async fn handle(&self, req: hyper::Request<hyper::Body>) -> Result<hyper::Response<Body>, Infallible> {
let user = if self.auth {
// we want the client to authenticate.
match req.headers().typed_get::<Authorization<Basic>>() {
Some(Authorization(basic)) => Some(basic.username().to_string()),
None => {
// return a 401 reply.
let response = hyper::Response::builder()
.status(401)
.header("WWW-Authenticate", "Basic realm=\"foo\"")
.body(Body::from("please auth".to_string()))
.unwrap();
return Ok(response);
},
}
} else {
None
};
if let Some(user) = user {
let config = DavConfig::new().principal(user);
Ok(self.dh.handle_with(config, req).await)
} else {
Ok(self.dh.handle(req).await)
}
}
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
env_logger::init();
let matches = clap_app!(webdav_lib =>
(version: "0.1")
(@arg PORT: -p --port +takes_value "port to listen on (4918)")
(@arg DIR: -d --dir +takes_value "local directory to serve")
(@arg MEMFS: -m --memfs "serve from ephemeral memory filesystem (default)")
(@arg MEMLS: -l --memls "use ephemeral memory locksystem (default with --memfs)")
(@arg FAKELS: -f --fakels "use fake memory locksystem (default with --memfs)")
(@arg AUTH: -a --auth "require basic authentication")
)
.get_matches();
let (dir, name) = match matches.value_of("DIR") {
Some(dir) => (dir, dir),
None => ("", "memory filesystem"),
};
let auth = matches.is_present("AUTH");
let memls = matches.is_present("MEMFS") || matches.is_present("MEMLS");
let fakels = matches.is_present("FAKELS");
let dav_server = Server::new(dir.to_string(), memls, fakels, auth);
let make_service = hyper::service::make_service_fn(|_| {
let dav_server = dav_server.clone();
async move {
let func = move |req| {
let dav_server = dav_server.clone();
async move { dav_server.clone().handle(req).await }
};
Ok::<_, hyper::Error>(hyper::service::service_fn(func))
}
});
let port = matches.value_of("PORT").unwrap_or("4918");
let addr = "0.0.0.0:".to_string() + port;
let addr = SocketAddr::from_str(&addr)?;
let server = hyper::Server::try_bind(&addr)?
.serve(make_service)
.map_err(|e| eprintln!("server error: {}", e));
println!("Serving {} on {}", name, port);
let _ = server.await;
Ok(())
}

13
examples/warp.rs Normal file
View File

@@ -0,0 +1,13 @@
use std::net::SocketAddr;
use webdav_handler::warp::dav_dir;
#[tokio::main]
async fn main() {
env_logger::init();
let dir = "/tmp";
let addr: SocketAddr = ([127, 0, 0, 1], 4918).into();
println!("warp example: listening on {:?} serving {}", addr, dir);
let warpdav = dav_dir(dir, true, true);
warp::serve(warpdav).run(addr).await;
}

13
generate-readme Executable file

@@ -0,0 +1,13 @@
#! /bin/sh
VERSION=$(sed -ne 's!^version *= *"\(.*\)".*!\1!p' Cargo.toml)
DOCS="https://docs.rs/webdav-handler/$VERSION/webdav_handler"
if ! fgrep "$VERSION" src/lib.rs >/dev/null
then
echo "WARNING: html_root_url in src/lib.rs out of date"
fi
cargo readme |
sed -e 's!^\(.*\]: \)\(.*\.html\)$!\1'"$DOCS"'/\2!' > README.md

12
rustfmt.toml Normal file

@@ -0,0 +1,12 @@
unstable_features = true
edition = "2018"
binop_separator = "Back"
blank_lines_upper_bound = 3
enum_discrim_align_threshold = 20
force_multiline_blocks = true
match_block_trailing_comma = true
max_width = 110
struct_field_align_threshold = 20
where_single_line = true
wrap_comments = false

147
src/actix.rs Normal file

@@ -0,0 +1,147 @@
//! Adapters to use the standard `http` types with Actix.
//!
//! Using the adapters in this crate, it's easy to build a webdav
//! handler for actix:
//!
//! ```no_run
//! use webdav_handler::{DavHandler, actix::DavRequest, actix::DavResponse};
//! use actix_web::web;
//!
//! pub async fn dav_handler(req: DavRequest, davhandler: web::Data<DavHandler>) -> DavResponse {
//! davhandler.handle(req.request).await.into()
//! }
//! ```
//!
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_web::error::PayloadError;
use actix_web::{dev, Error, FromRequest, HttpRequest, HttpResponse};
use bytes::Bytes;
use futures::{future, Stream};
use pin_project::pin_project;
/// http::Request compatibility.
///
/// Wraps `http::Request<DavBody>` and implements `actix_web::FromRequest`.
pub struct DavRequest {
pub request: http::Request<DavBody>,
prefix: Option<String>,
}
impl DavRequest {
/// Returns the request path minus the tail.
pub fn prefix(&self) -> Option<&str> {
self.prefix.as_ref().map(|s| s.as_str())
}
}
impl FromRequest for DavRequest {
type Config = ();
type Error = Error;
type Future = future::Ready<Result<DavRequest, Error>>;
fn from_request(req: &HttpRequest, payload: &mut dev::Payload) -> Self::Future {
let mut builder = http::Request::builder()
.method(req.method().to_owned())
.uri(req.uri().to_owned())
.version(req.version().to_owned());
for (name, value) in req.headers().iter() {
builder = builder.header(name, value);
}
let path = req.match_info().path();
let tail = req.match_info().unprocessed();
let prefix = match &path[..path.len() - tail.len()] {
"" | "/" => None,
x => Some(x.to_string()),
};
let body = DavBody { body: payload.take() };
let stdreq = DavRequest {
request: builder.body(body).unwrap(),
prefix,
};
future::ready(Ok(stdreq))
}
}
/// Body type for `DavRequest`.
///
/// It wraps actix's `Payload` and implements `http_body::Body`.
#[pin_project]
pub struct DavBody {
#[pin]
body: dev::Payload,
}
impl http_body::Body for DavBody {
type Data = Bytes;
type Error = io::Error;
fn poll_data(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Data, Self::Error>>>
{
let this = self.project();
match this.body.poll_next(cx) {
Poll::Ready(Some(Ok(data))) => Poll::Ready(Some(Ok(data))),
Poll::Ready(Some(Err(err))) => {
Poll::Ready(Some(Err(match err {
PayloadError::Incomplete(Some(err)) => err,
PayloadError::Incomplete(None) => io::ErrorKind::BrokenPipe.into(),
PayloadError::Io(err) => err,
other => io::Error::new(io::ErrorKind::Other, format!("{:?}", other)),
})))
},
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
}
}
fn poll_trailers(
self: Pin<&mut Self>,
_cx: &mut Context,
) -> Poll<Result<Option<http::HeaderMap>, Self::Error>>
{
Poll::Ready(Ok(None))
}
}
/// `http::Response` compatibility.
///
/// Wraps `http::Response<dav_handler::body::Body>` and implements actix_web::Responder.
pub struct DavResponse(pub http::Response<crate::body::Body>);
impl From<http::Response<crate::body::Body>> for DavResponse {
fn from(resp: http::Response<crate::body::Body>) -> DavResponse {
DavResponse(resp)
}
}
impl actix_web::Responder for DavResponse {
fn respond_to(self, _req: &HttpRequest) -> HttpResponse {
use crate::body::{Body, BodyType};
let (parts, body) = self.0.into_parts();
let mut builder = HttpResponse::build(parts.status);
for (name, value) in parts.headers.into_iter() {
builder.append_header((name.unwrap(), value));
}
// I noticed that actix-web returns an empty chunked body
// (\r\n0\r\n\r\n) and _no_ Transfer-Encoding header on
// a 204 statuscode. It's probably because of
// builder.streaming(). So only use builder.streaming()
// on actual streaming replies.
let resp = match body.inner {
BodyType::Bytes(None) => builder.body(""),
BodyType::Bytes(Some(b)) => builder.body(b),
BodyType::Empty => builder.body(""),
b @ BodyType::AsyncStream(..) => builder.streaming(Body { inner: b }),
};
resp
}
}

162
src/async_stream.rs Normal file

@@ -0,0 +1,162 @@
//! Use an [async block][async] to produce items for a stream.
//!
//! Example:
//!
//! ```rust ignore
//! use futures::StreamExt;
//! use futures::executor::block_on;
//! # use webdav_handler::async_stream;
//! use async_stream::AsyncStream;
//!
//! let mut strm = AsyncStream::<u8, std::io::Error>::new(|mut tx| async move {
//! for i in 0u8..10 {
//! tx.send(i).await;
//! }
//! Ok(())
//! });
//!
//! let fut = async {
//! let mut count = 0;
//! while let Some(item) = strm.next().await {
//! println!("{:?}", item);
//! count += 1;
//! }
//! assert!(count == 10);
//! };
//! block_on(fut);
//!
//! ```
//!
//! The stream will produce a `Result<Item, Error>` where the `Item`
//! is an item sent with [tx.send(item)][send]. Any errors returned by
//! the async closure will be returned as an error value on
//! the stream.
//!
//! On success the async closure should return `Ok(())`.
//!
//! [async]: https://rust-lang.github.io/async-book/getting_started/async_await_primer.html
//! [send]: async_stream/struct.Sender.html#method.send
//!
use std::cell::Cell;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use futures::Stream;
/// Future returned by the Sender.send() method.
///
/// Completes when the item is sent.
#[must_use]
pub struct SenderFuture {
is_ready: bool,
}
impl SenderFuture {
fn new() -> SenderFuture {
SenderFuture { is_ready: false }
}
}
impl Future for SenderFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.is_ready {
Poll::Ready(())
} else {
self.is_ready = true;
Poll::Pending
}
}
}
// Only internally used by one AsyncStream and never shared
// in any other way, so we don't have to use Arc<Mutex<..>>.
/// Type of the sender passed as first argument into the async closure.
pub struct Sender<I, E>(Rc<Cell<Option<I>>>, PhantomData<E>);
unsafe impl<I: Sync, E: Sync> Sync for Sender<I, E> {}
unsafe impl<I: Send, E: Send> Send for Sender<I, E> {}
impl<I, E> Sender<I, E> {
fn new(item_opt: Option<I>) -> Sender<I, E> {
Sender(Rc::new(Cell::new(item_opt)), PhantomData::<E>)
}
// note that this is NOT impl Clone for Sender, it's private.
fn clone(&self) -> Sender<I, E> {
Sender(self.0.clone(), PhantomData::<E>)
}
/// Send one item to the stream.
pub fn send<T>(&mut self, item: T) -> SenderFuture
where T: Into<I> {
self.0.set(Some(item.into()));
SenderFuture::new()
}
}
/// An abstraction around a future, where the
/// future can internally loop and yield items.
///
/// AsyncStream::new() takes a [Future][Future] ([async closure][async], usually)
/// and AsyncStream then implements a [futures 0.3 Stream][Stream].
///
/// [async]: https://rust-lang.github.io/async-book/getting_started/async_await_primer.html
/// [Future]: https://doc.rust-lang.org/std/future/trait.Future.html
/// [Stream]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html
#[must_use]
pub struct AsyncStream<Item, Error> {
item: Sender<Item, Error>,
fut: Option<Pin<Box<dyn Future<Output = Result<(), Error>> + 'static + Send>>>,
}
impl<Item, Error: 'static + Send> AsyncStream<Item, Error> {
/// Create a new stream from a closure returning a Future 0.3,
/// or an "async closure" (which is the same).
///
/// The closure is passed one argument, the sender, which has a
/// method "send" that can be called to send a item to the stream.
pub fn new<F, R>(f: F) -> Self
where
F: FnOnce(Sender<Item, Error>) -> R,
R: Future<Output = Result<(), Error>> + Send + 'static,
Item: 'static,
{
let sender = Sender::new(None);
AsyncStream::<Item, Error> {
item: sender.clone(),
fut: Some(Box::pin(f(sender))),
}
}
}
/// Stream implementation for Futures 0.3.
impl<I, E: Unpin> Stream for AsyncStream<I, E> {
type Item = Result<I, E>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Result<I, E>>> {
let pollres = {
let fut = self.fut.as_mut().unwrap();
fut.as_mut().poll(cx)
};
match pollres {
// If the future returned Poll::Ready, that signals the end of the stream.
Poll::Ready(Ok(_)) => Poll::Ready(None),
Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))),
Poll::Pending => {
// Pending means that some sub-future returned pending. That sub-future
// _might_ have been the SenderFuture returned by Sender.send, so
// check if there is an item available in self.item.
let mut item = self.item.0.replace(None);
if item.is_none() {
Poll::Pending
} else {
Poll::Ready(Some(Ok(item.take().unwrap())))
}
},
}
}
}

147
src/body.rs Normal file

@@ -0,0 +1,147 @@
//! Definitions for the Request and Response bodies.
use std::error::Error as StdError;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::{Buf, Bytes};
use futures::stream::Stream;
use http::header::HeaderMap;
use http_body::Body as HttpBody;
use crate::async_stream::AsyncStream;
/// Body is returned by the webdav handler, and implements both `Stream`
/// and `http_body::Body`.
pub struct Body {
pub(crate) inner: BodyType,
}
pub(crate) enum BodyType {
Bytes(Option<Bytes>),
AsyncStream(AsyncStream<Bytes, io::Error>),
Empty,
}
impl Body {
/// Return an empty body.
pub fn empty() -> Body {
Body {
inner: BodyType::Empty,
}
}
}
impl Stream for Body {
type Item = io::Result<Bytes>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
match self.inner {
BodyType::Bytes(ref mut strm) => Poll::Ready(strm.take().map(|b| Ok(b))),
BodyType::AsyncStream(ref mut strm) => {
let strm = Pin::new(strm);
strm.poll_next(cx)
},
BodyType::Empty => Poll::Ready(None),
}
}
}
impl HttpBody for Body {
type Data = Bytes;
type Error = io::Error;
fn poll_data(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Result<Self::Data, Self::Error>>> {
self.poll_next(cx)
}
fn poll_trailers(
self: Pin<&mut Self>,
_cx: &mut Context,
) -> Poll<Result<Option<HeaderMap>, Self::Error>>
{
Poll::Ready(Ok(None))
}
}
impl From<String> for Body {
fn from(t: String) -> Body {
Body {
inner: BodyType::Bytes(Some(Bytes::from(t))),
}
}
}
impl From<&str> for Body {
fn from(t: &str) -> Body {
Body {
inner: BodyType::Bytes(Some(Bytes::from(t.to_string()))),
}
}
}
impl From<Bytes> for Body {
fn from(t: Bytes) -> Body {
Body {
inner: BodyType::Bytes(Some(t)),
}
}
}
impl From<AsyncStream<Bytes, io::Error>> for Body {
fn from(s: AsyncStream<Bytes, io::Error>) -> Body {
Body {
inner: BodyType::AsyncStream(s),
}
}
}
use pin_project::pin_project;
//
// A struct that contains a Stream, and implements http_body::Body.
//
#[pin_project]
pub(crate) struct StreamBody<B> {
#[pin]
body: B,
}
impl<ReqBody, ReqData, ReqError> HttpBody for StreamBody<ReqBody>
where
ReqData: Buf + Send,
ReqError: StdError + Send + Sync + 'static,
ReqBody: Stream<Item = Result<ReqData, ReqError>>,
{
type Data = ReqData;
type Error = ReqError;
fn poll_data(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Data, Self::Error>>>
{
let this = self.project();
this.body.poll_next(cx)
}
fn poll_trailers(
self: Pin<&mut Self>,
_cx: &mut Context,
) -> Poll<Result<Option<HeaderMap>, Self::Error>>
{
Poll::Ready(Ok(None))
}
}
impl<ReqBody, ReqData, ReqError> StreamBody<ReqBody>
where
ReqData: Buf + Send,
ReqError: StdError + Send + Sync + 'static,
ReqBody: Stream<Item = Result<ReqData, ReqError>>,
{
pub fn new(body: ReqBody) -> StreamBody<ReqBody> {
StreamBody { body }
}
}

246
src/conditional.rs Normal file

@@ -0,0 +1,246 @@
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use headers::HeaderMapExt;
use http::{Method, StatusCode};
use crate::davheaders::{self, ETag};
use crate::davpath::DavPath;
use crate::fs::{DavFileSystem, DavMetaData};
use crate::ls::DavLockSystem;
type Request = http::Request<()>;
// SystemTime has nanosecond precision. Round it down to the
// nearest second, because an HttpDate has second precision.
fn round_time(tm: impl Into<SystemTime>) -> SystemTime {
let tm = tm.into();
match tm.duration_since(UNIX_EPOCH) {
Ok(d) => UNIX_EPOCH + Duration::from_secs(d.as_secs()),
Err(_) => tm,
}
}
pub(crate) fn ifrange_match(
hdr: &davheaders::IfRange,
tag: Option<&davheaders::ETag>,
date: Option<SystemTime>,
) -> bool
{
match hdr {
&davheaders::IfRange::Date(ref d) => {
match date {
Some(date) => round_time(date) == round_time(*d),
None => false,
}
},
&davheaders::IfRange::ETag(ref t) => {
match tag {
Some(tag) => t == tag,
None => false,
}
},
}
}
pub(crate) fn etaglist_match(
tags: &davheaders::ETagList,
exists: bool,
tag: Option<&davheaders::ETag>,
) -> bool
{
match tags {
&davheaders::ETagList::Star => exists,
&davheaders::ETagList::Tags(ref t) => {
match tag {
Some(tag) => t.iter().any(|x| x == tag),
None => false,
}
},
}
}
// Handle the if-headers: RFC 7232, HTTP/1.1 Conditional Requests.
pub(crate) fn http_if_match(req: &Request, meta: Option<&Box<dyn DavMetaData>>) -> Option<StatusCode> {
let file_modified = meta.and_then(|m| m.modified().ok());
if let Some(r) = req.headers().typed_get::<davheaders::IfMatch>() {
let etag = meta.and_then(|m| ETag::from_meta(m));
if !etaglist_match(&r.0, meta.is_some(), etag.as_ref()) {
trace!("precondition fail: If-Match {:?}", r);
return Some(StatusCode::PRECONDITION_FAILED);
}
} else if let Some(r) = req.headers().typed_get::<headers::IfUnmodifiedSince>() {
match file_modified {
None => return Some(StatusCode::PRECONDITION_FAILED),
Some(file_modified) => {
if round_time(file_modified) > round_time(r) {
trace!("precondition fail: If-Unmodified-Since {:?}", r);
return Some(StatusCode::PRECONDITION_FAILED);
}
},
}
}
if let Some(r) = req.headers().typed_get::<davheaders::IfNoneMatch>() {
let etag = meta.and_then(|m| ETag::from_meta(m));
if etaglist_match(&r.0, meta.is_some(), etag.as_ref()) {
trace!("precondition fail: If-None-Match {:?}", r);
if req.method() == &Method::GET || req.method() == &Method::HEAD {
return Some(StatusCode::NOT_MODIFIED);
} else {
return Some(StatusCode::PRECONDITION_FAILED);
}
}
} else if let Some(r) = req.headers().typed_get::<headers::IfModifiedSince>() {
if req.method() == &Method::GET || req.method() == &Method::HEAD {
if let Some(file_modified) = file_modified {
if round_time(file_modified) <= round_time(r) {
trace!("not-modified If-Modified-Since {:?}", r);
return Some(StatusCode::NOT_MODIFIED);
}
}
}
}
None
}
// handle the If header: RFC4918, 10.4. If Header
//
// returns true if the header was not present, or if any of the iflists
// evaluated to true. Also returns a Vec of StateTokens that we encountered.
//
// caller should set the http status to 412 PreconditionFailed if
// the return value from this function is false.
//
pub(crate) async fn dav_if_match<'a>(
req: &'a Request,
fs: &'a Box<dyn DavFileSystem + 'static>,
ls: &'a Option<Box<dyn DavLockSystem + 'static>>,
path: &'a DavPath,
) -> (bool, Vec<String>)
{
let mut tokens: Vec<String> = Vec::new();
let mut any_list_ok = false;
let r = match req.headers().typed_get::<davheaders::If>() {
Some(r) => r,
None => return (true, tokens),
};
for iflist in r.0.iter() {
// save and return all statetokens that we encountered.
let toks = iflist.conditions.iter().filter_map(|c| {
match &c.item {
&davheaders::IfItem::StateToken(ref t) => Some(t.to_owned()),
_ => None,
}
});
tokens.extend(toks);
// skip over if a previous list already evaluated to true.
if any_list_ok {
continue;
}
// find the resource that this list is about.
let mut pa: Option<DavPath> = None;
let (p, valid) = match iflist.resource_tag {
Some(ref url) => {
match DavPath::from_str_and_prefix(url.path(), path.prefix()) {
Ok(p) => {
// anchor davpath in pa.
let p: &DavPath = pa.get_or_insert(p);
(p, true)
},
Err(_) => (path, false),
}
},
None => (path, true),
};
// now process the conditions. they must all be true.
let mut list_ok = false;
for cond in iflist.conditions.iter() {
let cond_ok = match cond.item {
davheaders::IfItem::StateToken(ref s) => {
// tokens in DAV: namespace always evaluate to false (10.4.8)
if !valid || s.starts_with("DAV:") {
false
} else {
match ls {
&Some(ref ls) => ls.check(p, None, true, false, vec![s]).is_ok(),
&None => false,
}
}
},
davheaders::IfItem::ETag(ref tag) => {
if !valid {
// invalid location, so always false.
false
} else {
match fs.metadata(p).await {
Ok(meta) => {
// exists and may have metadata ..
if let Some(mtag) = ETag::from_meta(meta) {
tag == &mtag
} else {
false
}
},
Err(_) => {
// metadata error, fail.
false
},
}
}
},
};
if cond_ok == cond.not {
list_ok = false;
break;
}
list_ok = true;
}
if list_ok {
any_list_ok = true;
}
}
if !any_list_ok {
trace!("precondition fail: If {:?}", r.0);
}
(any_list_ok, tokens)
}
// Handle both the HTTP conditional If: headers, and the webdav If: header.
pub(crate) async fn if_match<'a>(
req: &'a Request,
meta: Option<&'a Box<dyn DavMetaData + 'static>>,
fs: &'a Box<dyn DavFileSystem + 'static>,
ls: &'a Option<Box<dyn DavLockSystem + 'static>>,
path: &'a DavPath,
) -> Option<StatusCode>
{
match dav_if_match(req, fs, ls, path).await {
(true, _) => {},
(false, _) => return Some(StatusCode::PRECONDITION_FAILED),
}
http_if_match(req, meta)
}
// Like if_match, but also returns all "associated state-tokens"
pub(crate) async fn if_match_get_tokens<'a>(
req: &'a Request,
meta: Option<&'a Box<dyn DavMetaData + 'static>>,
fs: &'a Box<dyn DavFileSystem + 'static>,
ls: &'a Option<Box<dyn DavLockSystem + 'static>>,
path: &'a DavPath,
) -> Result<Vec<String>, StatusCode>
{
if let Some(code) = http_if_match(req, meta) {
return Err(code);
}
match dav_if_match(req, fs, ls, path).await {
(true, v) => Ok(v),
(false, _) => Err(StatusCode::PRECONDITION_FAILED),
}
}

502
src/davhandler.rs Normal file

@@ -0,0 +1,502 @@
//
// This module contains the main entry point of the library,
// DavHandler.
//
use std::error::Error as StdError;
use std::io;
use std::sync::Arc;
use bytes::{self, buf::Buf};
use futures::stream::Stream;
use headers::HeaderMapExt;
use http::{Request, Response, StatusCode};
use http_body::Body as HttpBody;
use crate::body::{Body, StreamBody};
use crate::davheaders;
use crate::davpath::DavPath;
use crate::util::{dav_method, DavMethod, DavMethodSet};
use crate::errors::DavError;
use crate::fs::*;
use crate::ls::*;
use crate::voidfs::{is_voidfs, VoidFs};
use crate::DavResult;
/// The webdav handler struct.
///
/// The `new` and `build` etc methods are used to instantiate a handler.
///
/// The `handle` and `handle_with` methods are the methods that do the actual work.
#[derive(Clone)]
pub struct DavHandler {
pub(crate) config: Arc<DavConfig>,
}
/// Configuration of the handler.
#[derive(Default)]
pub struct DavConfig {
// Prefix to be stripped off when handling request.
pub(crate) prefix: Option<String>,
// Filesystem backend.
pub(crate) fs: Option<Box<dyn DavFileSystem>>,
// Locksystem backend.
pub(crate) ls: Option<Box<dyn DavLockSystem>>,
// Set of allowed methods (None means "all methods")
pub(crate) allow: Option<DavMethodSet>,
// Principal is webdav speak for "user", used to give locks an owner (if a locksystem is
// active).
pub(crate) principal: Option<String>,
// Hide symbolic links? `None` maps to `true`.
pub(crate) hide_symlinks: Option<bool>,
// Does GET on a directory return indexes.
pub(crate) autoindex: Option<bool>,
// index.html
pub(crate) indexfile: Option<String>,
}
impl DavConfig {
/// Create a new configuration builder.
pub fn new() -> DavConfig {
DavConfig::default()
}
/// Use the configuration that was built to generate a `DavHandler`.
pub fn build_handler(self) -> DavHandler {
DavHandler {
config: Arc::new(self),
}
}
/// Prefix to be stripped off before translating the rest of
/// the request path to a filesystem path.
pub fn strip_prefix(self, prefix: impl Into<String>) -> Self {
let mut this = self;
this.prefix = Some(prefix.into());
this
}
/// Set the filesystem to use.
pub fn filesystem(self, fs: Box<dyn DavFileSystem>) -> Self {
let mut this = self;
this.fs = Some(fs);
this
}
/// Set the locksystem to use.
pub fn locksystem(self, ls: Box<dyn DavLockSystem>) -> Self {
let mut this = self;
this.ls = Some(ls);
this
}
/// Which methods to allow (default is all methods).
pub fn methods(self, allow: DavMethodSet) -> Self {
let mut this = self;
this.allow = Some(allow);
this
}
/// Set the name of the "webdav principal". This will be the owner of any created locks.
pub fn principal(self, principal: impl Into<String>) -> Self {
let mut this = self;
this.principal = Some(principal.into());
this
}
/// Hide symbolic links (default is true)
pub fn hide_symlinks(self, hide: bool) -> Self {
let mut this = self;
this.hide_symlinks = Some(hide);
this
}
/// Does a GET on a directory produce a directory index.
pub fn autoindex(self, autoindex: bool) -> Self {
let mut this = self;
this.autoindex = Some(autoindex);
this
}
/// Indexfile to show (index.html, usually).
pub fn indexfile(self, indexfile: impl Into<String>) -> Self {
let mut this = self;
this.indexfile = Some(indexfile.into());
this
}
fn merge(&self, new: DavConfig) -> DavConfig {
DavConfig {
prefix: new.prefix.or(self.prefix.clone()),
fs: new.fs.or(self.fs.clone()),
ls: new.ls.or(self.ls.clone()),
allow: new.allow.or(self.allow.clone()),
principal: new.principal.or(self.principal.clone()),
hide_symlinks: new.hide_symlinks.or(self.hide_symlinks.clone()),
autoindex: new.autoindex.or(self.autoindex.clone()),
indexfile: new.indexfile.or(self.indexfile.clone()),
}
}
}
// The actual inner struct.
//
// At the start of the request, DavConfig is used to generate
// a DavInner struct. DavInner::handle then handles the request.
pub(crate) struct DavInner {
pub prefix: String,
pub fs: Box<dyn DavFileSystem>,
pub ls: Option<Box<dyn DavLockSystem>>,
pub allow: Option<DavMethodSet>,
pub principal: Option<String>,
pub hide_symlinks: Option<bool>,
pub autoindex: Option<bool>,
pub indexfile: Option<String>,
}
impl From<DavConfig> for DavInner {
fn from(cfg: DavConfig) -> Self {
DavInner {
prefix: cfg.prefix.unwrap_or("".to_string()),
fs: cfg.fs.unwrap_or(VoidFs::new()),
ls: cfg.ls,
allow: cfg.allow,
principal: cfg.principal,
hide_symlinks: cfg.hide_symlinks,
autoindex: cfg.autoindex,
indexfile: cfg.indexfile,
}
}
}
impl From<&DavConfig> for DavInner {
fn from(cfg: &DavConfig) -> Self {
DavInner {
prefix: cfg
.prefix
.as_ref()
.map(|p| p.to_owned())
.unwrap_or("".to_string()),
fs: cfg.fs.clone().unwrap_or(VoidFs::new()),
ls: cfg.ls.clone(),
allow: cfg.allow,
principal: cfg.principal.clone(),
hide_symlinks: cfg.hide_symlinks.clone(),
autoindex: cfg.autoindex.clone(),
indexfile: cfg.indexfile.clone(),
}
}
}
impl Clone for DavInner {
fn clone(&self) -> Self {
DavInner {
prefix: self.prefix.clone(),
fs: self.fs.clone(),
ls: self.ls.clone(),
allow: self.allow.clone(),
principal: self.principal.clone(),
hide_symlinks: self.hide_symlinks.clone(),
autoindex: self.autoindex.clone(),
indexfile: self.indexfile.clone(),
}
}
}
impl DavHandler {
/// Create a new `DavHandler`.
///
/// This returns a DavHandler with an empty configuration. That's only
/// useful if you use the `handle_with` method instead of `handle`.
/// Normally you should create a new `DavHandler` using `DavHandler::builder`
/// and configure at least the filesystem, and probably the strip_prefix.
pub fn new() -> DavHandler {
DavHandler {
config: Arc::new(DavConfig::default()),
}
}
/// Return a configuration builder.
pub fn builder() -> DavConfig {
DavConfig::new()
}
/// Handle a webdav request.
pub async fn handle<ReqBody, ReqData, ReqError>(&self, req: Request<ReqBody>) -> Response<Body>
where
ReqData: Buf + Send + 'static,
ReqError: StdError + Send + Sync + 'static,
ReqBody: HttpBody<Data = ReqData, Error = ReqError>,
{
let inner = DavInner::from(&*self.config);
inner.handle(req).await
}
/// Handle a webdav request, overriding parts of the config.
///
/// For example, the `principal` can be set for this request.
///
/// Or, the default config has no locksystem, and you pass in
/// a fake locksystem (`FakeLs`) because this is a request from a
/// Windows or macOS client that needs to see locking support.
pub async fn handle_with<ReqBody, ReqData, ReqError>(
&self,
config: DavConfig,
req: Request<ReqBody>,
) -> Response<Body>
where
ReqData: Buf + Send + 'static,
ReqError: StdError + Send + Sync + 'static,
ReqBody: HttpBody<Data = ReqData, Error = ReqError>,
{
let inner = DavInner::from(self.config.merge(config));
inner.handle(req).await
}
/// Handles a request with a `Stream` body instead of an `HttpBody`.
/// Used with webserver frameworks that have not
/// opted to use the `http_body` crate just yet.
#[doc(hidden)]
pub async fn handle_stream<ReqBody, ReqData, ReqError>(&self, req: Request<ReqBody>) -> Response<Body>
where
ReqData: Buf + Send + 'static,
ReqError: StdError + Send + Sync + 'static,
ReqBody: Stream<Item = Result<ReqData, ReqError>>,
{
let req = {
let (parts, body) = req.into_parts();
Request::from_parts(parts, StreamBody::new(body))
};
let inner = DavInner::from(&*self.config);
inner.handle(req).await
}
/// Handles a request with a `Stream` body instead of an `HttpBody`, overriding parts of the config.
#[doc(hidden)]
pub async fn handle_stream_with<ReqBody, ReqData, ReqError>(
&self,
config: DavConfig,
req: Request<ReqBody>,
) -> Response<Body>
where
ReqData: Buf + Send + 'static,
ReqError: StdError + Send + Sync + 'static,
ReqBody: Stream<Item = Result<ReqData, ReqError>>,
{
let req = {
let (parts, body) = req.into_parts();
Request::from_parts(parts, StreamBody::new(body))
};
let inner = DavInner::from(self.config.merge(config));
inner.handle(req).await
}
}
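// A hedged, test-only usage sketch: build a handler through the `DavConfig`
// builder and merge a per-request override, the way `handle_with()` does
// internally. `VoidFs` stands in for a real backend purely so the example is
// self-contained; a real server would plug in a concrete filesystem instead.
// Assumes the `FakeLs` locksystem from src/fakels.rs is exposed as
// `crate::fakels::FakeLs`.
#[cfg(test)]
mod builder_example {
    use super::*;
    use crate::fakels::FakeLs;

    #[test]
    fn build_and_merge_config() {
        let handler = DavHandler::builder()
            .strip_prefix("/dav")
            .filesystem(VoidFs::new())
            .locksystem(FakeLs::new())
            .autoindex(true)
            .build_handler();

        // Per-request override: fields left unset fall back to the
        // handler's own configuration.
        let per_request = DavConfig::new().principal("alice");
        let merged = handler.config.merge(per_request);
        assert_eq!(merged.principal.as_deref(), Some("alice"));
        assert_eq!(merged.prefix.as_deref(), Some("/dav"));
    }
}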
impl DavInner {
// helper.
pub(crate) async fn has_parent<'a>(&'a self, path: &'a DavPath) -> bool {
let p = path.parent();
self.fs.metadata(&p).await.map(|m| m.is_dir()).unwrap_or(false)
}
// helper.
pub(crate) fn path(&self, req: &Request<()>) -> DavPath {
// This never fails (has been checked before)
DavPath::from_uri_and_prefix(req.uri(), &self.prefix).unwrap()
}
// See if this is a directory and if so, if we have
// to fixup the path by adding a slash at the end.
pub(crate) fn fixpath(
&self,
res: &mut Response<Body>,
path: &mut DavPath,
meta: Box<dyn DavMetaData>,
) -> Box<dyn DavMetaData>
{
if meta.is_dir() && !path.is_collection() {
path.add_slash();
let newloc = path.with_prefix().as_url_string();
res.headers_mut()
.typed_insert(davheaders::ContentLocation(newloc));
}
meta
}
// Read the request body (at most `max_size` bytes) and return it as a Vec<u8>.
pub(crate) async fn read_request<'a, ReqBody, ReqData, ReqError>(
&'a self,
body: ReqBody,
max_size: usize,
) -> DavResult<Vec<u8>>
where
ReqBody: HttpBody<Data = ReqData, Error = ReqError>,
ReqData: Buf + Send + 'static,
ReqError: StdError + Send + Sync + 'static,
{
let mut data = Vec::new();
pin_utils::pin_mut!(body);
while let Some(res) = body.data().await {
let mut buf = res.map_err(|_| {
DavError::IoError(io::Error::new(io::ErrorKind::UnexpectedEof, "UnexpectedEof"))
})?;
while buf.has_remaining() {
if data.len() + buf.remaining() > max_size {
return Err(StatusCode::PAYLOAD_TOO_LARGE.into());
}
let b = buf.chunk();
let l = b.len();
data.extend_from_slice(b);
buf.advance(l);
}
}
Ok(data)
}
// internal dispatcher.
async fn handle<ReqBody, ReqData, ReqError>(self, req: Request<ReqBody>) -> Response<Body>
where
ReqBody: HttpBody<Data = ReqData, Error = ReqError>,
ReqData: Buf + Send + 'static,
ReqError: StdError + Send + Sync + 'static,
{
let is_ms = req
.headers()
.get("user-agent")
.and_then(|s| s.to_str().ok())
.map(|s| s.contains("Microsoft"))
.unwrap_or(false);
// Turn any DavError results into a HTTP error response.
match self.handle2(req).await {
Ok(resp) => {
debug!("== END REQUEST result OK");
resp
},
Err(err) => {
debug!("== END REQUEST result {:?}", err);
let mut resp = Response::builder();
if is_ms && err.statuscode() == StatusCode::NOT_FOUND {
// This is an attempt to convince Windows to not
// cache a 404 NOT_FOUND for 30-60 seconds.
//
// That is a problem since Windows caches the NOT_FOUND in a
// case-insensitive way. So if "www" does not exist, but "WWW" does,
// and you do a "dir www" and then a "dir WWW" the second one
// will fail.
//
// Of course the below is not sufficient. Fixes welcome.
resp = resp
.header("Cache-Control", "no-store, no-cache, must-revalidate")
.header("Progma", "no-cache")
.header("Expires", "0")
.header("Vary", "*");
}
resp = resp.header("Content-Length", "0").status(err.statuscode());
if err.must_close() {
resp = resp.header("connection", "close");
}
resp.body(Body::empty()).unwrap()
},
}
}
// internal dispatcher part 2.
async fn handle2<ReqBody, ReqData, ReqError>(mut self, req: Request<ReqBody>) -> DavResult<Response<Body>>
where
ReqBody: HttpBody<Data = ReqData, Error = ReqError>,
ReqData: Buf + Send + 'static,
ReqError: StdError + Send + Sync + 'static,
{
let (req, body) = {
let (parts, body) = req.into_parts();
(Request::from_parts(parts, ()), body)
};
// debug when running the webdav litmus tests.
if log_enabled!(log::Level::Debug) {
if let Some(t) = req.headers().typed_get::<davheaders::XLitmus>() {
debug!("X-Litmus: {:?}", t);
}
}
// translate HTTP method to Webdav method.
let method = match dav_method(req.method()) {
Ok(m) => m,
Err(e) => {
debug!("refusing method {} request {}", req.method(), req.uri());
return Err(e);
},
};
// See if the method makes sense if we do not have a filesystem.
if is_voidfs(&self.fs) {
match method {
DavMethod::Options => {
if self
.allow
.as_ref()
.map(|a| a.contains(DavMethod::Options))
.unwrap_or(true)
{
let mut a = DavMethodSet::none();
a.add(DavMethod::Options);
self.allow = Some(a);
}
},
_ => {
debug!("no filesystem: method not allowed on request {}", req.uri());
return Err(DavError::StatusClose(StatusCode::METHOD_NOT_ALLOWED));
},
}
}
// see if method is allowed.
if let Some(ref a) = self.allow {
if !a.contains(method) {
debug!("method {} not allowed on request {}", req.method(), req.uri());
return Err(DavError::StatusClose(StatusCode::METHOD_NOT_ALLOWED));
}
}
// make sure the request path is valid.
let path = DavPath::from_uri_and_prefix(req.uri(), &self.prefix)?;
// PUT and PATCH are the only handlers that read the body themselves. All the
// other handlers either expect no body, or a pre-read Vec<u8>.
let (body_strm, body_data) = match method {
DavMethod::Put | DavMethod::Patch => (Some(body), Vec::new()),
_ => (None, self.read_request(body, 65536).await?),
};
// Not all methods accept a body.
match method {
DavMethod::Put |
DavMethod::Patch |
DavMethod::PropFind |
DavMethod::PropPatch |
DavMethod::Lock => {},
_ => {
if body_data.len() > 0 {
return Err(StatusCode::UNSUPPORTED_MEDIA_TYPE.into());
}
},
}
debug!("== START REQUEST {:?} {}", method, path);
let res = match method {
DavMethod::Options => self.handle_options(&req).await,
DavMethod::PropFind => self.handle_propfind(&req, &body_data).await,
DavMethod::PropPatch => self.handle_proppatch(&req, &body_data).await,
DavMethod::MkCol => self.handle_mkcol(&req).await,
DavMethod::Delete => self.handle_delete(&req).await,
DavMethod::Lock => self.handle_lock(&req, &body_data).await,
DavMethod::Unlock => self.handle_unlock(&req).await,
DavMethod::Head | DavMethod::Get => self.handle_get(&req).await,
DavMethod::Copy | DavMethod::Move => self.handle_copymove(&req, method).await,
DavMethod::Put | DavMethod::Patch => self.handle_put(&req, body_strm.unwrap()).await,
};
res
}
}

798
src/davheaders.rs Normal file
View File

@@ -0,0 +1,798 @@
use std::convert::TryFrom;
use std::fmt::Display;
use std::str::FromStr;
use headers::Header;
use http::header::{HeaderName, HeaderValue};
use lazy_static::lazy_static;
use regex::Regex;
use url;
use crate::fs::DavMetaData;
lazy_static! {
static ref RE_URL: Regex = Regex::new(r"https?://[^/]*([^#?]+).*$").unwrap();
pub static ref DEPTH: HeaderName = HeaderName::from_static("depth");
pub static ref TIMEOUT: HeaderName = HeaderName::from_static("timeout");
pub static ref OVERWRITE: HeaderName = HeaderName::from_static("overwrite");
pub static ref DESTINATION: HeaderName = HeaderName::from_static("destination");
pub static ref ETAG: HeaderName = HeaderName::from_static("etag");
pub static ref IF_RANGE: HeaderName = HeaderName::from_static("if-range");
pub static ref IF_MATCH: HeaderName = HeaderName::from_static("if-match");
pub static ref IF_NONE_MATCH: HeaderName = HeaderName::from_static("if-none-match");
pub static ref X_UPDATE_RANGE: HeaderName = HeaderName::from_static("x-update-range");
pub static ref IF: HeaderName = HeaderName::from_static("if");
pub static ref CONTENT_LANGUAGE: HeaderName = HeaderName::from_static("content-language");
}
// helper.
fn one<'i, I>(values: &mut I) -> Result<&'i HeaderValue, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let v = values.next().ok_or_else(invalid)?;
if values.next().is_some() {
Err(invalid())
} else {
Ok(v)
}
}
// helper
fn invalid() -> headers::Error {
headers::Error::invalid()
}
// helper
fn map_invalid(_e: impl std::error::Error) -> headers::Error {
headers::Error::invalid()
}
macro_rules! header {
($tname:ident, $hname:ident, $sname:expr) => {
lazy_static! {
pub static ref $hname: HeaderName = HeaderName::from_static($sname);
}
#[derive(Debug, Clone, PartialEq)]
pub struct $tname(pub String);
impl Header for $tname {
fn name() -> &'static HeaderName {
&$hname
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
one(values)?
.to_str()
.map(|x| $tname(x.to_owned()))
.map_err(map_invalid)
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
let value = HeaderValue::from_str(&self.0).unwrap();
values.extend(std::iter::once(value))
}
}
};
}
header!(ContentType, CONTENT_TYPE, "content-type");
header!(ContentLocation, CONTENT_LOCATION, "content-location");
header!(LockToken, LOCK_TOKEN, "lock-token");
header!(XLitmus, X_LITMUS, "x-litmus");
/// Depth: header.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum Depth {
Zero,
One,
Infinity,
}
impl Header for Depth {
fn name() -> &'static HeaderName {
&DEPTH
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let value = one(values)?;
match value.as_bytes() {
b"0" => Ok(Depth::Zero),
b"1" => Ok(Depth::One),
b"infinity" | b"Infinity" => Ok(Depth::Infinity),
_ => Err(invalid()),
}
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
let value = match *self {
Depth::Zero => "0",
Depth::One => "1",
Depth::Infinity => "Infinity",
};
values.extend(std::iter::once(HeaderValue::from_static(value)));
}
}
/// Content-Language header.
#[derive(Debug, Clone, PartialEq)]
pub struct ContentLanguage(headers::Vary);
impl ContentLanguage {
#[allow(dead_code)]
pub fn iter_langs(&self) -> impl Iterator<Item = &str> {
self.0.iter_strs()
}
}
impl TryFrom<&str> for ContentLanguage {
type Error = headers::Error;
fn try_from(value: &str) -> Result<Self, Self::Error> {
let value = HeaderValue::from_str(value).map_err(map_invalid)?;
let mut values = std::iter::once(&value);
ContentLanguage::decode(&mut values)
}
}
impl Header for ContentLanguage {
fn name() -> &'static HeaderName {
&CONTENT_LANGUAGE
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let h = match headers::Vary::decode(values) {
Err(e) => return Err(e),
Ok(h) => h,
};
for lang in h.iter_strs() {
let lang = lang.as_bytes();
// **VERY** rudimentary check ...
let ok = lang.len() == 2 || (lang.len() > 4 && lang[2] == b'-');
if !ok {
return Err(invalid());
}
}
Ok(ContentLanguage(h))
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
self.0.encode(values)
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum DavTimeout {
Seconds(u32),
Infinite,
}
#[derive(Debug, Clone)]
pub struct Timeout(pub Vec<DavTimeout>);
impl Header for Timeout {
fn name() -> &'static HeaderName {
&TIMEOUT
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let value = one(values)?;
let mut v = Vec::new();
let words = value.to_str().map_err(map_invalid)?.split(|c| c == ',');
for word in words {
let w = match word {
"Infinite" => DavTimeout::Infinite,
_ if word.starts_with("Second-") => {
let num = &word[7..];
match num.parse::<u32>() {
Err(_) => return Err(invalid()),
Ok(n) => DavTimeout::Seconds(n),
}
},
_ => return Err(invalid()),
};
v.push(w);
}
return Ok(Timeout(v));
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
let mut first = true;
let mut value = String::new();
for s in &self.0 {
if !first {
value.push_str(", ");
}
first = false;
match s {
&DavTimeout::Seconds(n) => value.push_str(&format!("Second-{}", n)),
&DavTimeout::Infinite => value.push_str("Infinite"),
}
}
values.extend(std::iter::once(HeaderValue::from_str(&value).unwrap()));
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct Destination(pub String);
impl Header for Destination {
fn name() -> &'static HeaderName {
&DESTINATION
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let s = one(values)?.to_str().map_err(map_invalid)?;
if s.starts_with("/") {
return Ok(Destination(s.to_string()));
}
if let Some(caps) = RE_URL.captures(s) {
if let Some(path) = caps.get(1) {
return Ok(Destination(path.as_str().to_string()));
}
}
Err(invalid())
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
values.extend(std::iter::once(HeaderValue::from_str(&self.0).unwrap()));
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct Overwrite(pub bool);
impl Header for Overwrite {
fn name() -> &'static HeaderName {
&OVERWRITE
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let line = one(values)?;
match line.as_bytes() {
b"F" => Ok(Overwrite(false)),
b"T" => Ok(Overwrite(true)),
_ => Err(invalid()),
}
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
let value = match self.0 {
true => "T",
false => "F",
};
values.extend(std::iter::once(HeaderValue::from_static(value)));
}
}
#[derive(Debug, Clone)]
pub struct ETag {
tag: String,
weak: bool,
}
impl ETag {
#[allow(dead_code)]
pub fn new(weak: bool, t: impl Into<String>) -> Result<ETag, headers::Error> {
let t = t.into();
if t.contains("\"") {
Err(invalid())
} else {
let w = if weak { "W/" } else { "" };
Ok(ETag {
tag: format!("{}\"{}\"", w, t),
weak: weak,
})
}
}
pub fn from_meta(meta: impl AsRef<dyn DavMetaData>) -> Option<ETag> {
let tag = meta.as_ref().etag()?;
Some(ETag {
tag: format!("\"{}\"", tag),
weak: false,
})
}
#[allow(dead_code)]
pub fn is_weak(&self) -> bool {
self.weak
}
}
impl FromStr for ETag {
type Err = headers::Error;
fn from_str(t: &str) -> Result<Self, Self::Err> {
let (weak, s) = if t.starts_with("W/") {
(true, &t[2..])
} else {
(false, t)
};
if s.starts_with("\"") && s.ends_with("\"") && !s[1..s.len() - 1].contains("\"") {
Ok(ETag {
tag: t.to_owned(),
weak: weak,
})
} else {
Err(invalid())
}
}
}
impl TryFrom<&HeaderValue> for ETag {
type Error = headers::Error;
fn try_from(value: &HeaderValue) -> Result<Self, Self::Error> {
let s = value.to_str().map_err(map_invalid)?;
ETag::from_str(s)
}
}
impl Display for ETag {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}", self.tag)
}
}
impl PartialEq for ETag {
fn eq(&self, other: &Self) -> bool {
!self.weak && !other.weak && self.tag == other.tag
}
}
impl Header for ETag {
fn name() -> &'static HeaderName {
&ETAG
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let value = one(values)?;
ETag::try_from(value)
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
values.extend(std::iter::once(HeaderValue::from_str(&self.tag).unwrap()));
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum IfRange {
ETag(ETag),
Date(headers::Date),
}
impl Header for IfRange {
fn name() -> &'static HeaderName {
&IF_RANGE
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let value = one(values)?;
let mut iter = std::iter::once(value);
if let Ok(tm) = headers::Date::decode(&mut iter) {
return Ok(IfRange::Date(tm));
}
let mut iter = std::iter::once(value);
if let Ok(et) = ETag::decode(&mut iter) {
return Ok(IfRange::ETag(et));
}
Err(invalid())
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
match self {
&IfRange::Date(ref d) => d.encode(values),
&IfRange::ETag(ref t) => t.encode(values),
}
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum ETagList {
Tags(Vec<ETag>),
Star,
}
#[derive(Debug, Clone, PartialEq)]
pub struct IfMatch(pub ETagList);
#[derive(Debug, Clone, PartialEq)]
pub struct IfNoneMatch(pub ETagList);
// Decode a list of etags. This is not entirely correct, we should
// actually use a real parser. E.g. we don't handle commas in
// etags correctly - but we never generate those anyway.
fn decode_etaglist<'i, I>(values: &mut I) -> Result<ETagList, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let mut v = Vec::new();
let mut count = 0usize;
for value in values {
let s = value.to_str().map_err(map_invalid)?;
if s.trim() == "*" {
return Ok(ETagList::Star);
}
for t in s.split(',') {
// Simply skip malformed etags, they will never match.
if let Ok(t) = ETag::from_str(t.trim()) {
v.push(t);
}
}
count += 1;
}
if count != 0 {
Ok(ETagList::Tags(v))
} else {
Err(invalid())
}
}
fn encode_etaglist<E>(m: &ETagList, values: &mut E)
where E: Extend<HeaderValue> {
let value = match m {
&ETagList::Star => "*".to_string(),
&ETagList::Tags(ref t) => t.iter().map(|t| t.tag.as_str()).collect::<Vec<&str>>().join(", "),
};
values.extend(std::iter::once(HeaderValue::from_str(&value).unwrap()));
}
impl Header for IfMatch {
fn name() -> &'static HeaderName {
&IF_MATCH
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
Ok(IfMatch(decode_etaglist(values)?))
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
encode_etaglist(&self.0, values)
}
}
impl Header for IfNoneMatch {
fn name() -> &'static HeaderName {
&IF_NONE_MATCH
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
Ok(IfNoneMatch(decode_etaglist(values)?))
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
encode_etaglist(&self.0, values)
}
}
#[derive(Debug, Clone, PartialEq)]
pub enum XUpdateRange {
FromTo(u64, u64),
AllFrom(u64),
Last(u64),
Append,
}
impl Header for XUpdateRange {
fn name() -> &'static HeaderName {
&X_UPDATE_RANGE
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
let mut s = one(values)?.to_str().map_err(map_invalid)?;
if s == "append" {
return Ok(XUpdateRange::Append);
}
if !s.starts_with("bytes=") {
return Err(invalid());
}
s = &s[6..];
let nums = s.split("-").collect::<Vec<&str>>();
if nums.len() != 2 {
return Err(invalid());
}
if nums[0] != "" && nums[1] != "" {
return Ok(XUpdateRange::FromTo(
(nums[0]).parse::<u64>().map_err(map_invalid)?,
(nums[1]).parse::<u64>().map_err(map_invalid)?,
));
}
if nums[0] != "" {
return Ok(XUpdateRange::AllFrom(
(nums[0]).parse::<u64>().map_err(map_invalid)?,
));
}
if nums[1] != "" {
return Ok(XUpdateRange::Last((nums[1]).parse::<u64>().map_err(map_invalid)?));
}
return Err(invalid());
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
let value = match self {
&XUpdateRange::Append => "append".to_string(),
&XUpdateRange::FromTo(b, e) => format!("{}-{}", b, e),
&XUpdateRange::AllFrom(b) => format!("{}-", b),
&XUpdateRange::Last(e) => format!("-{}", e),
};
values.extend(std::iter::once(HeaderValue::from_str(&value).unwrap()));
}
}
// The "If" header contains IfLists, of which the results are ORed.
#[derive(Debug, Clone, PartialEq)]
pub struct If(pub Vec<IfList>);
// An IfList contains Conditions, of which the results are ANDed.
#[derive(Debug, Clone, PartialEq)]
pub struct IfList {
pub resource_tag: Option<url::Url>,
pub conditions: Vec<IfCondition>,
}
// helpers.
impl IfList {
fn new() -> IfList {
IfList {
resource_tag: None,
conditions: Vec::new(),
}
}
fn add(&mut self, not: bool, item: IfItem) {
self.conditions.push(IfCondition { not, item });
}
}
// Single Condition is [NOT] State-Token | ETag
#[derive(Debug, Clone, PartialEq)]
pub struct IfCondition {
pub not: bool,
pub item: IfItem,
}
#[derive(Debug, Clone, PartialEq)]
pub enum IfItem {
StateToken(String),
ETag(ETag),
}
// Below stuff is for the parser state.
#[derive(Debug, Clone, PartialEq)]
enum IfToken {
ListOpen,
ListClose,
Not,
Word(String),
Pointy(String),
ETag(ETag),
End,
}
#[derive(Debug, Clone, PartialEq)]
enum IfState {
Start,
RTag,
List,
Not,
Bad,
}
// helpers.
fn is_whitespace(c: u8) -> bool {
b" \t\r\n".iter().any(|&x| x == c)
}
fn is_special(c: u8) -> bool {
b"<>()[]".iter().any(|&x| x == c)
}
fn trim_left<'a>(mut out: &'a [u8]) -> &'a [u8] {
while !out.is_empty() && is_whitespace(out[0]) {
out = &out[1..];
}
out
}
// scan from the opening delimiter until the matching `c`, honoring quotes.
fn scan_until(buf: &[u8], c: u8) -> Result<(&[u8], &[u8]), headers::Error> {
    let mut i = 1;
    let mut quote = false;
    while i < buf.len() && (quote || buf[i] != c) {
        if is_whitespace(buf[i]) {
            return Err(invalid());
        }
        if buf[i] == b'"' {
            quote = !quote;
        }
        i += 1;
    }
    if i >= buf.len() {
        // ran off the end without finding the closing delimiter.
        return Err(invalid());
    }
    Ok((&buf[1..i], &buf[i + 1..]))
}
// scan one word.
fn scan_word(buf: &[u8]) -> Result<(&[u8], &[u8]), headers::Error> {
for (i, &c) in buf.iter().enumerate() {
if is_whitespace(c) || is_special(c) || c < 32 {
if i == 0 {
return Err(invalid());
}
return Ok((&buf[..i], &buf[i..]));
}
}
Ok((buf, b""))
}
// get next token.
fn get_token<'a>(buf: &'a [u8]) -> Result<(IfToken, &'a [u8]), headers::Error> {
let buf = trim_left(buf);
if buf.is_empty() {
return Ok((IfToken::End, buf));
}
match buf[0] {
b'(' => Ok((IfToken::ListOpen, &buf[1..])),
b')' => Ok((IfToken::ListClose, &buf[1..])),
b'N' if buf.starts_with(b"Not") => Ok((IfToken::Not, &buf[3..])),
b'<' => {
let (tok, rest) = scan_until(buf, b'>')?;
let s = std::string::String::from_utf8(tok.to_vec()).map_err(map_invalid)?;
Ok((IfToken::Pointy(s), rest))
},
b'[' => {
let (tok, rest) = scan_until(buf, b']')?;
let s = std::str::from_utf8(tok).map_err(map_invalid)?;
Ok((IfToken::ETag(ETag::from_str(s)?), rest))
},
_ => {
let (tok, rest) = scan_word(buf)?;
if tok == b"Not" {
Ok((IfToken::Not, rest))
} else {
let s = std::string::String::from_utf8(tok.to_vec()).map_err(map_invalid)?;
Ok((IfToken::Word(s), rest))
}
},
}
}
impl Header for If {
fn name() -> &'static HeaderName {
&IF
}
fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
where I: Iterator<Item = &'i HeaderValue> {
// one big state machine.
let mut if_lists = If(Vec::new());
let mut cur_list = IfList::new();
let mut state = IfState::Start;
let mut input = one(values)?.as_bytes();
loop {
let (tok, rest) = get_token(input)?;
input = rest;
state = match state {
IfState::Start => {
match tok {
IfToken::ListOpen => IfState::List,
IfToken::Pointy(url) => {
let u = url::Url::parse(&url).map_err(map_invalid)?;
cur_list.resource_tag = Some(u);
IfState::RTag
},
IfToken::End => {
if if_lists.0.len() > 0 {
break;
}
IfState::Bad
},
_ => IfState::Bad,
}
},
IfState::RTag => {
match tok {
IfToken::ListOpen => IfState::List,
_ => IfState::Bad,
}
},
IfState::List | IfState::Not => {
let not = state == IfState::Not;
match tok {
IfToken::Not => {
if not {
IfState::Bad
} else {
IfState::Not
}
},
IfToken::Pointy(stok) | IfToken::Word(stok) => {
// as we don't have a URI parser, just
// check if there's at least one ':' in there.
if !stok.contains(":") {
IfState::Bad
} else {
cur_list.add(not, IfItem::StateToken(stok));
IfState::List
}
},
IfToken::ETag(etag) => {
cur_list.add(not, IfItem::ETag(etag));
IfState::List
},
IfToken::ListClose => {
if cur_list.conditions.is_empty() {
IfState::Bad
} else {
if_lists.0.push(cur_list);
cur_list = IfList::new();
IfState::Start
}
},
_ => IfState::Bad,
}
},
IfState::Bad => return Err(invalid()),
};
}
Ok(if_lists)
}
fn encode<E>(&self, values: &mut E)
where E: Extend<HeaderValue> {
let value = "[If header]";
values.extend(std::iter::once(HeaderValue::from_static(value)));
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn if_header() {
// Note that some implementations (golang net/x/webdav) also
// accept a "plain word" as StateToken, instead of only
// a Coded-Url (<...>). We allow that as well, but I have
// no idea if we need to (or should!).
//let val = r#" <http://x.yz/> ([W/"etag"] Not <DAV:nope> )
// (Not<urn:x>[W/"bla"] plain:word:123) "#;
let val = r#" <http://x.yz/> ([W/"etag"] Not <DAV:nope> ) (Not<urn:x>[W/"bla"] plain:word:123) "#;
let hdrval = HeaderValue::from_static(val);
let mut iter = std::iter::once(&hdrval);
let hdr = If::decode(&mut iter);
assert!(hdr.is_ok());
}
#[test]
fn etag_header() {
let t1 = ETag::from_str(r#"W/"12345""#).unwrap();
let t2 = ETag::from_str(r#"W/"12345""#).unwrap();
let t3 = ETag::from_str(r#""12346""#).unwrap();
let t4 = ETag::from_str(r#""12346""#).unwrap();
assert!(t1 != t2);
assert!(t2 != t3);
assert!(t3 == t4);
}
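    // A few extra, hedged checks for the simpler typed headers defined above
    // (Depth, Timeout, and one header!()-generated type). They exercise only
    // code in this file plus the `headers::HeaderMapExt` helpers.
    #[test]
    fn depth_header() {
        let val = HeaderValue::from_static("infinity");
        let mut iter = std::iter::once(&val);
        assert_eq!(Depth::decode(&mut iter).unwrap(), Depth::Infinity);

        let val = HeaderValue::from_static("2");
        let mut iter = std::iter::once(&val);
        assert!(Depth::decode(&mut iter).is_err());
    }
    #[test]
    fn timeout_header() {
        let val = HeaderValue::from_static("Second-300");
        let mut iter = std::iter::once(&val);
        let t = Timeout::decode(&mut iter).unwrap();
        assert_eq!(t.0, vec![DavTimeout::Seconds(300)]);
    }
    #[test]
    fn macro_generated_header_roundtrip() {
        // header!()-generated types plug straight into headers::HeaderMapExt.
        use headers::HeaderMapExt;
        let mut map = http::HeaderMap::new();
        map.typed_insert(LockToken("<opaquetoken:abc>".to_string()));
        let tok: LockToken = map.typed_get().unwrap();
        assert_eq!(tok.0, "<opaquetoken:abc>");
    }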
}

427
src/davpath.rs Normal file
View File

@@ -0,0 +1,427 @@
//! Utility module to handle the path part of a URL as a filesystem path.
//!
use std::error::Error;
use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use mime_guess;
use percent_encoding as pct;
use crate::DavError;
/// URL path, with hidden prefix.
#[derive(Clone)]
pub struct DavPath {
fullpath: Vec<u8>,
pfxlen: Option<usize>,
}
/// Reference to DavPath, no prefix.
/// It's what you get when you `Deref` `DavPath`, and returned by `DavPath::with_prefix()`.
pub struct DavPathRef {
fullpath: [u8],
}
#[derive(Copy, Clone, Debug)]
#[allow(non_camel_case_types)]
struct ENCODE_SET;
impl pct::EncodeSet for ENCODE_SET {
// Encode all non-unreserved characters, except '/'.
// See RFC3986, and https://en.wikipedia.org/wiki/Percent-encoding .
#[inline]
fn contains(&self, byte: u8) -> bool {
let unreserved = (byte >= b'A' && byte <= b'Z') ||
(byte >= b'a' && byte <= b'z') ||
(byte >= b'0' && byte <= b'9') ||
byte == b'-' ||
byte == b'_' ||
byte == b'.' ||
byte == b'~';
!unreserved && byte != b'/'
}
}
impl std::fmt::Display for DavPath {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", &self.as_url_string_with_prefix_debug())
}
}
impl std::fmt::Debug for DavPath {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", &self.as_url_string_with_prefix_debug())
}
}
/// Error returned by some of the DavPath methods.
#[derive(Debug)]
pub enum ParseError {
/// cannot parse
InvalidPath,
/// outside of prefix
PrefixMismatch,
/// too many dotdots
ForbiddenPath,
}
impl Error for ParseError {
fn description(&self) -> &str {
"DavPath parse error"
}
fn cause(&self) -> Option<&dyn Error> {
None
}
}
impl std::fmt::Display for ParseError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
impl From<ParseError> for DavError {
fn from(e: ParseError) -> Self {
match e {
ParseError::InvalidPath => DavError::InvalidPath,
ParseError::PrefixMismatch => DavError::IllegalPath,
ParseError::ForbiddenPath => DavError::ForbiddenPath,
}
}
}
// a decoded segment can contain any value except '/' or '\0'
fn valid_segment(src: &[u8]) -> Result<(), ParseError> {
let mut p = pct::percent_decode(src);
if p.any(|x| x == 0 || x == b'/') {
return Err(ParseError::InvalidPath);
}
Ok(())
}
// encode path segment with user-defined ENCODE_SET
fn encode_path(src: &[u8]) -> Vec<u8> {
pct::percent_encode(src, ENCODE_SET).to_string().into_bytes()
}
// make path safe:
// - raw path before decoding can contain only printable ascii
// - make sure path is absolute
// - remove query part (everything after ?)
// - merge consecutive slashes
// - process . and ..
// - decode percent encoded bytes, fail on invalid encodings.
// - do not allow NUL or '/' in segments.
fn normalize_path(rp: &[u8]) -> Result<Vec<u8>, ParseError> {
// must consist of printable ASCII
if rp.iter().any(|&x| x < 32 || x > 126) {
Err(ParseError::InvalidPath)?;
}
// don't allow fragments. query part gets deleted.
let mut rawpath = rp;
if let Some(pos) = rawpath.iter().position(|&x| x == b'?' || x == b'#') {
if rawpath[pos] == b'#' {
Err(ParseError::InvalidPath)?;
}
rawpath = &rawpath[..pos];
}
// must start with "/"
if rawpath.is_empty() || rawpath[0] != b'/' {
Err(ParseError::InvalidPath)?;
}
// split up in segments
let isdir = match rawpath.last() {
Some(x) if *x == b'/' => true,
_ => false,
};
let segments = rawpath.split(|c| *c == b'/');
let mut v: Vec<&[u8]> = Vec::new();
for segment in segments {
match segment {
b"." | b"" => {},
b".." => {
if v.len() < 2 {
return Err(ParseError::ForbiddenPath);
}
v.pop();
v.pop();
},
s => {
if let Err(e) = valid_segment(s) {
Err(e)?;
}
v.push(b"/");
v.push(s);
},
}
}
if isdir || v.is_empty() {
v.push(b"/");
}
Ok(v.iter().flat_map(|s| pct::percent_decode(s)).collect())
}
/// Comparison ignores any trailing slash, so /foo == /foo/
impl PartialEq for DavPath {
fn eq(&self, rhs: &DavPath) -> bool {
let mut a = self.fullpath.as_slice();
if a.len() > 1 && a.ends_with(b"/") {
a = &a[..a.len() - 1];
}
let mut b = rhs.fullpath.as_slice();
if b.len() > 1 && b.ends_with(b"/") {
b = &b[..b.len() - 1];
}
a == b
}
}
impl DavPath {
/// from URL encoded path
pub fn new(src: &str) -> Result<DavPath, ParseError> {
let path = normalize_path(src.as_bytes())?;
Ok(DavPath {
fullpath: path.to_vec(),
pfxlen: None,
})
}
/// Set prefix.
pub fn set_prefix(&mut self, prefix: &str) -> Result<(), ParseError> {
let path = &mut self.fullpath;
let prefix = prefix.as_bytes();
if !path.starts_with(prefix) {
return Err(ParseError::PrefixMismatch);
}
let mut pfxlen = prefix.len();
if prefix.ends_with(b"/") {
pfxlen -= 1;
if path[pfxlen] != b'/' {
return Err(ParseError::PrefixMismatch);
}
} else if path.len() == pfxlen {
path.push(b'/');
}
self.pfxlen = Some(pfxlen);
Ok(())
}
/// Return a DavPathRef that refers to the entire URL path with prefix.
pub fn with_prefix(&self) -> &DavPathRef {
DavPathRef::new(&self.fullpath)
}
/// from URL encoded path and non-encoded prefix.
pub(crate) fn from_str_and_prefix(src: &str, prefix: &str) -> Result<DavPath, ParseError> {
let path = normalize_path(src.as_bytes())?;
let mut davpath = DavPath {
fullpath: path.to_vec(),
pfxlen: None,
};
davpath.set_prefix(prefix)?;
Ok(davpath)
}
/// from request.uri
pub(crate) fn from_uri_and_prefix(uri: &http::uri::Uri, prefix: &str) -> Result<Self, ParseError> {
match uri.path() {
"*" => {
Ok(DavPath {
fullpath: b"*".to_vec(),
pfxlen: None,
})
},
path if path.starts_with("/") => DavPath::from_str_and_prefix(path, prefix),
_ => Err(ParseError::InvalidPath),
}
}
/// from request.uri
pub fn from_uri(uri: &http::uri::Uri) -> Result<Self, ParseError> {
Ok(DavPath {
fullpath: uri.path().as_bytes().to_vec(),
pfxlen: None,
})
}
/// add a slash to the end of the path (if not already present).
pub(crate) fn add_slash(&mut self) {
if !self.is_collection() {
self.fullpath.push(b'/');
}
}
// add a slash
pub(crate) fn add_slash_if(&mut self, b: bool) {
if b && !self.is_collection() {
self.fullpath.push(b'/');
}
}
/// Add a segment to the end of the path.
pub(crate) fn push_segment(&mut self, b: &[u8]) {
if !self.is_collection() {
self.fullpath.push(b'/');
}
self.fullpath.extend_from_slice(b);
}
// as URL encoded string, with prefix.
pub(crate) fn as_url_string_with_prefix_debug(&self) -> String {
let mut p = encode_path(self.get_path());
if self.get_prefix().len() > 0 {
let mut u = encode_path(self.get_prefix());
u.extend_from_slice(b"[");
u.extend_from_slice(&p);
u.extend_from_slice(b"]");
p = u;
}
std::string::String::from_utf8(p).unwrap()
}
// Return the prefix.
fn get_prefix(&self) -> &[u8] {
&self.fullpath[..self.pfxlen.unwrap_or(0)]
}
/// return the URL prefix.
pub fn prefix(&self) -> &str {
std::str::from_utf8(self.get_prefix()).unwrap()
}
/// Return the parent directory.
pub(crate) fn parent(&self) -> DavPath {
let mut segs = self
.fullpath
.split(|&c| c == b'/')
.filter(|e| e.len() > 0)
.collect::<Vec<&[u8]>>();
segs.pop();
if segs.len() > 0 {
segs.push(b"");
}
segs.insert(0, b"");
DavPath {
pfxlen: self.pfxlen,
fullpath: segs.join(&b'/').to_vec(),
}
}
}
impl std::ops::Deref for DavPath {
type Target = DavPathRef;
fn deref(&self) -> &DavPathRef {
let pfxlen = self.pfxlen.unwrap_or(0);
DavPathRef::new(&self.fullpath[pfxlen..])
}
}
impl DavPathRef {
// NOTE: this is safe, it is what libstd does in std::path::Path::new(), see
// https://github.com/rust-lang/rust/blob/6700e186883a83008963d1fdba23eff2b1713e56/src/libstd/path.rs#L1788
fn new(path: &[u8]) -> &DavPathRef {
unsafe { &*(path as *const [u8] as *const DavPathRef) }
}
/// as raw bytes, not encoded, no prefix.
pub fn as_bytes(&self) -> &[u8] {
self.get_path()
}
/// as OS specific Path. never ends in "/".
pub fn as_pathbuf(&self) -> PathBuf {
let mut b = self.get_path();
if b.len() > 1 && b.ends_with(b"/") {
b = &b[..b.len() - 1];
}
let os_string = OsStr::from_bytes(b).to_owned();
PathBuf::from(os_string)
}
/// as URL encoded string.
pub fn as_url_string(&self) -> String {
let p = encode_path(self.get_path());
std::string::String::from_utf8(p).unwrap()
}
/// is this a collection i.e. does the original URL path end in "/".
pub fn is_collection(&self) -> bool {
self.get_path().ends_with(b"/")
}
// non-public functions
//
// Return the path.
fn get_path(&self) -> &[u8] {
&self.fullpath
}
// is this a "star" request (only used with OPTIONS)
pub(crate) fn is_star(&self) -> bool {
self.get_path() == b"*"
}
/// as OS specific Path, relative (remove first slash)
///
/// Used to `push()` onto a pathbuf.
pub fn as_rel_ospath(&self) -> &Path {
let spath = self.get_path();
let mut path = if spath.len() > 0 { &spath[1..] } else { spath };
if path.ends_with(b"/") {
path = &path[..path.len() - 1];
}
let os_string = OsStr::from_bytes(path);
Path::new(os_string)
}
// get parent.
#[allow(dead_code)]
pub(crate) fn parent(&self) -> &DavPathRef {
let path = self.get_path();
let mut end = path.len();
while end > 0 {
end -= 1;
if path[end] == b'/' {
if end == 0 {
end = 1;
}
break;
}
}
DavPathRef::new(&path[..end])
}
/// The filename is the last segment of the path. Can be empty.
pub(crate) fn file_name(&self) -> &[u8] {
let segs = self
.get_path()
.split(|&c| c == b'/')
.filter(|e| e.len() > 0)
.collect::<Vec<&[u8]>>();
if segs.len() > 0 {
segs[segs.len() - 1]
} else {
b""
}
}
pub(crate) fn get_mime_type_str(&self) -> &'static str {
let name = self.file_name();
let d = name.rsplitn(2, |&c| c == b'.').collect::<Vec<&[u8]>>();
if d.len() > 1 {
if let Ok(ext) = std::str::from_utf8(d[0]) {
if let Some(t) = mime_guess::from_ext(ext).first_raw() {
return t;
}
}
}
"application/octet-stream"
}
}
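// Test-only, hedged examples of the normalization and prefix-stripping rules
// implemented above; they use only constructors defined in this file.
#[cfg(test)]
mod davpath_tests {
    use super::*;

    #[test]
    fn normalizes_dots_slashes_and_query() {
        // "." and ".." segments are resolved, duplicate slashes collapse,
        // and the query part is dropped.
        let p = DavPath::new("/a//b/./c/../d?x=1").unwrap();
        assert_eq!(p.as_url_string(), "/a/b/d");
        assert!(!p.is_collection());
    }

    #[test]
    fn strips_prefix() {
        let p = DavPath::from_str_and_prefix("/dav/docs/", "/dav").unwrap();
        assert_eq!(p.prefix(), "/dav");
        assert_eq!(p.as_url_string(), "/docs/");
        assert!(p.is_collection());
    }
}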

191
src/errors.rs Normal file
View File

@@ -0,0 +1,191 @@
use std::error::Error;
use std::io::{self, ErrorKind};
use http::StatusCode;
use xml;
use crate::fs::FsError;
pub(crate) type DavResult<T> = Result<T, DavError>;
#[derive(Debug)]
pub(crate) enum DavError {
XmlReadError, // error reading/parsing xml
XmlParseError, // error interpreting xml
InvalidPath, // error parsing path
IllegalPath, // path not valid here
ForbiddenPath, // too many dotdots
UnknownDavMethod,
ChanError,
Utf8Error,
Status(StatusCode),
StatusClose(StatusCode),
FsError(FsError),
IoError(io::Error),
XmlReaderError(xml::reader::Error),
XmlWriterError(xml::writer::Error),
}
impl Error for DavError {
fn description(&self) -> &str {
"DAV error"
}
fn cause(&self) -> Option<&dyn Error> {
match self {
&DavError::FsError(ref e) => Some(e),
&DavError::IoError(ref e) => Some(e),
&DavError::XmlReaderError(ref e) => Some(e),
&DavError::XmlWriterError(ref e) => Some(e),
_ => None,
}
}
}
impl std::fmt::Display for DavError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
&DavError::XmlReaderError(_) => write!(f, "XML parse error"),
&DavError::XmlWriterError(_) => write!(f, "XML generate error"),
&DavError::IoError(_) => write!(f, "I/O error"),
_ => write!(f, "{:?}", self),
}
}
}
impl From<FsError> for DavError {
fn from(e: FsError) -> Self {
DavError::FsError(e)
}
}
impl From<DavError> for io::Error {
fn from(e: DavError) -> Self {
match e {
DavError::IoError(e) => e,
DavError::FsError(e) => e.into(),
_ => io::Error::new(io::ErrorKind::Other, e),
}
}
}
impl From<FsError> for io::Error {
fn from(e: FsError) -> Self {
fserror_to_ioerror(e)
}
}
impl From<io::Error> for DavError {
fn from(e: io::Error) -> Self {
DavError::IoError(e)
}
}
impl From<StatusCode> for DavError {
fn from(e: StatusCode) -> Self {
DavError::Status(e)
}
}
impl From<xml::reader::Error> for DavError {
fn from(e: xml::reader::Error) -> Self {
DavError::XmlReaderError(e)
}
}
impl From<xml::writer::Error> for DavError {
fn from(e: xml::writer::Error) -> Self {
DavError::XmlWriterError(e)
}
}
impl From<std::str::Utf8Error> for DavError {
fn from(_: std::str::Utf8Error) -> Self {
DavError::Utf8Error
}
}
impl From<std::string::FromUtf8Error> for DavError {
fn from(_: std::string::FromUtf8Error) -> Self {
DavError::Utf8Error
}
}
impl From<futures::channel::mpsc::SendError> for DavError {
fn from(_e: futures::channel::mpsc::SendError) -> Self {
DavError::ChanError
}
}
fn fserror_to_ioerror(e: FsError) -> io::Error {
match e {
FsError::NotImplemented => io::Error::new(io::ErrorKind::Other, "NotImplemented"),
FsError::GeneralFailure => io::Error::new(io::ErrorKind::Other, "GeneralFailure"),
FsError::Exists => io::Error::new(io::ErrorKind::AlreadyExists, "Exists"),
FsError::NotFound => io::Error::new(io::ErrorKind::NotFound, "Notfound"),
FsError::Forbidden => io::Error::new(io::ErrorKind::PermissionDenied, "Forbidden"),
FsError::InsufficientStorage => io::Error::new(io::ErrorKind::Other, "InsufficientStorage"),
FsError::LoopDetected => io::Error::new(io::ErrorKind::Other, "LoopDetected"),
FsError::PathTooLong => io::Error::new(io::ErrorKind::Other, "PathTooLong"),
FsError::TooLarge => io::Error::new(io::ErrorKind::Other, "TooLarge"),
FsError::IsRemote => io::Error::new(io::ErrorKind::Other, "IsRemote"),
}
}
fn ioerror_to_status(ioerror: &io::Error) -> StatusCode {
match ioerror.kind() {
ErrorKind::NotFound => StatusCode::NOT_FOUND,
ErrorKind::PermissionDenied => StatusCode::FORBIDDEN,
ErrorKind::AlreadyExists => StatusCode::CONFLICT,
ErrorKind::TimedOut => StatusCode::GATEWAY_TIMEOUT,
_ => StatusCode::BAD_GATEWAY,
}
}
fn fserror_to_status(e: &FsError) -> StatusCode {
match e {
FsError::NotImplemented => StatusCode::NOT_IMPLEMENTED,
FsError::GeneralFailure => StatusCode::INTERNAL_SERVER_ERROR,
FsError::Exists => StatusCode::METHOD_NOT_ALLOWED,
FsError::NotFound => StatusCode::NOT_FOUND,
FsError::Forbidden => StatusCode::FORBIDDEN,
FsError::InsufficientStorage => StatusCode::INSUFFICIENT_STORAGE,
FsError::LoopDetected => StatusCode::LOOP_DETECTED,
FsError::PathTooLong => StatusCode::URI_TOO_LONG,
FsError::TooLarge => StatusCode::PAYLOAD_TOO_LARGE,
FsError::IsRemote => StatusCode::BAD_GATEWAY,
}
}
impl DavError {
pub(crate) fn statuscode(&self) -> StatusCode {
match self {
&DavError::XmlReadError => StatusCode::BAD_REQUEST,
&DavError::XmlParseError => StatusCode::BAD_REQUEST,
&DavError::InvalidPath => StatusCode::BAD_REQUEST,
&DavError::IllegalPath => StatusCode::BAD_GATEWAY,
&DavError::ForbiddenPath => StatusCode::FORBIDDEN,
&DavError::UnknownDavMethod => StatusCode::NOT_IMPLEMENTED,
&DavError::ChanError => StatusCode::INTERNAL_SERVER_ERROR,
&DavError::Utf8Error => StatusCode::UNSUPPORTED_MEDIA_TYPE,
&DavError::IoError(ref e) => ioerror_to_status(e),
&DavError::FsError(ref e) => fserror_to_status(e),
&DavError::Status(e) => e,
&DavError::StatusClose(e) => e,
&DavError::XmlReaderError(ref _e) => StatusCode::BAD_REQUEST,
&DavError::XmlWriterError(ref _e) => StatusCode::INTERNAL_SERVER_ERROR,
}
}
pub(crate) fn must_close(&self) -> bool {
match self {
// non-fatal, keep the connection open.
&DavError::Status(_) |
&DavError::FsError(FsError::NotFound) |
&DavError::FsError(FsError::Forbidden) |
&DavError::FsError(FsError::Exists) => false,
// close the connection to be sure.
_ => true,
}
}
}
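// Test-only sanity checks for the status-code mapping tables above; nothing
// here is part of the public API.
#[cfg(test)]
mod daverror_tests {
    use super::*;

    #[test]
    fn statuscodes_and_connection_handling() {
        assert_eq!(DavError::InvalidPath.statuscode(), StatusCode::BAD_REQUEST);
        assert_eq!(
            DavError::FsError(FsError::NotFound).statuscode(),
            StatusCode::NOT_FOUND
        );
        // Expected filesystem errors keep the connection open ...
        assert!(!DavError::FsError(FsError::NotFound).must_close());
        // ... while anything unexpected closes it to be safe.
        assert!(DavError::Utf8Error.must_close());
    }
}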

124
src/fakels.rs Normal file
View File

@@ -0,0 +1,124 @@
//! Fake locksystem (to make Windows/macOS work).
//!
//! Several Webdav clients, like the ones on Windows and macOS, require just
//! basic functionality to mount the Webdav server in read-only mode. However
//! to be able to mount the Webdav server in read-write mode, they require the
//! Webdav server to have Webdav class 2 compliance - that means, LOCK/UNLOCK
//! support.
//!
//! In many cases, this is not actually important. A lot of the current Webdav
//! server implementations that are used to serve a filesystem just fake it:
//! LOCK/UNLOCK always succeed, checking for locktokens in
//! If: headers always succeeds, and nothing is ever really locked.
//!
//! `FakeLs` implements such a fake locksystem.
use std::time::{Duration, SystemTime};
use uuid::Uuid;
use xmltree::Element;
use crate::davpath::DavPath;
use crate::ls::*;
/// Fake locksystem implementation.
#[derive(Debug, Clone)]
pub struct FakeLs {}
impl FakeLs {
/// Create a new "fakels" locksystem.
pub fn new() -> Box<FakeLs> {
Box::new(FakeLs {})
}
}
fn tm_limit(d: Option<Duration>) -> Duration {
match d {
None => Duration::new(120, 0),
Some(d) => {
if d.as_secs() > 120 {
Duration::new(120, 0)
} else {
d
}
},
}
}
impl DavLockSystem for FakeLs {
fn lock(
&self,
path: &DavPath,
principal: Option<&str>,
owner: Option<&Element>,
timeout: Option<Duration>,
shared: bool,
deep: bool,
) -> Result<DavLock, DavLock>
{
let timeout = tm_limit(timeout);
let timeout_at = SystemTime::now() + timeout;
let d = if deep { 'I' } else { '0' };
let s = if shared { 'S' } else { 'E' };
let token = format!("opaquetoken:{}/{}/{}", Uuid::new_v4().to_hyphenated(), d, s);
let lock = DavLock {
token: token,
path: path.clone(),
principal: principal.map(|s| s.to_string()),
owner: owner.cloned(),
timeout_at: Some(timeout_at),
timeout: Some(timeout),
shared: shared,
deep: deep,
};
debug!("lock {} created", &lock.token);
Ok(lock)
}
fn unlock(&self, _path: &DavPath, _token: &str) -> Result<(), ()> {
Ok(())
}
fn refresh(&self, path: &DavPath, token: &str, timeout: Option<Duration>) -> Result<DavLock, ()> {
debug!("refresh lock {}", token);
let v: Vec<&str> = token.split('/').collect();
let deep = v.len() > 1 && v[1] == "I";
let shared = v.len() > 2 && v[2] == "S";
let timeout = tm_limit(timeout);
let timeout_at = SystemTime::now() + timeout;
let lock = DavLock {
token: token.to_string(),
path: path.clone(),
principal: None,
owner: None,
timeout_at: Some(timeout_at),
timeout: Some(timeout),
shared: shared,
deep: deep,
};
Ok(lock)
}
fn check(
&self,
_path: &DavPath,
_principal: Option<&str>,
_ignore_principal: bool,
_deep: bool,
_submitted_tokens: Vec<&str>,
) -> Result<(), DavLock>
{
Ok(())
}
fn discover(&self, _path: &DavPath) -> Vec<DavLock> {
Vec::new()
}
fn delete(&self, _path: &DavPath) -> Result<(), ()> {
Ok(())
}
}
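// A hedged, test-only example of the lock/refresh round trip described above:
// the deep/shared flags ride along in the fake token
// ("opaquetoken:<uuid>/<I|0>/<S|E>"), so refresh() can reconstruct them
// without any server-side state.
#[cfg(test)]
mod fakels_tests {
    use super::*;

    #[test]
    fn lock_and_refresh() {
        let ls = FakeLs::new();
        let path = DavPath::new("/file.txt").unwrap();
        let lock = ls
            .lock(&path, Some("alice"), None, None, true, true)
            .ok()
            .unwrap();
        assert!(lock.token.starts_with("opaquetoken:"));

        let refreshed = ls.refresh(&path, &lock.token, None).unwrap();
        assert!(refreshed.shared);
        assert!(refreshed.deep);
    }
}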

441
src/fs.rs Normal file
View File

@@ -0,0 +1,441 @@
//! Contains the structs and traits that define a filesystem backend.
//!
//! You only need this if you are going to implement your own
//! filesystem backend. Otherwise, just use 'LocalFs' or 'MemFs'.
//!
use std::fmt::Debug;
use std::io::SeekFrom;
use std::pin::Pin;
use std::time::{SystemTime, UNIX_EPOCH};
use futures::{future, Future, Stream, TryFutureExt};
use http::StatusCode;
use crate::davpath::DavPath;
macro_rules! notimplemented {
($method:expr) => {
Err(FsError::NotImplemented)
};
}
macro_rules! notimplemented_fut {
($method:expr) => {
Box::pin(future::ready(Err(FsError::NotImplemented)))
};
}
/// Errors generated by a filesystem implementation.
///
/// These are more result-codes than errors, really.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FsError {
/// Operation not implemented (501)
NotImplemented,
/// Something went wrong (500)
GeneralFailure,
/// tried to create something, but it existed (405 / 412) (yes, 405. RFC4918 says so)
Exists,
/// File / Directory not found (404)
NotFound,
/// Not allowed (403)
Forbidden,
/// Out of space (507)
InsufficientStorage,
/// Symbolic link loop detected (ELOOP) (508)
LoopDetected,
/// The path is too long (ENAMETOOLONG) (414)
PathTooLong,
/// The file being PUT is too large (413)
TooLarge,
/// Trying to MOVE over a mount boundary (EXDEV) (502)
IsRemote,
}
/// The Result type.
pub type FsResult<T> = std::result::Result<T, FsError>;
/// A webdav property.
#[derive(Debug, Clone)]
pub struct DavProp {
/// Name of the property.
pub name: String,
/// XML prefix.
pub prefix: Option<String>,
/// XML namespace.
pub namespace: Option<String>,
/// Value of the property as raw XML.
pub xml: Option<Vec<u8>>,
}
/// Future returned by almost all of the DavFileSystem methods.
pub type FsFuture<'a, T> = Pin<Box<dyn Future<Output = FsResult<T>> + Send + 'a>>;
/// Convenience alias for a boxed Stream.
pub type FsStream<T> = Pin<Box<dyn Stream<Item = T> + Send>>;
/// Used as argument to the read_dir() method.
/// It is:
///
/// - an optimization hint (the implementation may call metadata() and
/// store the result in the returned directory entry)
/// - a way to get metadata instead of symlink_metadata from
/// the directory entry.
///
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ReadDirMeta {
/// DavDirEntry.metadata() behaves as metadata()
Data,
/// DavDirEntry.metadata() behaves as symlink_metadata()
DataSymlink,
/// No optimizations, otherwise like DataSymlink.
None,
}
/// The trait that defines a filesystem.
pub trait DavFileSystem: Sync + Send + BoxCloneFs {
/// Open a file.
fn open<'a>(&'a self, path: &'a DavPath, options: OpenOptions) -> FsFuture<Box<dyn DavFile>>;
/// Perform read_dir.
fn read_dir<'a>(
&'a self,
path: &'a DavPath,
meta: ReadDirMeta,
) -> FsFuture<FsStream<Box<dyn DavDirEntry>>>;
/// Return the metadata of a file or directory.
fn metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>>;
/// Return the metadata of a file, directory or symbolic link.
///
/// Differs from metadata() that if the path is a symbolic link,
/// it return the metadata for the link itself, not for the thing
/// it points to.
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn symlink_metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
self.metadata(path)
}
/// Create a directory.
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn create_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
notimplemented_fut!("create_dir")
}
/// Remove a directory.
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn remove_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
notimplemented_fut!("remove_dir")
}
/// Remove a file.
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn remove_file<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
notimplemented_fut!("remove_file")
}
/// Rename a file or directory.
///
/// Source and destination must be the same type (file/dir).
/// If the destination already exists and is a file, it
/// should be replaced. If it is a directory it should give
/// an error.
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn rename<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
notimplemented_fut!("rename")
}
/// Copy a file
///
/// Should also copy the DAV properties, if properties
/// are implemented.
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn copy<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
notimplemented_fut!("copy")
}
/// Set the access time of a file / directory.
///
/// The default implementation returns FsError::NotImplemented.
#[doc(hidden)]
#[allow(unused_variables)]
fn set_accessed<'a>(&'a self, path: &'a DavPath, tm: SystemTime) -> FsFuture<()> {
notimplemented_fut!("set_accessed")
}
/// Set the modified time of a file / directory.
///
/// The default implementation returns FsError::NotImplemented.
#[doc(hidden)]
#[allow(unused_variables)]
fn set_modified<'a>(&'a self, path: &'a DavPath, tm: SystemTime) -> FsFuture<()> {
notimplemented_fut!("set_mofified")
}
/// Indicator that tells if this filesystem driver supports DAV properties.
///
/// The default implementation returns `false`.
#[allow(unused_variables)]
fn have_props<'a>(&'a self, path: &'a DavPath) -> Pin<Box<dyn Future<Output = bool> + Send + 'a>> {
Box::pin(future::ready(false))
}
/// Patch the DAV properties of a node (add/remove props)
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn patch_props<'a>(
&'a self,
path: &'a DavPath,
patch: Vec<(bool, DavProp)>,
) -> FsFuture<Vec<(StatusCode, DavProp)>>
{
notimplemented_fut!("patch_props")
}
/// List/get the DAV properties of a node.
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn get_props<'a>(&'a self, path: &'a DavPath, do_content: bool) -> FsFuture<Vec<DavProp>> {
notimplemented_fut!("get_props")
}
/// Get one specific named property of a node.
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn get_prop<'a>(&'a self, path: &'a DavPath, prop: DavProp) -> FsFuture<Vec<u8>> {
notimplemented_fut!("get_prop`")
}
/// Get quota of this filesystem (used/total space).
///
/// The first value returned is the amount of space used,
/// the second optional value is the total amount of space
/// (used + available).
///
/// The default implementation returns FsError::NotImplemented.
#[allow(unused_variables)]
fn get_quota<'a>(&'a self) -> FsFuture<(u64, Option<u64>)> {
notimplemented_fut!("get_quota`")
}
}
// BoxClone trait.
#[doc(hidden)]
pub trait BoxCloneFs {
fn box_clone(&self) -> Box<dyn DavFileSystem>;
}
// generic Clone, calls implementation-specific box_clone().
impl Clone for Box<dyn DavFileSystem> {
fn clone(&self) -> Box<dyn DavFileSystem> {
self.box_clone()
}
}
// implementation-specific clone.
#[doc(hidden)]
impl<FS: Clone + DavFileSystem + 'static> BoxCloneFs for FS {
fn box_clone(&self) -> Box<dyn DavFileSystem> {
Box::new((*self).clone())
}
}
/// One directory entry (or child node).
pub trait DavDirEntry: Send + Sync {
/// Name of the entry.
fn name(&self) -> Vec<u8>;
/// Metadata of the entry.
fn metadata<'a>(&'a self) -> FsFuture<Box<dyn DavMetaData>>;
/// Default implementation of `is_dir` just returns `metadata()?.is_dir()`.
/// Implementations can override this if their `metadata()` method is
/// expensive and there is a cheaper way to provide the same info
/// (e.g. dirent.d_type in unix filesystems).
fn is_dir<'a>(&'a self) -> FsFuture<bool> {
Box::pin(self.metadata().and_then(|meta| future::ok(meta.is_dir())))
}
/// Likewise, for files. Default: `metadata().is_file()`.
fn is_file<'a>(&'a self) -> FsFuture<bool> {
Box::pin(self.metadata().and_then(|meta| future::ok(meta.is_file())))
}
/// Likewise, for symbolic links. Default: `metadata().is_symlink()`.
fn is_symlink<'a>(&'a self) -> FsFuture<bool> {
Box::pin(self.metadata().and_then(|meta| future::ok(meta.is_symlink())))
}
}
/// A `DavFile` is the equivalent of `std::fs::File`, should be
/// readable/writeable/seekable, and be able to return its metadata.
pub trait DavFile: Debug + Send + Sync {
fn metadata<'a>(&'a mut self) -> FsFuture<Box<dyn DavMetaData>>;
fn write_buf<'a>(&'a mut self, buf: Box<dyn bytes::Buf + Send>) -> FsFuture<()>;
fn write_bytes<'a>(&'a mut self, buf: bytes::Bytes) -> FsFuture<()>;
fn read_bytes<'a>(&'a mut self, count: usize) -> FsFuture<bytes::Bytes>;
fn seek<'a>(&'a mut self, pos: SeekFrom) -> FsFuture<u64>;
fn flush<'a>(&'a mut self) -> FsFuture<()>;
}
/// File metadata. Basically type, length, and some timestamps.
pub trait DavMetaData: Debug + BoxCloneMd + Send + Sync {
/// Size of the file.
fn len(&self) -> u64;
/// `Modified` timestamp.
fn modified(&self) -> FsResult<SystemTime>;
/// File or directory (aka collection).
fn is_dir(&self) -> bool;
/// Simplistic implementation of `etag()`.
///
/// Returns a simple etag that basically is `\<length\>-\<timestamp_in_microseconds\>`
/// with the numbers in hex. Enough for most implementations.
fn etag(&self) -> Option<String> {
if let Ok(t) = self.modified() {
if let Ok(t) = t.duration_since(UNIX_EPOCH) {
let t = t.as_secs() * 1000000 + t.subsec_nanos() as u64 / 1000;
let tag = if self.is_file() && self.len() > 0 {
format!("{:x}-{:x}", self.len(), t)
} else {
format!("{:x}", t)
};
return Some(tag);
}
}
None
}
/// Is this a file and not a directory. Default: `!is_dir()`.
fn is_file(&self) -> bool {
!self.is_dir()
}
/// Is this a symbolic link. Default: false.
fn is_symlink(&self) -> bool {
false
}
/// Last access time. Default: `FsError::NotImplemented`.
fn accessed(&self) -> FsResult<SystemTime> {
notimplemented!("access time")
}
/// Creation time. Default: `FsError::NotImplemented`.
fn created(&self) -> FsResult<SystemTime> {
notimplemented!("creation time")
}
/// Inode change time (ctime). Default: `FsError::NotImplemented`.
fn status_changed(&self) -> FsResult<SystemTime> {
notimplemented!("status change time")
}
/// Is file executable (unix: has "x" mode bit). Default: `FsError::NotImplemented`.
fn executable(&self) -> FsResult<bool> {
notimplemented!("executable")
}
}
// generic Clone, calls implementation-specific box_clone().
impl Clone for Box<dyn DavMetaData> {
fn clone(&self) -> Box<dyn DavMetaData> {
self.box_clone()
}
}
// BoxCloneMd trait.
#[doc(hidden)]
pub trait BoxCloneMd {
fn box_clone(&self) -> Box<dyn DavMetaData>;
}
// implementation-specific clone.
#[doc(hidden)]
impl<MD: Clone + DavMetaData + 'static> BoxCloneMd for MD {
fn box_clone(&self) -> Box<dyn DavMetaData> {
Box::new((*self).clone())
}
}
/// OpenOptions for `open()`.
#[derive(Debug, Clone, Copy, Default)]
pub struct OpenOptions {
/// open for reading
pub read: bool,
/// open for writing
pub write: bool,
/// open in write-append mode
pub append: bool,
/// truncate file first when writing
pub truncate: bool,
/// create file if it doesn't exist
pub create: bool,
/// must create new file, fail if it already exists.
pub create_new: bool,
}
impl OpenOptions {
#[allow(dead_code)]
pub(crate) fn new() -> OpenOptions {
OpenOptions {
read: false,
write: false,
append: false,
truncate: false,
create: false,
create_new: false,
}
}
pub(crate) fn read() -> OpenOptions {
OpenOptions {
read: true,
write: false,
append: false,
truncate: false,
create: false,
create_new: false,
}
}
pub(crate) fn write() -> OpenOptions {
OpenOptions {
read: false,
write: true,
append: false,
truncate: false,
create: false,
create_new: false,
}
}
}
impl std::error::Error for FsError {
fn description(&self) -> &str {
"DavFileSystem error"
}
fn cause(&self) -> Option<&dyn std::error::Error> {
None
}
}
impl std::fmt::Display for FsError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{:?}", self)
}
}
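// Illustration only: a minimal sketch of what the default `etag()` above yields
// for a simple in-memory metadata type. `SimpleMeta` is a hypothetical example
// type, not part of the crate's API.
#[cfg(test)]
mod etag_example {
    use super::*;
    use std::time::{Duration, UNIX_EPOCH};

    #[derive(Debug, Clone)]
    struct SimpleMeta {
        len:   u64,
        mtime: std::time::SystemTime,
    }

    impl DavMetaData for SimpleMeta {
        fn len(&self) -> u64 {
            self.len
        }
        fn modified(&self) -> FsResult<std::time::SystemTime> {
            Ok(self.mtime)
        }
        fn is_dir(&self) -> bool {
            false
        }
    }

    #[test]
    fn default_etag_is_length_and_microseconds_in_hex() {
        let meta = SimpleMeta {
            len:   1024,
            mtime: UNIX_EPOCH + Duration::from_secs(1),
        };
        // 1024 bytes = 0x400, 1 second = 1_000_000 microseconds = 0xf4240.
        assert_eq!(meta.etag(), Some("400-f4240".to_string()));
    }
}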

290
src/handle_copymove.rs Normal file

@@ -0,0 +1,290 @@
use futures::{future::BoxFuture, FutureExt, StreamExt};
use headers::HeaderMapExt;
use http::{Request, Response, StatusCode};
use crate::async_stream::AsyncStream;
use crate::body::Body;
use crate::conditional::*;
use crate::davheaders::{self, Depth};
use crate::davpath::DavPath;
use crate::errors::*;
use crate::fs::*;
use crate::multierror::{multi_error, MultiError};
use crate::{util::DavMethod, DavResult};
// map_err helper.
async fn add_status<'a>(
m_err: &'a mut MultiError,
path: &'a DavPath,
e: impl Into<DavError> + 'static,
) -> DavResult<()>
{
let daverror = e.into();
if let Err(x) = m_err.add_status(path, daverror.statuscode()).await {
return Err(x.into());
}
Err(daverror)
}
impl crate::DavInner {
pub(crate) fn do_copy<'a>(
&'a self,
source: &'a DavPath,
topdest: &'a DavPath,
dest: &'a DavPath,
depth: Depth,
mut multierror: &'a mut MultiError,
) -> BoxFuture<'a, DavResult<()>>
{
async move {
// when doing "COPY /a/b /a/b/c make sure we don't recursively
// copy /a/b/c/ into /a/b/c.
if source == topdest {
return Ok(());
}
// source must exist.
let meta = match self.fs.metadata(source).await {
Err(e) => return add_status(&mut multierror, source, e).await,
Ok(m) => m,
};
// if it's a file we can overwrite it.
if !meta.is_dir() {
return match self.fs.copy(source, dest).await {
Ok(_) => Ok(()),
Err(e) => {
debug!("do_copy: self.fs.copy error: {:?}", e);
add_status(&mut multierror, source, e).await
},
};
}
// Copying a directory onto an existing directory with Depth 0
// is not an error. It means "only copy properties" (which
// we do not do yet).
if let Err(e) = self.fs.create_dir(dest).await {
if depth != Depth::Zero || e != FsError::Exists {
debug!("do_copy: self.fs.create_dir({}) error: {:?}", dest, e);
return add_status(&mut multierror, dest, e).await;
}
}
// only recurse when Depth > 0.
if depth == Depth::Zero {
return Ok(());
}
let mut entries = match self.fs.read_dir(source, ReadDirMeta::DataSymlink).await {
Ok(entries) => entries,
Err(e) => {
debug!("do_copy: self.fs.read_dir error: {:?}", e);
return add_status(&mut multierror, source, e).await;
},
};
// If we encounter errors, just print them, and keep going.
// Last seen error is returned from function.
let mut retval = Ok::<_, DavError>(());
while let Some(dirent) = entries.next().await {
// NOTE: dirent.metadata() behaves like symlink_metadata()
let meta = match dirent.metadata().await {
Ok(meta) => meta,
Err(e) => return add_status(&mut multierror, source, e).await,
};
let name = dirent.name();
let mut nsrc = source.clone();
let mut ndest = dest.clone();
nsrc.push_segment(&name);
ndest.push_segment(&name);
if meta.is_dir() {
nsrc.add_slash();
ndest.add_slash();
}
// recurse.
if let Err(e) = self.do_copy(&nsrc, topdest, &ndest, depth, multierror).await {
retval = Err(e);
}
}
retval
}
.boxed()
}
// Right now we handle MOVE with a simple RENAME. RFC4918 #9.9.2 talks
// about "partially failed moves", which means that we might have to
// try to move directories with increasing granularity to move as much
// as possible instead of all-or-nothing.
//
// Note that this might not be optional, as the RFC says:
//
// "Any headers included with MOVE MUST be applied in processing every
// resource to be moved with the exception of the Destination header."
//
// .. so for perfect compliance we might have to process all resources
// one-by-one anyway. But seriously, who cares.
//
pub(crate) async fn do_move<'a>(
&'a self,
source: &'a DavPath,
dest: &'a DavPath,
mut multierror: &'a mut MultiError,
) -> DavResult<()>
{
if let Err(e) = self.fs.rename(source, dest).await {
add_status(&mut multierror, &source, e).await
} else {
Ok(())
}
}
pub(crate) async fn handle_copymove(
self,
req: &Request<()>,
method: DavMethod,
) -> DavResult<Response<Body>>
{
// get and check headers.
let overwrite = req
.headers()
.typed_get::<davheaders::Overwrite>()
.map_or(true, |o| o.0);
let depth = match req.headers().typed_get::<Depth>() {
Some(Depth::Infinity) | None => Depth::Infinity,
Some(Depth::Zero) if method == DavMethod::Copy => Depth::Zero,
_ => return Err(StatusCode::BAD_REQUEST.into()),
};
// decode and validate destination.
let dest = match req.headers().typed_get::<davheaders::Destination>() {
Some(dest) => DavPath::from_str_and_prefix(&dest.0, &self.prefix)?,
None => return Err(StatusCode::BAD_REQUEST.into()),
};
// for MOVE, tread with care: if the path ends in "/" but it actually
// is a symlink, we want to move the symlink, not what it points to.
let mut path = self.path(&req);
let meta = if method == DavMethod::Move {
let meta = self.fs.symlink_metadata(&path).await?;
if meta.is_symlink() {
let m2 = self.fs.metadata(&path).await?;
path.add_slash_if(m2.is_dir());
}
meta
} else {
self.fs.metadata(&path).await?
};
path.add_slash_if(meta.is_dir());
// parent of the destination must exist.
if !self.has_parent(&dest).await {
return Err(StatusCode::CONFLICT.into());
}
// for the destination, also check if it's a symlink. If we are going
// to remove it first, we want to remove the link, not what it points to.
let (dest_is_file, dmeta) = match self.fs.symlink_metadata(&dest).await {
Ok(meta) => {
let mut is_file = false;
if meta.is_symlink() {
if let Ok(m) = self.fs.metadata(&dest).await {
is_file = m.is_file();
}
}
if meta.is_file() {
is_file = true;
}
(is_file, Ok(meta))
},
Err(e) => (false, Err(e)),
};
// check if overwrite is "F"
let exists = dmeta.is_ok();
if !overwrite && exists {
return Err(StatusCode::PRECONDITION_FAILED.into());
}
// check if source == dest
if path == dest {
return Err(StatusCode::FORBIDDEN.into());
}
// check If and If-* headers for source URL
let tokens = match if_match_get_tokens(&req, Some(&meta), &self.fs, &self.ls, &path).await {
Ok(t) => t,
Err(s) => return Err(s.into()),
};
// check locks. since we cancel the entire operation if there is
// a conflicting lock, we do not return a 207 multistatus, but
// just a simple status.
if let Some(ref locksystem) = self.ls {
let t = tokens.iter().map(|s| s.as_str()).collect::<Vec<&str>>();
let principal = self.principal.as_ref().map(|s| s.as_str());
if method == DavMethod::Move {
// for MOVE check if source path is locked
if let Err(_l) = locksystem.check(&path, principal, false, true, t.clone()) {
return Err(StatusCode::LOCKED.into());
}
}
// for MOVE and COPY check if destination is locked
if let Err(_l) = locksystem.check(&dest, principal, false, true, t) {
return Err(StatusCode::LOCKED.into());
}
}
let req_path = path.clone();
let items = AsyncStream::new(|tx| {
async move {
let mut multierror = MultiError::new(tx);
// see if we need to delete the destination first.
if overwrite && exists && depth != Depth::Zero && !dest_is_file {
trace!("handle_copymove: deleting destination {}", dest);
if let Err(_) = self
.delete_items(&mut multierror, Depth::Infinity, dmeta.unwrap(), &dest)
.await
{
return Ok(());
}
// should really do this per item, in case the delete partially fails. See TODO.md
if let Some(ref locksystem) = self.ls {
let _ = locksystem.delete(&dest);
}
}
// COPY or MOVE.
if method == DavMethod::Copy {
if let Ok(_) = self.do_copy(&path, &dest, &dest, depth, &mut multierror).await {
let s = if exists {
StatusCode::NO_CONTENT
} else {
StatusCode::CREATED
};
let _ = multierror.add_status(&path, s).await;
}
} else {
// move and if successful, remove locks at old location.
if let Ok(_) = self.do_move(&path, &dest, &mut multierror).await {
if let Some(ref locksystem) = self.ls {
locksystem.delete(&path).ok();
}
let s = if exists {
StatusCode::NO_CONTENT
} else {
StatusCode::CREATED
};
let _ = multierror.add_status(&path, s).await;
}
}
Ok::<_, DavError>(())
}
});
multi_error(req_path, items).await
}
}
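// A minimal sketch of the request headers handle_copymove() inspects. The URIs
// are hypothetical; only the header names and values matter here.
#[cfg(test)]
mod copymove_headers_example {
    use http::Request;

    #[test]
    fn copy_request_headers() {
        let req = Request::builder()
            .method("COPY")
            .uri("http://localhost:4918/docs/report.txt")
            // Destination is mandatory; without it the handler returns 400.
            .header("Destination", "http://localhost:4918/backup/report.txt")
            // For COPY, Depth may be "0" or "infinity" (the default).
            .header("Depth", "infinity")
            // Overwrite "F" makes an existing destination fail with 412.
            .header("Overwrite", "F")
            .body(())
            .unwrap();
        assert_eq!(req.headers()["Overwrite"], "F");
        assert_eq!(req.method().as_str(), "COPY");
    }
}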

168
src/handle_delete.rs Normal file

@@ -0,0 +1,168 @@
use futures::{future::BoxFuture, FutureExt, StreamExt};
use headers::HeaderMapExt;
use http::{Request, Response, StatusCode};
use crate::async_stream::AsyncStream;
use crate::body::Body;
use crate::conditional::if_match_get_tokens;
use crate::davheaders::Depth;
use crate::davpath::DavPath;
use crate::errors::*;
use crate::fs::*;
use crate::multierror::{multi_error, MultiError};
use crate::DavResult;
// map_err helper.
async fn add_status<'a>(m_err: &'a mut MultiError, path: &'a DavPath, e: FsError) -> DavError {
let status = DavError::FsError(e).statuscode();
if let Err(x) = m_err.add_status(path, status).await {
return x.into();
}
DavError::Status(status)
}
// map_err helper for directories; the resulting statuscode
// mappings are not 100% the same.
async fn dir_status<'a>(res: &'a mut MultiError, path: &'a DavPath, e: FsError) -> DavError {
let status = match e {
FsError::Exists => StatusCode::CONFLICT,
e => DavError::FsError(e).statuscode(),
};
if let Err(x) = res.add_status(path, status).await {
return x.into();
}
DavError::Status(status)
}
impl crate::DavInner {
pub(crate) fn delete_items<'a>(
&'a self,
mut res: &'a mut MultiError,
depth: Depth,
meta: Box<dyn DavMetaData + 'a>,
path: &'a DavPath,
) -> BoxFuture<'a, DavResult<()>>
{
async move {
if !meta.is_dir() {
trace!("delete_items (file) {} {:?}", path, depth);
return match self.fs.remove_file(path).await {
Ok(x) => Ok(x),
Err(e) => Err(add_status(&mut res, path, e).await),
};
}
if depth == Depth::Zero {
trace!("delete_items (dir) {} {:?}", path, depth);
return match self.fs.remove_dir(path).await {
Ok(x) => Ok(x),
Err(e) => Err(add_status(&mut res, path, e).await),
};
}
// walk over all entries.
let mut entries = match self.fs.read_dir(path, ReadDirMeta::DataSymlink).await {
Ok(x) => Ok(x),
Err(e) => Err(add_status(&mut res, path, e).await),
}?;
let mut result = Ok(());
while let Some(dirent) = entries.next().await {
// if metadata() fails, skip to next entry.
// NOTE: dirent.metadata == symlink_metadata (!)
let meta = match dirent.metadata().await {
Ok(m) => m,
Err(e) => {
result = Err(add_status(&mut res, path, e).await);
continue;
},
};
let mut npath = path.clone();
npath.push_segment(&dirent.name());
npath.add_slash_if(meta.is_dir());
// do the actual work. If this fails with a non-fs related error,
// return immediately.
if let Err(e) = self.delete_items(&mut res, depth, meta, &npath).await {
match e {
DavError::Status(_) => {
result = Err(e);
continue;
},
_ => return Err(e),
}
}
}
// if we got any error, return with the error,
// and do not try to remove the directory.
result?;
match self.fs.remove_dir(path).await {
Ok(x) => Ok(x),
Err(e) => Err(dir_status(&mut res, path, e).await),
}
}
.boxed()
}
pub(crate) async fn handle_delete(self, req: &Request<()>) -> DavResult<Response<Body>> {
// RFC4918 9.6.1 DELETE for Collections.
// Note that allowing Depth: 0 is NOT RFC compliant.
let depth = match req.headers().typed_get::<Depth>() {
Some(Depth::Infinity) | None => Depth::Infinity,
Some(Depth::Zero) => Depth::Zero,
_ => return Err(DavError::Status(StatusCode::BAD_REQUEST)),
};
let mut path = self.path(&req);
let meta = self.fs.symlink_metadata(&path).await?;
if meta.is_symlink() {
if let Ok(m2) = self.fs.metadata(&path).await {
path.add_slash_if(m2.is_dir());
}
}
path.add_slash_if(meta.is_dir());
// check the If and If-* headers.
let tokens_res = if_match_get_tokens(&req, Some(&meta), &self.fs, &self.ls, &path).await;
let tokens = match tokens_res {
Ok(t) => t,
Err(s) => return Err(DavError::Status(s)),
};
// check locks. since we cancel the entire operation if there is
// a conflicting lock, we do not return a 207 multistatus, but
// just a simple status.
if let Some(ref locksystem) = self.ls {
let t = tokens.iter().map(|s| s.as_str()).collect::<Vec<&str>>();
let principal = self.principal.as_ref().map(|s| s.as_str());
if let Err(_l) = locksystem.check(&path, principal, false, true, t) {
return Err(DavError::Status(StatusCode::LOCKED));
}
}
let req_path = path.clone();
let items = AsyncStream::new(|tx| {
async move {
// turn the Sink into something easier to pass around.
let mut multierror = MultiError::new(tx);
// now delete the path recursively.
let fut = self.delete_items(&mut multierror, depth, meta, &path);
if let Ok(()) = fut.await {
// Done. Now delete the path in the locksystem as well.
// Should really do this per resource, in case the delete partially fails. See TODO.md
if let Some(ref locksystem) = self.ls {
locksystem.delete(&path).ok();
}
let _ = multierror.add_status(&path, StatusCode::NO_CONTENT).await;
}
Ok(())
}
});
multi_error(req_path, items).await
}
}
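// For illustration: the Depth values handle_delete() above accepts. A missing
// header or "infinity" means a full recursive delete; "0" is tolerated even
// though RFC4918 does not allow it; anything else is answered with 400. The
// URI below is hypothetical.
#[cfg(test)]
mod delete_depth_example {
    use http::Request;

    #[test]
    fn delete_request_with_depth() {
        let req = Request::builder()
            .method("DELETE")
            .uri("http://localhost:4918/old-data/")
            .header("Depth", "infinity")
            .body(())
            .unwrap();
        assert_eq!(req.headers()["Depth"], "infinity");
    }
}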

612
src/handle_gethead.rs Normal file

@@ -0,0 +1,612 @@
use std::cmp;
use std::convert::TryInto;
use std::io::Write;
use futures::StreamExt;
use headers::HeaderMapExt;
use htmlescape;
use http::{status::StatusCode, Request, Response};
use bytes::Bytes;
use crate::async_stream::AsyncStream;
use crate::body::Body;
use crate::conditional;
use crate::davheaders;
use crate::davpath::DavPath;
use crate::errors::*;
use crate::fs::*;
use crate::util::systemtime_to_offsetdatetime;
use crate::DavMethod;
struct Range {
start: u64,
count: u64,
}
const BOUNDARY: &str = "BOUNDARY";
const BOUNDARY_START: &str = "\n--BOUNDARY\n";
const BOUNDARY_END: &str = "\n--BOUNDARY--\n";
const READ_BUF_SIZE: usize = 16384;
impl crate::DavInner {
pub(crate) async fn handle_get(&self, req: &Request<()>) -> DavResult<Response<Body>> {
let head = req.method() == &http::Method::HEAD;
let mut path = self.path(&req);
let mut is_hbs = false;
// check if it's a directory.
let meta = self.fs.metadata(&path).await?;
if meta.is_dir() {
//
// This is a directory. If the path doesn't end in "/", send a redir.
// Most webdav clients handle redirects really badly, but a client asking
// for a directory index is usually a browser.
//
if !path.is_collection() {
let mut res = Response::new(Body::empty());
path.add_slash();
res.headers_mut()
.insert("Location", path.with_prefix().as_url_string().parse().unwrap());
res.headers_mut().typed_insert(headers::ContentLength(0));
*res.status_mut() = StatusCode::FOUND;
return Ok(res);
}
// If indexfile was set, use it.
if let Some(indexfile) = self.indexfile.as_ref() {
path.push_segment(indexfile.as_bytes());
is_hbs = indexfile.ends_with(".hbs");
} else {
// Otherwise see if we need to generate a directory index.
return self.handle_autoindex(req, head).await;
}
}
// double check, is it a regular file.
let mut file = self.fs.open(&path, OpenOptions::read()).await?;
let mut meta = file.metadata().await?;
if !meta.is_file() {
return Err(DavError::Status(StatusCode::METHOD_NOT_ALLOWED));
}
// if it was a .hbs file, process it.
if is_hbs {
let (f, m) = read_handlebars(req, file).await?;
file = f;
meta = m;
}
let len = meta.len();
let mut curpos = 0u64;
let file_etag = davheaders::ETag::from_meta(&meta);
let mut ranges = Vec::new();
let mut do_range = match req.headers().typed_try_get::<davheaders::IfRange>() {
Ok(Some(r)) => conditional::ifrange_match(&r, file_etag.as_ref(), meta.modified().ok()),
Ok(None) => true,
Err(_) => false,
};
let mut res = Response::new(Body::empty());
let mut no_body = false;
// set Last-Modified and ETag headers.
if let Ok(modified) = meta.modified() {
res.headers_mut()
.typed_insert(headers::LastModified::from(modified));
}
if let Some(etag) = file_etag {
res.headers_mut().typed_insert(etag);
}
// Apache always adds an Accept-Ranges header, even with partial
// responses where it should be pretty obvious. So something somewhere
// probably depends on that.
res.headers_mut().typed_insert(headers::AcceptRanges::bytes());
// handle the if-headers.
if let Some(s) = conditional::if_match(&req, Some(&meta), &self.fs, &self.ls, &path).await {
*res.status_mut() = s;
no_body = true;
do_range = false;
}
// see if we want to get one or more ranges.
if do_range {
if let Some(r) = req.headers().typed_get::<headers::Range>() {
trace!("handle_gethead: range header {:?}", r);
use std::ops::Bound::*;
for range in r.iter() {
let (start, mut count, valid) = match range {
(Included(s), Included(e)) if e >= s => (s, e - s + 1, true),
(Included(s), Unbounded) if s <= len => (s, len - s, true),
(Unbounded, Included(n)) if n <= len => (len - n, n, true),
_ => (0, 0, false),
};
if !valid || start >= len {
let r = format!("bytes */{}", len);
res.headers_mut().insert("Content-Range", r.parse().unwrap());
*res.status_mut() = StatusCode::RANGE_NOT_SATISFIABLE;
ranges.clear();
no_body = true;
break;
}
if start + count > len {
count = len - start;
}
ranges.push(Range { start, count });
}
}
}
if ranges.len() > 0 {
// seek to beginning of the first range.
if let Err(_) = file.seek(std::io::SeekFrom::Start(ranges[0].start)).await {
let r = format!("bytes */{}", len);
res.headers_mut().insert("Content-Range", r.parse().unwrap());
*res.status_mut() = StatusCode::RANGE_NOT_SATISFIABLE;
ranges.clear();
no_body = true;
}
}
if ranges.len() > 0 {
curpos = ranges[0].start;
*res.status_mut() = StatusCode::PARTIAL_CONTENT;
if ranges.len() == 1 {
// add content-range header.
let r = format!(
"bytes {}-{}/{}",
ranges[0].start,
ranges[0].start + ranges[0].count - 1,
len
);
res.headers_mut().insert("Content-Range", r.parse().unwrap());
} else {
// add content-type header.
let r = format!("multipart/byteranges; boundary={}", BOUNDARY);
res.headers_mut().insert("Content-Type", r.parse().unwrap());
}
} else {
// normal request, send entire file.
ranges.push(Range { start: 0, count: len });
}
// set content-type and content-length if we're not doing multipart.
let content_type = if is_hbs {
"text/html; charset=UTF-8"
} else {
path.get_mime_type_str()
};
if ranges.len() <= 1 {
res.headers_mut()
.typed_insert(davheaders::ContentType(content_type.to_owned()));
let notmod = res.status() == StatusCode::NOT_MODIFIED;
let len = if head || !no_body || notmod {
ranges[0].count
} else {
0
};
res.headers_mut().typed_insert(headers::ContentLength(len));
}
if head || no_body {
return Ok(res);
}
// now just loop and send data.
*res.body_mut() = Body::from(AsyncStream::new(|mut tx| {
async move {
let zero = [0; 4096];
let multipart = ranges.len() > 1;
for range in ranges {
trace!("handle_get: start = {}, count = {}", range.start, range.count);
if curpos != range.start {
// this should never fail, but if it does, just skip this range
// and try the next one.
if let Err(_e) = file.seek(std::io::SeekFrom::Start(range.start)).await {
debug!("handle_get: failed to seek to {}: {:?}", range.start, _e);
continue;
}
curpos = range.start;
}
if multipart {
let mut hdrs = Vec::new();
let _ = write!(hdrs, "{}", BOUNDARY_START);
let _ = writeln!(
hdrs,
"Content-Range: bytes {}-{}/{}",
range.start,
range.start + range.count - 1,
len
);
let _ = writeln!(hdrs, "Content-Type: {}", content_type);
let _ = writeln!(hdrs, "");
tx.send(Bytes::from(hdrs)).await;
}
let mut count = range.count;
while count > 0 {
let blen = cmp::min(count, READ_BUF_SIZE as u64) as usize;
let mut buf = file.read_bytes(blen).await?;
if buf.len() == 0 {
// this is a cop out. if the file got truncated, just
// return zeroed bytes instead of file content.
let n = if count > 4096 { 4096 } else { count as usize };
buf = Bytes::copy_from_slice(&zero[..n]);
}
let len = buf.len() as u64;
count -= len;
curpos += len;
trace!("sending {} bytes", len);
tx.send(buf).await;
}
}
if multipart {
tx.send(Bytes::from(BOUNDARY_END)).await;
}
Ok::<(), std::io::Error>(())
}
}));
Ok(res)
}
pub(crate) async fn handle_autoindex(&self, req: &Request<()>, head: bool) -> DavResult<Response<Body>> {
let mut res = Response::new(Body::empty());
let path = self.path(&req);
// Is PROPFIND explicitly allowed?
let allow_propfind = self
.allow
.map(|x| x.contains(DavMethod::PropFind))
.unwrap_or(false);
// Only allow index generation if explicitly set to true, _or_ if it was
// unset, and PROPFIND is explicitly allowed.
if !self.autoindex.unwrap_or(allow_propfind) {
debug!("method {} not allowed on request {}", req.method(), req.uri());
return Err(DavError::StatusClose(StatusCode::METHOD_NOT_ALLOWED));
}
// read directory or bail.
let mut entries = self.fs.read_dir(&path, ReadDirMeta::Data).await?;
// start output
res.headers_mut()
.insert("Content-Type", "text/html; charset=utf-8".parse().unwrap());
*res.status_mut() = StatusCode::OK;
if head {
return Ok(res);
}
// now just loop and send data.
*res.body_mut() = Body::from(AsyncStream::new(|mut tx| {
async move {
// transform all entries into a dirent struct.
struct Dirent {
path: String,
name: String,
meta: Box<dyn DavMetaData>,
}
let mut dirents: Vec<Dirent> = Vec::new();
while let Some(dirent) = entries.next().await {
let mut name = dirent.name();
if name.starts_with(b".") {
continue;
}
let mut npath = path.clone();
npath.push_segment(&name);
if let Ok(meta) = dirent.metadata().await {
if meta.is_dir() {
name.push(b'/');
npath.add_slash();
}
dirents.push(Dirent {
path: npath.with_prefix().as_url_string(),
name: String::from_utf8_lossy(&name).to_string(),
meta: meta,
});
}
}
// now we can sort the dirent struct.
dirents.sort_by(|a, b| {
let adir = a.meta.is_dir();
let bdir = b.meta.is_dir();
if adir && !bdir {
std::cmp::Ordering::Less
} else if bdir && !adir {
std::cmp::Ordering::Greater
} else {
(a.name).cmp(&b.name)
}
});
// and output html
let upath = htmlescape::encode_minimal(&path.with_prefix().as_url_string());
let mut w = String::new();
w.push_str(
"\
<html><head>\n\
<title>Index of ",
);
w.push_str(&upath);
w.push_str("</title>\n");
w.push_str(
"\
<style>\n\
table {\n\
border-collapse: separate;\n\
border-spacing: 1.5em 0.25em;\n\
}\n\
h1 {\n\
padding-left: 0.3em;\n\
}\n\
a {\n\
text-decoration: none;\n\
color: blue;\n\
}\n\
.left {\n\
text-align: left;\n\
}\n\
.mono {\n\
font-family: monospace;\n\
}\n\
.mw20 {\n\
min-width: 20em;\n\
}\n\
</style>\n\
</head>\n\
<body>\n",
);
w.push_str(&format!("<h1>Index of {}</h1>", display_path(&path)));
w.push_str(
"\
<table>\n\
<tr>\n\
<th class=\"left mw20\">Name</th>\n\
<th class=\"left\">Last modified</th>\n\
<th>Size</th>\n\
</tr>\n\
<tr><th colspan=\"3\"><hr></th></tr>\n\
<tr>\n\
<td><a href=\"..\">Parent Directory</a></td>\n\
<td>&nbsp;</td>\n\
<td class=\"mono\" align=\"right\">[DIR] </td>\n\
</tr>\n",
);
tx.send(Bytes::from(w)).await;
for dirent in &dirents {
let modified = match dirent.meta.modified() {
Ok(t) => {
let tm = systemtime_to_offsetdatetime(t);
format!(
"{:04}-{:02}-{:02} {:02}:{:02}",
tm.year(),
tm.month(),
tm.day(),
tm.hour(),
tm.minute(),
)
},
Err(_) => "".to_string(),
};
let size = match dirent.meta.is_file() {
true => display_size(dirent.meta.len()),
false => "[DIR] ".to_string(),
};
let name = htmlescape::encode_minimal(&dirent.name);
let s = format!("<tr><td><a href=\"{}\">{}</a></td><td class=\"mono\">{}</td><td class=\"mono\" align=\"right\">{}</td></tr>",
dirent.path, name, modified, size);
tx.send(Bytes::from(s)).await;
}
let mut w = String::new();
w.push_str("<tr><th colspan=\"3\"><hr></th></tr>");
w.push_str("</table></body></html>");
tx.send(Bytes::from(w)).await;
Ok::<_, std::io::Error>(())
}
}));
Ok(res)
}
}
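// A small sketch of the Content-Range forms that handle_get() above produces.
// The 10_000-byte length and the 0-499 range are hypothetical values that just
// mirror the format!() calls in the range handling code.
#[cfg(test)]
mod content_range_example {
    #[test]
    fn single_range_and_unsatisfiable_forms() {
        let len = 10_000u64;
        // A satisfiable "Range: bytes=0-499" ends up as a 206 response with:
        let (start, count) = (0u64, 500u64);
        assert_eq!(
            format!("bytes {}-{}/{}", start, start + count - 1, len),
            "bytes 0-499/10000"
        );
        // An invalid or out-of-bounds range yields 416 with this form instead:
        assert_eq!(format!("bytes */{}", len), "bytes */10000");
    }
}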
fn display_size(size: u64) -> String {
if size <= 1000 {
return format!("{} ", size);
}
if size <= 1_000_000 {
return format!("{} KiB", ((size / 10) as f64) / 100f64);
}
if size <= 1_000_000_000 {
return format!("{} MiB", ((size / 10_000) as f64) / 100f64);
}
if size <= 1_000_000_000_000 {
return format!("{} GiB", ((size / 10_000_000) as f64) / 100f64);
}
format!("{:2}TiB", ((size / 10_000_000_000) as f64) / 100f64)
}
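// Illustration only: what display_size() produces for a few representative
// sizes, given the decimal thresholds used above.
#[cfg(test)]
mod display_size_example {
    use super::display_size;

    #[test]
    fn formats_sizes() {
        assert_eq!(display_size(500), "500 ");
        assert_eq!(display_size(1_500), "1.5 KiB");
        assert_eq!(display_size(2_500_000), "2.5 MiB");
    }
}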
fn display_path(path: &DavPath) -> String {
let path_dsp = String::from_utf8_lossy(path.with_prefix().as_bytes());
let path_url = path.with_prefix().as_url_string();
let dpath_segs = path_dsp.split("/").filter(|s| !s.is_empty()).collect::<Vec<_>>();
let upath_segs = path_url.split("/").filter(|s| !s.is_empty()).collect::<Vec<_>>();
let mut dpath = String::new();
let mut upath = String::new();
if dpath_segs.len() == 0 {
dpath.push_str("/");
} else {
dpath.push_str("<a href = \"/\">/</a>");
}
for idx in 0..dpath_segs.len() {
upath.push('/');
upath.push_str(upath_segs[idx]);
let dseg = htmlescape::encode_minimal(dpath_segs[idx]);
if idx == dpath_segs.len() - 1 {
dpath.push_str(&dseg);
} else {
dpath.push_str(&format!("<a href = \"{}\">{}</a>/", upath, dseg));
}
}
dpath
}
use std::collections::HashMap;
use std::io::{Error, ErrorKind, SeekFrom};
use std::time::SystemTime;
use crate::fs::{DavFile, DavMetaData, FsFuture, FsResult};
use futures::future::{self, FutureExt};
use handlebars::Handlebars;
use headers::{authorization::Basic, Authorization};
async fn read_handlebars(
req: &Request<()>,
mut file: Box<dyn DavFile>,
) -> DavResult<(Box<dyn DavFile>, Box<dyn DavMetaData>)>
{
let hbs = Handlebars::new();
let mut vars = HashMap::new();
let headers = req.headers();
// Read .hbs file into memory.
let len = file.metadata().await?.len();
let buffer = file.read_bytes(len.try_into().unwrap()).await?;
let data = std::str::from_utf8(&buffer)?;
// Set variables.
for hdr in &["User-Agent", "Host", "Referer"] {
if let Some(val) = headers.get(*hdr) {
let mut var = "HTTP_".to_string() + &hdr.replace('-', "_");
var.make_ascii_uppercase();
if let Ok(valstr) = val.to_str() {
vars.insert(var, valstr.to_string());
}
}
}
match headers.typed_get::<Authorization<Basic>>() {
Some(Authorization(basic)) => {
vars.insert("AUTH_TYPE".to_string(), "Basic".to_string());
vars.insert("REMOTE_USER".to_string(), basic.username().to_string());
},
_ => {},
}
// Render.
let result = hbs
.render_template(data, &vars)
.map_err(|_| DavError::Status(StatusCode::INTERNAL_SERVER_ERROR))?;
let mut hbsfile = HbsFile::new(result);
let hbsmeta = hbsfile.metadata().await?;
Ok((hbsfile, hbsmeta))
}
#[derive(Clone, Debug)]
struct HbsMeta {
mtime: SystemTime,
size: u64,
}
impl DavMetaData for HbsMeta {
fn len(&self) -> u64 {
self.size
}
fn created(&self) -> FsResult<SystemTime> {
Ok(self.mtime)
}
fn modified(&self) -> FsResult<SystemTime> {
Ok(self.mtime)
}
fn is_dir(&self) -> bool {
false
}
}
#[derive(Clone, Debug)]
struct HbsFile {
meta: HbsMeta,
pos: usize,
data: Vec<u8>,
}
impl HbsFile {
fn new(data: String) -> Box<dyn DavFile> {
Box::new(HbsFile {
meta: HbsMeta {
mtime: SystemTime::now(),
size: data.len() as u64,
},
data: data.into_bytes(),
pos: 0,
})
}
}
impl DavFile for HbsFile {
fn metadata<'a>(&'a mut self) -> FsFuture<Box<dyn DavMetaData>> {
async move { Ok(Box::new(self.meta.clone()) as Box<dyn DavMetaData>) }.boxed()
}
fn read_bytes<'a>(&'a mut self, count: usize) -> FsFuture<Bytes> {
async move {
let start = self.pos;
let end = std::cmp::min(self.pos + count, self.data.len());
self.pos += end - start;
let b = Bytes::copy_from_slice(&self.data[start..end]);
Ok(b)
}
.boxed()
}
fn seek<'a>(&'a mut self, pos: SeekFrom) -> FsFuture<u64> {
async move {
let (start, offset): (u64, i64) = match pos {
SeekFrom::Start(npos) => (0, npos as i64),
SeekFrom::Current(npos) => (self.pos as u64, npos),
SeekFrom::End(npos) => (self.data.len() as u64, npos),
};
if offset < 0 {
if -offset as u64 > start {
return Err(Error::new(ErrorKind::InvalidInput, "invalid seek").into());
}
self.pos = (start - (-offset as u64)) as usize;
} else {
self.pos = (start + offset as u64) as usize;
}
Ok(self.pos as u64)
}
.boxed()
}
fn write_buf<'a>(&'a mut self, _buf: Box<dyn bytes::Buf + Send>) -> FsFuture<()> {
Box::pin(future::ready(Err(FsError::NotImplemented)))
}
fn write_bytes<'a>(&'a mut self, _buf: bytes::Bytes) -> FsFuture<()> {
Box::pin(future::ready(Err(FsError::NotImplemented)))
}
fn flush<'a>(&'a mut self) -> FsFuture<()> {
Box::pin(future::ready(Ok(())))
}
}
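// A minimal sketch of how an indexfile template sees the variables that
// read_handlebars() above collects. The template string and the header values
// are made up for the example.
#[cfg(test)]
mod handlebars_vars_example {
    use handlebars::Handlebars;
    use std::collections::HashMap;

    #[test]
    fn render_with_request_vars() {
        let mut vars = HashMap::new();
        vars.insert("HTTP_USER_AGENT".to_string(), "litmus/0.13".to_string());
        vars.insert("HTTP_HOST".to_string(), "localhost:4918".to_string());
        vars.insert("AUTH_TYPE".to_string(), "Basic".to_string());
        vars.insert("REMOTE_USER".to_string(), "someuser".to_string());

        let hbs = Handlebars::new();
        let page = hbs
            .render_template("<p>hello {{REMOTE_USER}} on {{HTTP_HOST}}</p>", &vars)
            .unwrap();
        assert_eq!(page, "<p>hello someuser on localhost:4918</p>");
    }
}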

334
src/handle_lock.rs Normal file

@@ -0,0 +1,334 @@
use std::cmp;
use std::io::Cursor;
use std::time::Duration;
use headers::HeaderMapExt;
use http::StatusCode as SC;
use http::{Request, Response};
use xmltree::{self, Element};
use crate::body::Body;
use crate::conditional::{dav_if_match, if_match};
use crate::davheaders::{self, DavTimeout};
use crate::davpath::DavPath;
use crate::errors::*;
use crate::fs::{FsError, OpenOptions};
use crate::ls::*;
use crate::util::MemBuffer;
use crate::xmltree_ext::{self, ElementExt};
use crate::DavResult;
impl crate::DavInner {
pub(crate) async fn handle_lock(&self, req: &Request<()>, xmldata: &[u8]) -> DavResult<Response<Body>> {
// must have a locksystem or bail
let locksystem = match self.ls {
Some(ref ls) => ls,
None => return Err(SC::METHOD_NOT_ALLOWED.into()),
};
let mut res = Response::new(Body::empty());
// path and meta
let mut path = self.path(&req);
let meta = match self.fs.metadata(&path).await {
Ok(meta) => Some(self.fixpath(&mut res, &mut path, meta)),
Err(_) => None,
};
// lock refresh?
if xmldata.len() == 0 {
// get locktoken
let (_, tokens) = dav_if_match(&req, &self.fs, &self.ls, &path).await;
if tokens.len() != 1 {
return Err(SC::BAD_REQUEST.into());
}
// try refresh
// FIXME: you can refresh a lock owned by someone else. is that OK?
let timeout = get_timeout(&req, true, false);
let lock = match locksystem.refresh(&path, &tokens[0], timeout) {
Ok(lock) => lock,
Err(_) => return Err(SC::PRECONDITION_FAILED.into()),
};
// output result
let prop = build_lock_prop(&lock, true);
let mut emitter = xmltree_ext::emitter(MemBuffer::new())?;
prop.write_ev(&mut emitter)?;
let buffer = emitter.into_inner().take();
let ct = "application/xml; charset=utf-8".to_owned();
res.headers_mut().typed_insert(davheaders::ContentType(ct));
*res.body_mut() = Body::from(buffer);
return Ok(res);
}
// handle Depth:
let deep = match req.headers().typed_get::<davheaders::Depth>() {
Some(davheaders::Depth::Infinity) | None => true,
Some(davheaders::Depth::Zero) => false,
_ => return Err(SC::BAD_REQUEST.into()),
};
// handle the if-headers.
if let Some(s) = if_match(&req, meta.as_ref(), &self.fs, &self.ls, &path).await {
return Err(s.into());
}
// Cut & paste from method_put.rs ....
let mut oo = OpenOptions::write();
oo.create = true;
if req
.headers()
.typed_get::<davheaders::IfMatch>()
.map_or(false, |h| &h.0 == &davheaders::ETagList::Star)
{
oo.create = false;
}
if req
.headers()
.typed_get::<davheaders::IfNoneMatch>()
.map_or(false, |h| &h.0 == &davheaders::ETagList::Star)
{
oo.create_new = true;
}
// parse xml
let tree = xmltree::Element::parse2(Cursor::new(xmldata))?;
if tree.name != "lockinfo" {
return Err(DavError::XmlParseError);
}
// decode Element.
let mut shared: Option<bool> = None;
let mut owner: Option<Element> = None;
let mut locktype = false;
for elem in tree.child_elems_iter() {
match elem.name.as_str() {
"lockscope" => {
let name = elem.child_elems_iter().find_map(|e| Some(e.name.as_ref()));
match name {
Some("exclusive") => shared = Some(false),
Some("shared") => shared = Some(true),
_ => return Err(DavError::XmlParseError),
}
},
"locktype" => {
let name = elem.child_elems_iter().find_map(|e| Some(e.name.as_ref()));
match name {
Some("write") => locktype = true,
_ => return Err(DavError::XmlParseError),
}
},
"owner" => {
let mut o = elem.clone();
o.prefix = Some("D".to_owned());
owner = Some(o);
},
_ => return Err(DavError::XmlParseError),
}
}
// sanity check.
if !shared.is_some() || !locktype {
return Err(DavError::XmlParseError);
};
let shared = shared.unwrap();
// create lock
let timeout = get_timeout(&req, false, shared);
let principal = self.principal.as_ref().map(|s| s.as_str());
let lock = match locksystem.lock(&path, principal, owner.as_ref(), timeout, shared, deep) {
Ok(lock) => lock,
Err(_) => return Err(SC::LOCKED.into()),
};
// try to create file if it doesn't exist.
if let None = meta {
match self.fs.open(&path, oo).await {
Ok(_) => {},
Err(FsError::NotFound) | Err(FsError::Exists) => {
let s = if !oo.create || oo.create_new {
SC::PRECONDITION_FAILED
} else {
SC::CONFLICT
};
let _ = locksystem.unlock(&path, &lock.token);
return Err(s.into());
},
Err(e) => {
let _ = locksystem.unlock(&path, &lock.token);
return Err(e.into());
},
};
}
// output result
let lt = format!("<{}>", lock.token);
let ct = "application/xml; charset=utf-8".to_owned();
res.headers_mut().typed_insert(davheaders::LockToken(lt));
res.headers_mut().typed_insert(davheaders::ContentType(ct));
if let None = meta {
*res.status_mut() = SC::CREATED;
} else {
*res.status_mut() = SC::OK;
}
let mut emitter = xmltree_ext::emitter(MemBuffer::new())?;
let prop = build_lock_prop(&lock, true);
prop.write_ev(&mut emitter)?;
let buffer = emitter.into_inner().take();
*res.body_mut() = Body::from(buffer);
return Ok(res);
}
pub(crate) async fn handle_unlock(&self, req: &Request<()>) -> DavResult<Response<Body>> {
// must have a locksystem or bail
let locksystem = match self.ls {
Some(ref ls) => ls,
None => return Err(SC::METHOD_NOT_ALLOWED.into()),
};
// Must have Lock-Token header
let t = req
.headers()
.typed_get::<davheaders::LockToken>()
.ok_or(DavError::Status(SC::BAD_REQUEST))?;
let token = t.0.trim_matches(|c| c == '<' || c == '>');
let mut res = Response::new(Body::empty());
let mut path = self.path(&req);
if let Ok(meta) = self.fs.metadata(&path).await {
self.fixpath(&mut res, &mut path, meta);
}
match locksystem.unlock(&path, token) {
Ok(_) => {
*res.status_mut() = SC::NO_CONTENT;
Ok(res)
},
Err(_) => Err(SC::CONFLICT.into()),
}
}
}
pub(crate) fn list_lockdiscovery(ls: Option<&Box<dyn DavLockSystem>>, path: &DavPath) -> Element {
let mut elem = Element::new2("D:lockdiscovery");
// must have a locksystem or bail
let locksystem = match ls {
Some(ls) => ls,
None => return elem,
};
// list the locks.
let locks = locksystem.discover(path);
for lock in &locks {
elem.push_element(build_lock_prop(lock, false));
}
elem
}
pub(crate) fn list_supportedlock(ls: Option<&Box<dyn DavLockSystem>>) -> Element {
let mut elem = Element::new2("D:supportedlock");
// must have a locksystem or bail
if ls.is_none() {
return elem;
}
let mut entry = Element::new2("D:lockentry");
let mut scope = Element::new2("D:lockscope");
scope.push_element(Element::new2("D:exclusive"));
scope.push_element(Element::new2("D:write"));
entry.push_element(scope);
elem.push_element(entry);
let mut entry = Element::new2("D:lockentry");
let mut scope = Element::new2("D:lockscope");
scope.push_element(Element::new2("D:shared"));
scope.push_element(Element::new2("D:write"));
entry.push_element(scope);
elem.push_element(entry);
elem
}
// process timeout header
fn get_timeout(req: &Request<()>, refresh: bool, shared: bool) -> Option<Duration> {
let max_timeout = if shared {
Duration::new(86400, 0)
} else {
Duration::new(600, 0)
};
match req.headers().typed_get::<davheaders::Timeout>() {
Some(davheaders::Timeout(ref vec)) if vec.len() > 0 => {
match vec[0] {
DavTimeout::Infinite => {
if refresh {
None
} else {
Some(max_timeout)
}
},
DavTimeout::Seconds(n) => Some(cmp::min(max_timeout, Duration::new(n as u64, 0))),
}
},
_ => None,
}
}
fn build_lock_prop(lock: &DavLock, full: bool) -> Element {
let mut actlock = Element::new2("D:activelock");
let mut elem = Element::new2("D:lockscope");
elem.push_element(match lock.shared {
false => Element::new2("D:exclusive"),
true => Element::new2("D:shared"),
});
actlock.push_element(elem);
let mut elem = Element::new2("D:locktype");
elem.push_element(Element::new2("D:write"));
actlock.push_element(elem);
actlock.push_element(
Element::new2("D:depth").text(
match lock.deep {
false => "0",
true => "Infinity",
}
.to_string(),
),
);
actlock.push_element(Element::new2("D:timeout").text(match lock.timeout {
None => "Infinite".to_string(),
Some(d) => format!("Second-{}", d.as_secs()),
}));
let mut locktokenelem = Element::new2("D:locktoken");
locktokenelem.push_element(Element::new2("D:href").text(lock.token.clone()));
actlock.push_element(locktokenelem);
let mut lockroot = Element::new2("D:lockroot");
lockroot.push_element(Element::new2("D:href").text(lock.path.with_prefix().as_url_string()));
actlock.push_element(lockroot);
if let Some(ref o) = lock.owner {
actlock.push_element(o.clone());
}
if !full {
return actlock;
}
let mut ldis = Element::new2("D:lockdiscovery");
ldis.push_element(actlock);
let mut prop = Element::new2("D:prop").ns("D", "DAV:");
prop.push_element(ldis);
prop
}
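// Illustration only: the kind of LOCK request body handle_lock() expects. The
// owner href is made up; the structure (lockinfo / lockscope / locktype /
// owner) is what the parsing code above looks for.
#[cfg(test)]
mod lockinfo_example {
    use std::io::Cursor;
    use xmltree::Element;

    const LOCKINFO: &str = r#"<?xml version="1.0" encoding="utf-8"?>
<D:lockinfo xmlns:D="DAV:">
  <D:lockscope><D:exclusive/></D:lockscope>
  <D:locktype><D:write/></D:locktype>
  <D:owner><D:href>mailto:someuser@example.com</D:href></D:owner>
</D:lockinfo>"#;

    #[test]
    fn parses_as_lockinfo() {
        let tree = Element::parse(Cursor::new(LOCKINFO.as_bytes())).unwrap();
        assert_eq!(tree.name, "lockinfo");
        assert!(tree.get_child("lockscope").is_some());
    }
}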

50
src/handle_mkcol.rs Normal file

@@ -0,0 +1,50 @@
use headers::HeaderMapExt;
use http::{Request, Response, StatusCode};
use crate::body::Body;
use crate::conditional::*;
use crate::davheaders;
use crate::fs::*;
use crate::{DavError, DavResult};
impl crate::DavInner {
pub(crate) async fn handle_mkcol(&self, req: &Request<()>) -> DavResult<Response<Body>> {
let mut path = self.path(&req);
let meta = self.fs.metadata(&path).await;
// check the If and If-* headers.
let res = if_match_get_tokens(&req, meta.as_ref().ok(), &self.fs, &self.ls, &path).await;
let tokens = match res {
Ok(t) => t,
Err(s) => return Err(DavError::Status(s)),
};
// if locked check if we hold that lock.
if let Some(ref locksystem) = self.ls {
let t = tokens.iter().map(|s| s.as_str()).collect::<Vec<&str>>();
let principal = self.principal.as_ref().map(|s| s.as_str());
if let Err(_l) = locksystem.check(&path, principal, false, false, t) {
return Err(DavError::Status(StatusCode::LOCKED));
}
}
let mut res = Response::new(Body::empty());
match self.fs.create_dir(&path).await {
// RFC 4918 9.3.1 MKCOL Status Codes.
Err(FsError::Exists) => return Err(DavError::Status(StatusCode::METHOD_NOT_ALLOWED)),
Err(FsError::NotFound) => return Err(DavError::Status(StatusCode::CONFLICT)),
Err(e) => return Err(DavError::FsError(e)),
Ok(()) => {
if path.is_collection() {
path.add_slash();
res.headers_mut()
.typed_insert(davheaders::ContentLocation(path.with_prefix().as_url_string()));
}
*res.status_mut() = StatusCode::CREATED;
},
}
Ok(res)
}
}

72
src/handle_options.rs Normal file

@@ -0,0 +1,72 @@
use headers::HeaderMapExt;
use http::{Request, Response};
use crate::body::Body;
use crate::util::{dav_method, DavMethod};
use crate::DavResult;
impl crate::DavInner {
pub(crate) async fn handle_options(&self, req: &Request<()>) -> DavResult<Response<Body>> {
let mut res = Response::new(Body::empty());
let h = res.headers_mut();
// We could simply not report webdav level 2 support if self.allow doesn't
// contain LOCK/UNLOCK. However we do advertise support, since there might
// be LOCK/UNLOCK support in another part of the URL space.
let dav = "1,2,3,sabredav-partialupdate";
h.insert("DAV", dav.parse().unwrap());
h.insert("MS-Author-Via", "DAV".parse().unwrap());
h.typed_insert(headers::ContentLength(0));
// Helper to add method to array if method is in fact
// allowed. If the current method is not OPTIONS, leave
// out the current method since we're probably called
// for DavMethodNotAllowed.
let method = dav_method(req.method()).unwrap_or(DavMethod::Options);
let islock = |m| m == DavMethod::Lock || m == DavMethod::Unlock;
let mm = |v: &mut Vec<String>, m: &str, y: DavMethod| {
if (y == DavMethod::Options || (y != method || islock(y) != islock(method))) &&
(!islock(y) || self.ls.is_some()) &&
self.allow.map(|x| x.contains(y)).unwrap_or(true)
{
v.push(m.to_string());
}
};
let path = self.path(&req);
let meta = self.fs.metadata(&path).await;
let is_unmapped = meta.is_err();
let is_file = meta.and_then(|m| Ok(m.is_file())).unwrap_or_default();
let is_star = path.is_star() && method == DavMethod::Options;
let mut v = Vec::new();
if is_unmapped && !is_star {
mm(&mut v, "OPTIONS", DavMethod::Options);
mm(&mut v, "MKCOL", DavMethod::MkCol);
mm(&mut v, "PUT", DavMethod::Put);
mm(&mut v, "LOCK", DavMethod::Lock);
} else {
if is_file || is_star {
mm(&mut v, "HEAD", DavMethod::Head);
mm(&mut v, "GET", DavMethod::Get);
mm(&mut v, "PATCH", DavMethod::Patch);
mm(&mut v, "PUT", DavMethod::Put);
}
mm(&mut v, "OPTIONS", DavMethod::Options);
mm(&mut v, "PROPFIND", DavMethod::PropFind);
mm(&mut v, "COPY", DavMethod::Copy);
if path.as_url_string() != "/" {
mm(&mut v, "MOVE", DavMethod::Move);
mm(&mut v, "DELETE", DavMethod::Delete);
}
mm(&mut v, "LOCK", DavMethod::Lock);
mm(&mut v, "UNLOCK", DavMethod::Unlock);
}
let a = v.clone().join(",").parse().unwrap();
res.headers_mut().insert("allow", a);
Ok(res)
}
}

963
src/handle_props.rs Normal file

@@ -0,0 +1,963 @@
use std::borrow::Cow;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::io::{self, Cursor};
use bytes::Bytes;
use futures::{future::BoxFuture, FutureExt, StreamExt};
use headers::HeaderMapExt;
use http::{Request, Response, StatusCode};
use crate::xmltree_ext::*;
use xml::common::XmlVersion;
use xml::writer::EventWriter;
use xml::writer::XmlEvent as XmlWEvent;
use xml::EmitterConfig;
use xmltree::{Element, XMLNode};
use crate::async_stream::AsyncStream;
use crate::body::Body;
use crate::conditional::if_match_get_tokens;
use crate::davheaders;
use crate::davpath::*;
use crate::errors::*;
use crate::fs::*;
use crate::handle_lock::{list_lockdiscovery, list_supportedlock};
use crate::ls::*;
use crate::util::MemBuffer;
use crate::util::{dav_xml_error, systemtime_to_httpdate, systemtime_to_rfc3339};
use crate::{DavInner, DavResult};
const NS_APACHE_URI: &'static str = "http://apache.org/dav/props/";
const NS_DAV_URI: &'static str = "DAV:";
const NS_MS_URI: &'static str = "urn:schemas-microsoft-com:";
// list returned by PROPFIND <propname/>.
const PROPNAME_STR: &'static [&'static str] = &[
"D:creationdate",
"D:displayname",
"D:getcontentlanguage",
"D:getcontentlength",
"D:getcontenttype",
"D:getetag",
"D:getlastmodified",
"D:lockdiscovery",
"D:resourcetype",
"D:supportedlock",
"D:quota-available-bytes",
"D:quota-used-bytes",
"A:executable",
"Z:Win32LastAccessTime",
];
// properties returned by PROPFIND <allprop/> or empty body.
const ALLPROP_STR: &'static [&'static str] = &[
"D:creationdate",
"D:displayname",
"D:getcontentlanguage",
"D:getcontentlength",
"D:getcontenttype",
"D:getetag",
"D:getlastmodified",
"D:lockdiscovery",
"D:resourcetype",
"D:supportedlock",
];
// properties returned by PROPFIND with empty body for Microsoft clients.
const MS_ALLPROP_STR: &'static [&'static str] = &[
"D:creationdate",
"D:displayname",
"D:getcontentlanguage",
"D:getcontentlength",
"D:getcontenttype",
"D:getetag",
"D:getlastmodified",
"D:lockdiscovery",
"D:resourcetype",
"D:supportedlock",
"Z:Win32CreationTime",
"Z:Win32FileAttributes",
"Z:Win32LastAccessTime",
"Z:Win32LastModifiedTime",
];
lazy_static! {
static ref ALLPROP: Vec<Element> = init_staticprop(ALLPROP_STR);
static ref MS_ALLPROP: Vec<Element> = init_staticprop(MS_ALLPROP_STR);
static ref PROPNAME: Vec<Element> = init_staticprop(PROPNAME_STR);
}
type Emitter = EventWriter<MemBuffer>;
type Sender = crate::async_stream::Sender<bytes::Bytes, io::Error>;
struct StatusElement {
status: StatusCode,
element: Element,
}
struct PropWriter {
emitter: Emitter,
tx: Option<Sender>,
name: String,
props: Vec<Element>,
fs: Box<dyn DavFileSystem>,
ls: Option<Box<dyn DavLockSystem>>,
useragent: String,
q_cache: QuotaCache,
}
#[derive(Default, Clone, Copy)]
struct QuotaCache {
q_state: u32,
q_used: u64,
q_total: Option<u64>,
}
fn init_staticprop(p: &[&str]) -> Vec<Element> {
let mut v = Vec::new();
for a in p {
let mut e = Element::new2(*a);
e.namespace = match e.prefix.as_ref().map(|x| x.as_str()) {
Some("D") => Some(NS_DAV_URI.to_string()),
Some("A") => Some(NS_APACHE_URI.to_string()),
Some("Z") => Some(NS_MS_URI.to_string()),
_ => None,
};
v.push(e);
}
v
}
impl DavInner {
pub(crate) async fn handle_propfind(
self,
req: &Request<()>,
xmldata: &[u8],
) -> DavResult<Response<Body>>
{
// No checks on If: and If-* headers here, because I do not see
// the point and there's nothing in RFC4918 that indicates we should.
let mut res = Response::new(Body::empty());
res.headers_mut()
.typed_insert(headers::CacheControl::new().with_no_cache());
res.headers_mut().typed_insert(headers::Pragma::no_cache());
let depth = match req.headers().typed_get::<davheaders::Depth>() {
Some(davheaders::Depth::Infinity) | None => {
if req.headers().typed_get::<davheaders::XLitmus>().is_none() {
let ct = "application/xml; charset=utf-8".to_owned();
res.headers_mut().typed_insert(davheaders::ContentType(ct));
*res.status_mut() = StatusCode::FORBIDDEN;
*res.body_mut() = dav_xml_error("<D:propfind-finite-depth/>");
return Ok(res);
}
davheaders::Depth::Infinity
},
Some(d) => d.clone(),
};
// path and meta
let mut path = self.path(&req);
let meta = self.fs.metadata(&path).await?;
let meta = self.fixpath(&mut res, &mut path, meta);
let mut root = None;
if xmldata.len() > 0 {
root = match Element::parse(Cursor::new(xmldata)) {
Ok(t) => {
if t.name == "propfind" && t.namespace.as_ref().map(|s| s.as_str()) == Some("DAV:") {
Some(t)
} else {
return Err(DavError::XmlParseError.into());
}
},
Err(_) => return Err(DavError::XmlParseError.into()),
};
}
let (name, props) = match root {
None => ("allprop", Vec::new()),
Some(mut elem) => {
let includes = elem
.take_child("includes")
.map_or(Vec::new(), |n| n.take_child_elems());
match elem
.child_elems_into_iter()
.find(|e| e.name == "propname" || e.name == "prop" || e.name == "allprop")
{
Some(elem) => {
match elem.name.as_str() {
"propname" => ("propname", Vec::new()),
"prop" => ("prop", elem.take_child_elems()),
"allprop" => ("allprop", includes),
_ => return Err(DavError::XmlParseError.into()),
}
},
None => return Err(DavError::XmlParseError.into()),
}
},
};
trace!("propfind: type request: {}", name);
let mut pw = PropWriter::new(&req, &mut res, name, props, &self.fs, self.ls.as_ref())?;
*res.body_mut() = Body::from(AsyncStream::new(|tx| {
async move {
pw.set_tx(tx);
let is_dir = meta.is_dir();
pw.write_props(&path, meta).await?;
pw.flush().await?;
if is_dir && depth != davheaders::Depth::Zero {
let _ = self.propfind_directory(&path, depth, &mut pw).await;
}
pw.close().await?;
Ok(())
}
}));
Ok(res)
}
fn propfind_directory<'a>(
&'a self,
path: &'a DavPath,
depth: davheaders::Depth,
propwriter: &'a mut PropWriter,
) -> BoxFuture<'a, DavResult<()>>
{
async move {
let readdir_meta = match self.hide_symlinks {
Some(true) | None => ReadDirMeta::DataSymlink,
Some(false) => ReadDirMeta::Data,
};
let mut entries = match self.fs.read_dir(path, readdir_meta).await {
Ok(entries) => entries,
Err(e) => {
// if we cannot read_dir, just skip it.
error!("read_dir error {:?}", e);
return Ok(());
},
};
while let Some(dirent) = entries.next().await {
let mut npath = path.clone();
npath.push_segment(&dirent.name());
let meta = match dirent.metadata().await {
Ok(meta) => meta,
Err(e) => {
trace!("metadata error on {}. Skipping {:?}", npath, e);
continue;
},
};
if meta.is_symlink() {
continue;
}
if meta.is_dir() {
npath.add_slash();
}
let is_dir = meta.is_dir();
propwriter.write_props(&npath, meta).await?;
propwriter.flush().await?;
if depth == davheaders::Depth::Infinity && is_dir {
self.propfind_directory(&npath, depth, propwriter).await?;
}
}
Ok(())
}
.boxed()
}
// set/change a live property. returns StatusCode::CONTINUE if
// this wasn't a live property (or, if we want it handled
// as a dead property, e.g. DAV:displayname).
fn liveprop_set(&self, prop: &Element, can_deadprop: bool) -> StatusCode {
match prop.namespace.as_ref().map(|x| x.as_str()) {
Some(NS_DAV_URI) => {
match prop.name.as_str() {
"getcontentlanguage" => {
if prop.get_text().is_none() || prop.has_child_elems() {
return StatusCode::CONFLICT;
}
// FIXME only here to make "litmus" happy, really...
if let Some(s) = prop.get_text() {
if davheaders::ContentLanguage::try_from(s.as_ref()).is_err() {
return StatusCode::CONFLICT;
}
}
if can_deadprop {
StatusCode::CONTINUE
} else {
StatusCode::FORBIDDEN
}
},
"displayname" => {
if prop.get_text().is_none() || prop.has_child_elems() {
return StatusCode::CONFLICT;
}
if can_deadprop {
StatusCode::CONTINUE
} else {
StatusCode::FORBIDDEN
}
},
"getlastmodified" => {
// we might allow setting modified time
// by using utimes() on unix. Not yet though.
if prop.get_text().is_none() || prop.has_child_elems() {
return StatusCode::CONFLICT;
}
StatusCode::FORBIDDEN
},
_ => StatusCode::FORBIDDEN,
}
},
Some(NS_APACHE_URI) => {
match prop.name.as_str() {
"executable" => {
// we could allow toggling the execute bit.
// to be implemented.
if prop.get_text().is_none() || prop.has_child_elems() {
return StatusCode::CONFLICT;
}
StatusCode::FORBIDDEN
},
_ => StatusCode::FORBIDDEN,
}
},
Some(NS_MS_URI) => {
match prop.name.as_str() {
"Win32CreationTime" |
"Win32FileAttributes" |
"Win32LastAccessTime" |
"Win32LastModifiedTime" => {
if prop.get_text().is_none() || prop.has_child_elems() {
return StatusCode::CONFLICT;
}
// Always report back that we successfully
// changed these, even if we didn't --
// makes the windows webdav client work.
StatusCode::OK
},
_ => StatusCode::FORBIDDEN,
}
},
_ => StatusCode::CONTINUE,
}
}
// In general, live properties cannot be removed, with the
// exception of getcontentlanguage and displayname.
fn liveprop_remove(&self, prop: &Element, can_deadprop: bool) -> StatusCode {
match prop.namespace.as_ref().map(|x| x.as_str()) {
Some(NS_DAV_URI) => {
match prop.name.as_str() {
"getcontentlanguage" | "displayname" => {
if can_deadprop {
StatusCode::OK
} else {
StatusCode::FORBIDDEN
}
},
_ => StatusCode::FORBIDDEN,
}
},
Some(NS_APACHE_URI) | Some(NS_MS_URI) => StatusCode::FORBIDDEN,
_ => StatusCode::CONTINUE,
}
}
pub(crate) async fn handle_proppatch(
self,
req: &Request<()>,
xmldata: &[u8],
) -> DavResult<Response<Body>>
{
let mut res = Response::new(Body::empty());
// file must exist.
let mut path = self.path(&req);
let meta = self.fs.metadata(&path).await?;
let meta = self.fixpath(&mut res, &mut path, meta);
// check the If and If-* headers.
let tokens = match if_match_get_tokens(&req, Some(&meta), &self.fs, &self.ls, &path).await {
Ok(t) => t,
Err(s) => return Err(s.into()),
};
// if locked check if we hold that lock.
if let Some(ref locksystem) = self.ls {
let t = tokens.iter().map(|s| s.as_str()).collect::<Vec<&str>>();
let principal = self.principal.as_ref().map(|s| s.as_str());
if let Err(_l) = locksystem.check(&path, principal, false, false, t) {
return Err(StatusCode::LOCKED.into());
}
}
trace!(target: "xml", "proppatch input:\n{}]\n",
std::string::String::from_utf8_lossy(&xmldata));
// parse xml
let tree = Element::parse2(Cursor::new(xmldata))?;
if tree.name != "propertyupdate" {
return Err(DavError::XmlParseError);
}
let mut patch = Vec::new();
let mut ret = Vec::new();
let can_deadprop = self.fs.have_props(&path).await;
// walk over the element tree and feed "set" and "remove" items to
// the liveprop_set/liveprop_remove functions. If skipped by those,
// gather them in the "patch" Vec to be processed as dead properties.
for elem in tree.child_elems_iter() {
for n in elem
.child_elems_iter()
.filter(|e| e.name == "prop")
.flat_map(|e| e.child_elems_iter())
{
match elem.name.as_str() {
"set" => {
match self.liveprop_set(&n, can_deadprop) {
StatusCode::CONTINUE => patch.push((true, element_to_davprop_full(&n))),
s => ret.push((s, element_to_davprop(&n))),
}
},
"remove" => {
match self.liveprop_remove(&n, can_deadprop) {
StatusCode::CONTINUE => patch.push((false, element_to_davprop(&n))),
s => ret.push((s, element_to_davprop(&n))),
}
},
_ => {},
}
}
}
// if any set/remove failed, stop processing here.
if ret.iter().any(|&(ref s, _)| s != &StatusCode::OK) {
ret = ret
.into_iter()
.map(|(s, p)| {
if s == StatusCode::OK {
(StatusCode::FAILED_DEPENDENCY, p)
} else {
(s, p)
}
})
.collect::<Vec<_>>();
ret.extend(patch.into_iter().map(|(_, p)| (StatusCode::FAILED_DEPENDENCY, p)));
} else if patch.len() > 0 {
// hmmm ... we assume nothing goes wrong here at the
// moment. if it does, we should roll back the changes
// made earlier to the live props, but come on, we're not
// building a transaction engine here.
let deadret = self.fs.patch_props(&path, patch).await?;
ret.extend(deadret.into_iter());
}
// group by statuscode.
let mut hm = HashMap::new();
for (code, prop) in ret.into_iter() {
if !hm.contains_key(&code) {
hm.insert(code, Vec::new());
}
let v = hm.get_mut(&code).unwrap();
v.push(davprop_to_element(prop));
}
// And reply.
let mut pw = PropWriter::new(&req, &mut res, "propertyupdate", Vec::new(), &self.fs, None)?;
*res.body_mut() = Body::from(AsyncStream::new(|tx| {
async move {
pw.set_tx(tx);
pw.write_propresponse(&path, hm)?;
pw.close().await?;
Ok::<_, io::Error>(())
}
}));
Ok(res)
}
}
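// A hypothetical PROPFIND body of the kind handle_propfind() above processes.
// This one asks for two specific properties ("prop"); an empty body or
// <D:allprop/> would select the ALLPROP / MS_ALLPROP sets instead.
#[cfg(test)]
mod propfind_body_example {
    use std::io::Cursor;
    use xmltree::Element;

    const PROPFIND: &str = r#"<?xml version="1.0" encoding="utf-8"?>
<D:propfind xmlns:D="DAV:">
  <D:prop>
    <D:getcontentlength/>
    <D:getlastmodified/>
  </D:prop>
</D:propfind>"#;

    #[test]
    fn parses_as_prop_request() {
        let tree = Element::parse(Cursor::new(PROPFIND.as_bytes())).unwrap();
        assert_eq!(tree.name, "propfind");
        let prop = tree.get_child("prop").expect("prop element");
        let n = prop
            .children
            .iter()
            .filter(|c| matches!(c, xmltree::XMLNode::Element(_)))
            .count();
        assert_eq!(n, 2);
    }
}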
impl PropWriter {
pub fn new(
req: &Request<()>,
res: &mut Response<Body>,
name: &str,
mut props: Vec<Element>,
fs: &Box<dyn DavFileSystem>,
ls: Option<&Box<dyn DavLockSystem>>,
) -> DavResult<PropWriter>
{
let contenttype = "application/xml; charset=utf-8".parse().unwrap();
res.headers_mut().insert("content-type", contenttype);
*res.status_mut() = StatusCode::MULTI_STATUS;
let mut emitter = EventWriter::new_with_config(
MemBuffer::new(),
EmitterConfig {
normalize_empty_elements: false,
perform_indent: false,
indent_string: Cow::Borrowed(""),
..Default::default()
},
);
emitter.write(XmlWEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("utf-8"),
standalone: None,
})?;
// user-agent header.
let ua = match req.headers().get("user-agent") {
Some(s) => s.to_str().unwrap_or(""),
None => "",
};
if name != "prop" && name != "propertyupdate" {
let mut v = Vec::new();
let iter = if name == "allprop" {
if ua.contains("Microsoft") {
MS_ALLPROP.iter()
} else {
ALLPROP.iter()
}
} else {
PROPNAME.iter()
};
for a in iter {
if !props
.iter()
.any(|e| a.namespace == e.namespace && a.name == e.name)
{
v.push(a.clone());
}
}
props.append(&mut v);
}
// check the prop namespaces to see what namespaces
// we need to put in the preamble.
let mut ev = XmlWEvent::start_element("D:multistatus").ns("D", NS_DAV_URI);
if name != "propertyupdate" {
let mut a = false;
let mut m = false;
for prop in &props {
match prop.namespace.as_ref().map(|x| x.as_str()) {
Some(NS_APACHE_URI) => a = true,
Some(NS_MS_URI) => m = true,
_ => {},
}
}
if a {
ev = ev.ns("A", NS_APACHE_URI);
}
if m {
ev = ev.ns("Z", NS_MS_URI);
}
}
emitter.write(ev)?;
Ok(PropWriter {
emitter: emitter,
tx: None,
name: name.to_string(),
props: props,
fs: fs.clone(),
ls: ls.map(|ls| ls.clone()),
useragent: ua.to_string(),
q_cache: Default::default(),
})
}
pub fn set_tx(&mut self, tx: Sender) {
self.tx = Some(tx)
}
fn build_elem<T>(&self, content: bool, pfx: &str, e: &Element, text: T) -> DavResult<StatusElement>
where T: Into<String> {
let mut elem = Element {
prefix: Some(pfx.to_string()),
namespace: None,
namespaces: None,
name: e.name.clone(),
attributes: HashMap::new(),
children: Vec::new(),
};
if content {
let t: String = text.into();
if t != "" {
elem.children.push(XMLNode::Text(t));
}
}
Ok(StatusElement {
status: StatusCode::OK,
element: elem,
})
}
async fn get_quota<'a>(
&'a self,
qc: &'a mut QuotaCache,
path: &'a DavPath,
meta: &'a dyn DavMetaData,
) -> FsResult<(u64, Option<u64>)>
{
// do lookup only once.
match qc.q_state {
0 => {
match self.fs.get_quota().await {
Err(e) => {
qc.q_state = 1;
return Err(e);
},
Ok((u, t)) => {
qc.q_used = u;
qc.q_total = t;
qc.q_state = 2;
},
}
},
1 => return Err(FsError::NotImplemented),
_ => {},
}
// if not "/", return for "used" just the size of this file/dir.
let used = if path.as_bytes() == b"/" {
qc.q_used
} else {
meta.len()
};
// calculate available space.
let avail = match qc.q_total {
None => None,
Some(total) => Some(if total > used { total - used } else { 0 }),
};
Ok((used, avail))
}
async fn build_prop<'a>(
&'a self,
prop: &'a Element,
path: &'a DavPath,
meta: &'a dyn DavMetaData,
qc: &'a mut QuotaCache,
docontent: bool,
) -> DavResult<StatusElement>
{
// in some cases, a live property might be stored in the
// dead prop database, like DAV:displayname.
let mut try_deadprop = false;
let mut pfx = "";
match prop.namespace.as_ref().map(|x| x.as_str()) {
Some(NS_DAV_URI) => {
pfx = "D";
match prop.name.as_str() {
"creationdate" => {
if let Ok(time) = meta.created() {
let tm = systemtime_to_rfc3339(time);
return self.build_elem(docontent, pfx, prop, tm);
}
// use ctime instead - apache seems to do this.
if let Ok(ctime) = meta.status_changed() {
let mut time = ctime;
if let Ok(mtime) = meta.modified() {
if mtime < ctime {
time = mtime;
}
}
let tm = systemtime_to_rfc3339(time);
return self.build_elem(docontent, pfx, prop, tm);
}
},
"displayname" | "getcontentlanguage" => {
try_deadprop = true;
},
"getetag" => {
if let Some(etag) = meta.etag() {
return self.build_elem(docontent, pfx, prop, etag);
}
},
"getcontentlength" => {
if !meta.is_dir() {
return self.build_elem(docontent, pfx, prop, meta.len().to_string());
}
},
"getcontenttype" => {
return if meta.is_dir() {
self.build_elem(docontent, pfx, prop, "httpd/unix-directory")
} else {
self.build_elem(docontent, pfx, prop, path.get_mime_type_str())
};
},
"getlastmodified" => {
if let Ok(time) = meta.modified() {
let tm = systemtime_to_httpdate(time);
return self.build_elem(docontent, pfx, prop, tm);
}
},
"resourcetype" => {
let mut elem = prop.clone();
if meta.is_dir() && docontent {
let dir = Element::new2("D:collection");
elem.children.push(XMLNode::Element(dir));
}
return Ok(StatusElement {
status: StatusCode::OK,
element: elem,
});
},
"supportedlock" => {
return Ok(StatusElement {
status: StatusCode::OK,
element: list_supportedlock(self.ls.as_ref()),
});
},
"lockdiscovery" => {
return Ok(StatusElement {
status: StatusCode::OK,
element: list_lockdiscovery(self.ls.as_ref(), path),
});
},
"quota-available-bytes" => {
let mut qc = qc;
if let Ok((_, Some(avail))) = self.get_quota(&mut qc, path, meta).await {
return self.build_elem(docontent, pfx, prop, avail.to_string());
}
},
"quota-used-bytes" => {
let mut qc = qc;
if let Ok((used, _)) = self.get_quota(&mut qc, path, meta).await {
let used = if self.useragent.contains("WebDAVFS") {
                                // Needed on macOS (WebDAVFS), otherwise the value
                                // is off by a factor of 10 or so.
format!("{:014}", used)
} else {
used.to_string()
};
return self.build_elem(docontent, pfx, prop, used);
}
},
_ => {},
}
},
Some(NS_APACHE_URI) => {
pfx = "A";
match prop.name.as_str() {
"executable" => {
if let Ok(x) = meta.executable() {
let b = if x { "T" } else { "F" };
return self.build_elem(docontent, pfx, prop, b);
}
},
_ => {},
}
},
Some(NS_MS_URI) => {
pfx = "Z";
match prop.name.as_str() {
"Win32CreationTime" => {
if let Ok(time) = meta.created() {
let tm = systemtime_to_httpdate(time);
return self.build_elem(docontent, pfx, prop, tm);
}
// use ctime instead - apache seems to do this.
if let Ok(ctime) = meta.status_changed() {
let mut time = ctime;
if let Ok(mtime) = meta.modified() {
if mtime < ctime {
time = mtime;
}
}
let tm = systemtime_to_httpdate(time);
return self.build_elem(docontent, pfx, prop, tm);
}
},
"Win32LastAccessTime" => {
if let Ok(time) = meta.accessed() {
let tm = systemtime_to_httpdate(time);
return self.build_elem(docontent, pfx, prop, tm);
}
},
"Win32LastModifiedTime" => {
if let Ok(time) = meta.modified() {
let tm = systemtime_to_httpdate(time);
return self.build_elem(docontent, pfx, prop, tm);
}
},
"Win32FileAttributes" => {
let mut attr = 0u32;
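                        // Win32 file attribute bits: 0x0001 readonly, 0x0002 hidden,
                        // 0x0010 directory, 0x0020 archive.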
// Enable when we implement permissions() on DavMetaData.
//if meta.permissions().readonly() {
// attr |= 0x0001;
//}
if path.file_name().starts_with(b".") {
attr |= 0x0002;
}
if meta.is_dir() {
attr |= 0x0010;
} else {
// this is the 'Archive' bit, which is set by
// default on _all_ files on creation and on
// modification.
attr |= 0x0020;
}
return self.build_elem(docontent, pfx, prop, format!("{:08x}", attr));
},
_ => {},
}
},
_ => {
try_deadprop = true;
},
}
if try_deadprop && self.name == "prop" && self.fs.have_props(path).await {
// asking for a specific property.
let dprop = element_to_davprop(prop);
if let Ok(xml) = self.fs.get_prop(path, dprop).await {
if let Ok(e) = Element::parse(Cursor::new(xml)) {
return Ok(StatusElement {
status: StatusCode::OK,
element: e,
});
}
}
}
let prop = if pfx != "" {
self.build_elem(false, pfx, prop, "").map(|s| s.element).unwrap()
} else {
prop.clone()
};
Ok(StatusElement {
status: StatusCode::NOT_FOUND,
element: prop,
})
}
pub async fn write_props<'a>(
&'a mut self,
path: &'a DavPath,
meta: Box<dyn DavMetaData + 'static>,
) -> Result<(), DavError>
{
// A HashMap<StatusCode, Vec<Element>> for the result.
let mut props = HashMap::new();
// Get properties one-by-one
let do_content = self.name != "propname";
let mut qc = self.q_cache;
for p in &self.props {
let res = self.build_prop(p, path, &*meta, &mut qc, do_content).await?;
if res.status == StatusCode::OK || (self.name != "propname" && self.name != "allprop") {
add_sc_elem(&mut props, res.status, res.element);
}
}
self.q_cache = qc;
// and list the dead properties as well.
if (self.name == "propname" || self.name == "allprop") && self.fs.have_props(path).await {
if let Ok(v) = self.fs.get_props(path, do_content).await {
v.into_iter()
.map(davprop_to_element)
.for_each(|e| add_sc_elem(&mut props, StatusCode::OK, e));
}
}
Ok::<(), DavError>(self.write_propresponse(path, props)?)
}
pub fn write_propresponse(
&mut self,
path: &DavPath,
props: HashMap<StatusCode, Vec<Element>>,
) -> Result<(), DavError>
{
self.emitter.write(XmlWEvent::start_element("D:response"))?;
let p = path.with_prefix().as_url_string();
Element::new2("D:href").text(p).write_ev(&mut self.emitter)?;
let mut keys = props.keys().collect::<Vec<_>>();
keys.sort();
for status in keys {
let v = props.get(status).unwrap();
self.emitter.write(XmlWEvent::start_element("D:propstat"))?;
self.emitter.write(XmlWEvent::start_element("D:prop"))?;
for i in v.iter() {
i.write_ev(&mut self.emitter)?;
}
self.emitter.write(XmlWEvent::end_element())?;
Element::new2("D:status")
.text("HTTP/1.1 ".to_string() + &status.to_string())
.write_ev(&mut self.emitter)?;
self.emitter.write(XmlWEvent::end_element())?;
}
self.emitter.write(XmlWEvent::end_element())?; // response
Ok(())
}
pub async fn flush(&mut self) -> DavResult<()> {
let buffer = self.emitter.inner_mut().take();
        let _ = self.tx.as_mut().unwrap().send(Bytes::from(buffer)).await;
Ok(())
}
pub async fn close(&mut self) -> DavResult<()> {
let _ = self.emitter.write(XmlWEvent::end_element());
self.flush().await
}
}
fn add_sc_elem(hm: &mut HashMap<StatusCode, Vec<Element>>, sc: StatusCode, e: Element) {
    hm.entry(sc).or_insert_with(Vec::new).push(e)
}
fn element_to_davprop_full(elem: &Element) -> DavProp {
let mut emitter = EventWriter::new(Cursor::new(Vec::new()));
elem.write_ev(&mut emitter).ok();
let xml = emitter.into_inner().into_inner();
DavProp {
name: elem.name.clone(),
prefix: elem.prefix.clone(),
namespace: elem.namespace.clone(),
xml: Some(xml),
}
}
fn element_to_davprop(elem: &Element) -> DavProp {
DavProp {
name: elem.name.clone(),
prefix: elem.prefix.clone(),
namespace: elem.namespace.clone(),
xml: None,
}
}
fn davprop_to_element(prop: DavProp) -> Element {
if let Some(xml) = prop.xml {
return Element::parse2(Cursor::new(xml)).unwrap();
}
let mut elem = Element::new(&prop.name);
if let Some(ref ns) = prop.namespace {
let pfx = prop.prefix.as_ref().map(|p| p.as_str()).unwrap_or("");
elem = elem.ns(pfx, ns.as_str());
}
elem.prefix = prop.prefix;
elem.namespace = prop.namespace.clone();
elem
}

269
src/handle_put.rs Normal file
View File

@@ -0,0 +1,269 @@
use std::any::Any;
use std::error::Error as StdError;
use std::io;
use bytes::{Buf, Bytes};
use headers::HeaderMapExt;
use http::StatusCode as SC;
use http::{self, Request, Response};
use http_body::Body as HttpBody;
use crate::body::Body;
use crate::conditional::if_match_get_tokens;
use crate::davheaders;
use crate::fs::*;
use crate::{DavError, DavResult};
const SABRE: &'static str = "application/x-sabredav-partialupdate";
// This is a nice hack. If the type 'E' is actually an io::Error or a Box<io::Error>,
// convert it back into a real io::Error. If it is a DavError or a Box<DavError>,
// use its Into<io::Error> impl. Otherwise just wrap the error in io::Error::new.
//
// If we had specialization this would look a lot prettier.
//
// Also, this is senseless. It's not as if we _do_ anything with the
// io::Error, other than noticing "oops, an error occurred".
fn to_ioerror<E>(err: E) -> io::Error
where E: StdError + Sync + Send + 'static {
let e = &err as &dyn Any;
if e.is::<io::Error>() || e.is::<Box<io::Error>>() {
let err = Box::new(err) as Box<dyn Any>;
match err.downcast::<io::Error>() {
Ok(e) => *e,
Err(e) => {
match e.downcast::<Box<io::Error>>() {
Ok(e) => *(*e),
Err(_) => io::ErrorKind::Other.into(),
}
},
}
} else if e.is::<DavError>() || e.is::<Box<DavError>>() {
let err = Box::new(err) as Box<dyn Any>;
match err.downcast::<DavError>() {
Ok(e) => (*e).into(),
Err(e) => {
match e.downcast::<Box<DavError>>() {
Ok(e) => (*(*e)).into(),
Err(_) => io::ErrorKind::Other.into(),
}
},
}
} else {
io::Error::new(io::ErrorKind::Other, err)
}
}
impl crate::DavInner {
pub(crate) async fn handle_put<ReqBody, ReqData, ReqError>(
self,
req: &Request<()>,
body: ReqBody,
) -> DavResult<Response<Body>>
where
ReqBody: HttpBody<Data = ReqData, Error = ReqError>,
ReqData: Buf + Send + 'static,
ReqError: StdError + Send + Sync + 'static,
{
let mut start = 0;
let mut count = 0;
let mut have_count = false;
let mut do_range = false;
let mut oo = OpenOptions::write();
oo.create = true;
oo.truncate = true;
if let Some(n) = req.headers().typed_get::<headers::ContentLength>() {
count = n.0;
have_count = true;
}
let path = self.path(&req);
let meta = self.fs.metadata(&path).await;
// close connection on error.
let mut res = Response::new(Body::empty());
res.headers_mut().typed_insert(headers::Connection::close());
// SabreDAV style PATCH?
if req.method() == &http::Method::PATCH {
if !req
.headers()
.typed_get::<davheaders::ContentType>()
.map_or(false, |ct| ct.0 == SABRE)
{
return Err(DavError::StatusClose(SC::UNSUPPORTED_MEDIA_TYPE));
}
if !have_count {
return Err(DavError::StatusClose(SC::LENGTH_REQUIRED));
};
let r = req
.headers()
.typed_get::<davheaders::XUpdateRange>()
.ok_or(DavError::StatusClose(SC::BAD_REQUEST))?;
match r {
davheaders::XUpdateRange::FromTo(b, e) => {
if b > e || e - b + 1 != count {
return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE));
}
start = b;
},
davheaders::XUpdateRange::AllFrom(b) => {
start = b;
},
davheaders::XUpdateRange::Last(n) => {
if let Ok(ref m) = meta {
if n > m.len() {
return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE));
}
start = m.len() - n;
}
},
davheaders::XUpdateRange::Append => {
oo.append = true;
},
}
do_range = true;
oo.truncate = false;
}
// Apache-style Content-Range header?
match req.headers().typed_try_get::<headers::ContentRange>() {
Ok(Some(range)) => {
if let Some((b, e)) = range.bytes_range() {
if b > e {
return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE));
}
if have_count {
if e - b + 1 != count {
return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE));
}
} else {
count = e - b + 1;
have_count = true;
}
start = b;
do_range = true;
oo.truncate = false;
}
},
Ok(None) => {},
Err(_) => return Err(DavError::StatusClose(SC::BAD_REQUEST)),
}
// check the If and If-* headers.
let tokens = if_match_get_tokens(&req, meta.as_ref().ok(), &self.fs, &self.ls, &path);
let tokens = match tokens.await {
Ok(t) => t,
Err(s) => return Err(DavError::StatusClose(s)),
};
// if locked check if we hold that lock.
if let Some(ref locksystem) = self.ls {
let t = tokens.iter().map(|s| s.as_str()).collect::<Vec<&str>>();
let principal = self.principal.as_ref().map(|s| s.as_str());
if let Err(_l) = locksystem.check(&path, principal, false, false, t) {
return Err(DavError::StatusClose(SC::LOCKED));
}
}
// tweak open options.
if req
.headers()
.typed_get::<davheaders::IfMatch>()
.map_or(false, |h| &h.0 == &davheaders::ETagList::Star)
{
oo.create = false;
}
if req
.headers()
.typed_get::<davheaders::IfNoneMatch>()
.map_or(false, |h| &h.0 == &davheaders::ETagList::Star)
{
oo.create_new = true;
}
let mut file = match self.fs.open(&path, oo).await {
Ok(f) => f,
Err(FsError::NotFound) | Err(FsError::Exists) => {
let s = if !oo.create || oo.create_new {
SC::PRECONDITION_FAILED
} else {
SC::CONFLICT
};
return Err(DavError::StatusClose(s));
},
Err(e) => return Err(DavError::FsError(e)),
};
if do_range {
// seek to beginning of requested data.
if let Err(_) = file.seek(std::io::SeekFrom::Start(start)).await {
return Err(DavError::StatusClose(SC::RANGE_NOT_SATISFIABLE));
}
}
res.headers_mut().typed_insert(headers::AcceptRanges::bytes());
pin_utils::pin_mut!(body);
// loop, read body, write to file.
let mut total = 0u64;
while let Some(data) = body.data().await {
let mut buf = data.map_err(|e| to_ioerror(e))?;
let buflen = buf.remaining();
total += buflen as u64;
// consistency check.
if have_count && total > count {
break;
}
// The `Buf` might actually be a `Bytes`.
let b = {
let b: &mut dyn std::any::Any = &mut buf;
b.downcast_mut::<Bytes>()
};
if let Some(bytes) = b {
let bytes = std::mem::replace(bytes, Bytes::new());
file.write_bytes(bytes).await?;
} else {
file.write_buf(Box::new(buf)).await?;
}
}
file.flush().await?;
if have_count && total > count {
error!("PUT file: sender is sending more bytes than expected");
return Err(DavError::StatusClose(SC::BAD_REQUEST));
}
if have_count && total < count {
error!("PUT file: premature EOF on input");
return Err(DavError::StatusClose(SC::BAD_REQUEST));
}
// Report whether we created or updated the file.
*res.status_mut() = match meta {
Ok(_) => SC::NO_CONTENT,
Err(_) => {
res.headers_mut().typed_insert(headers::ContentLength(0));
SC::CREATED
},
};
// no errors, connection may be kept open.
res.headers_mut().remove(http::header::CONNECTION);
if let Ok(m) = file.metadata().await {
if let Some(etag) = davheaders::ETag::from_meta(&m) {
res.headers_mut().typed_insert(etag);
}
if let Ok(modified) = m.modified() {
res.headers_mut()
.typed_insert(headers::LastModified::from(modified));
}
}
Ok(res)
}
}

176
src/lib.rs Normal file
View File

@@ -0,0 +1,176 @@
#![doc(html_root_url = "https://docs.rs/webdav-handler/0.2.0")]
//! ## Generic async HTTP/Webdav handler
//!
//! [`WebDav`] (RFC4918) is defined as
//! HTTP (GET/HEAD/PUT/DELETE) plus a bunch of extension methods (PROPFIND, etc).
//! These extension methods are used to manage collections (like unix directories),
//! get information on collections (like unix `ls` or `readdir`), rename and
//! copy items, lock/unlock items, etc.
//!
//! A `handler` is a piece of code that takes a `http::Request`, processes it in some
//! way, and then generates a `http::Response`. This library is a `handler` that maps
//! the HTTP/Webdav protocol to the filesystem. Or actually, "a" filesystem. Included
//! is an adapter for the local filesystem (`localfs`), and an adapter for an
//! in-memory filesystem (`memfs`).
//!
//! So this library can be used as a handler with HTTP servers like [hyper],
//! [warp], [actix-web], etc. Either as a correct and complete HTTP handler for
//! files (GET/HEAD) or as a handler for the entire Webdav protocol. In the latter case, you can
//! mount it as a remote filesystem: Linux, Windows, macOS can all mount Webdav filesystems.
//!
//! ## Backend interfaces.
//!
//! The backend interfaces are similar to the ones from the Go `x/net/webdav` package:
//!
//! - the library contains a [HTTP handler][DavHandler].
//! - you supply a [filesystem][DavFileSystem] for backend storage, which can optionally
//! implement reading/writing [DAV properties][DavProp].
//! - you can supply a [locksystem][DavLockSystem] that handles webdav locks.
//!
//! The handler in this library works with the standard http types
//! from the `http` and `http_body` crates. That means that you can use it
//! straight away with http libraries / frameworks that also work with
//! those types, like hyper. Compatibility modules for [actix-web][actix-compat]
//! and [warp][warp-compat] are also provided.
//!
//! ## Implemented standards.
//!
//! Currently [passes the "basic", "copymove", "props", "locks" and "http"
//! checks][README_litmus] of the Webdav Litmus Test testsuite. That's all of the base
//! [RFC4918] webdav specification.
//!
//! The litmus test suite also has tests for RFC3744 "acl" and "principal",
//! RFC5842 "bind", and RFC3253 "versioning". Those we do not support right now.
//!
//! The relevant parts of the HTTP RFCs are also implemented, such as the
//! preconditions (If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since,
//! If-Range) and partial transfers (Range).
//!
//! Also implemented is `partial PUT`, for which there are currently two
//! non-standard ways to do it: [`PUT` with the `Content-Range` header][PUT],
//! which is what Apache's `mod_dav` implements, and [`PATCH` with the `X-Update-Range`
//! header][PATCH] from `SabreDav`.
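//!
//! As a rough illustration (the path and byte range below are made up; see the
//! linked documents for the exact semantics), the two request styles look like this:
//!
//! ```text
//! PUT /file.bin HTTP/1.1
//! Content-Range: bytes 100-199/*
//! Content-Length: 100
//!
//! PATCH /file.bin HTTP/1.1
//! Content-Type: application/x-sabredav-partialupdate
//! X-Update-Range: bytes=100-199
//! Content-Length: 100
//! ```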
//!
//! ## Backends.
//!
//! Included are two filesystems:
//!
//! - [`LocalFs`]: serves a directory on the local filesystem
//! - [`MemFs`]: ephemeral in-memory filesystem. supports DAV properties.
//!
//! Also included are two locksystems:
//!
//! - [`MemLs`]: ephemeral in-memory locksystem.
//! - [`FakeLs`]: fake locksystem. just enough LOCK/UNLOCK support for macOS/Windows.
//!
//! ## Example.
//!
//! Example server using [hyper] that serves the /tmp directory in r/w mode. You should be
//! able to mount this network share from Linux, macOS and Windows. [Examples][examples]
//! for other frameworks are also available.
//!
//! ```no_run
//! use std::convert::Infallible;
//! use webdav_handler::{fakels::FakeLs, localfs::LocalFs, DavHandler};
//!
//! #[tokio::main]
//! async fn main() {
//! let dir = "/tmp";
//! let addr = ([127, 0, 0, 1], 4918).into();
//!
//! let dav_server = DavHandler::builder()
//! .filesystem(LocalFs::new(dir, false, false, false))
//! .locksystem(FakeLs::new())
//! .build_handler();
//!
//! let make_service = hyper::service::make_service_fn(move |_| {
//! let dav_server = dav_server.clone();
//! async move {
//! let func = move |req| {
//! let dav_server = dav_server.clone();
//! async move {
//! Ok::<_, Infallible>(dav_server.handle(req).await)
//! }
//! };
//! Ok::<_, Infallible>(hyper::service::service_fn(func))
//! }
//! });
//!
//! println!("Serving {} on {}", dir, addr);
//! let _ = hyper::Server::bind(&addr)
//! .serve(make_service)
//! .await
//! .map_err(|e| eprintln!("server error: {}", e));
//! }
//! ```
//! [DavHandler]: struct.DavHandler.html
//! [DavFileSystem]: fs/index.html
//! [DavLockSystem]: ls/index.html
//! [DavProp]: fs/struct.DavProp.html
//! [`WebDav`]: https://tools.ietf.org/html/rfc4918
//! [RFC4918]: https://tools.ietf.org/html/rfc4918
//! [`MemLs`]: memls/index.html
//! [`MemFs`]: memfs/index.html
//! [`LocalFs`]: localfs/index.html
//! [`FakeLs`]: fakels/index.html
//! [actix-compat]: actix/index.html
//! [warp-compat]: warp/index.html
//! [README_litmus]: https://github.com/miquels/webdav-handler-rs/blob/master/README.litmus-test.md
//! [examples]: https://github.com/miquels/webdav-handler-rs/tree/master/examples/
//! [PUT]: https://github.com/miquels/webdav-handler-rs/tree/master/doc/Apache-PUT-with-Content-Range.md
//! [PATCH]: https://github.com/miquels/webdav-handler-rs/tree/master/doc/SABREDAV-partialupdate.md
//! [hyper]: https://hyper.rs/
//! [warp]: https://crates.io/crates/warp
//! [actix-web]: https://actix.rs/
#![cfg_attr(docsrs, feature(doc_cfg))]
#[macro_use]
extern crate log;
#[macro_use]
extern crate lazy_static;
mod async_stream;
mod conditional;
mod davhandler;
mod davheaders;
mod errors;
mod handle_copymove;
mod handle_delete;
mod handle_gethead;
mod handle_lock;
mod handle_mkcol;
mod handle_options;
mod handle_props;
mod handle_put;
mod localfs_macos;
mod localfs_windows;
mod multierror;
mod tree;
mod util;
mod voidfs;
mod xmltree_ext;
pub mod body;
pub mod davpath;
pub mod fakels;
pub mod fs;
pub mod localfs;
pub mod ls;
pub mod memfs;
pub mod memls;
#[cfg(any(docsrs, feature = "actix-compat"))]
#[cfg_attr(docsrs, doc(cfg(feature = "actix-compat")))]
pub mod actix;
#[cfg(any(docsrs, feature = "warp-compat"))]
#[cfg_attr(docsrs, doc(cfg(feature = "warp-compat")))]
pub mod warp;
pub(crate) use crate::davhandler::DavInner;
pub(crate) use crate::errors::{DavError, DavResult};
pub(crate) use crate::fs::*;
pub use crate::davhandler::{DavConfig, DavHandler};
pub use crate::util::{DavMethod, DavMethodSet};

791
src/localfs.rs Normal file
View File

@@ -0,0 +1,791 @@
//! Local filesystem access.
//!
//! This implementation is stateless. So the easiest way to use it
//! is to create a new instance in your handler every time
//! you need one.
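//!
//! A minimal sketch of that (the served directory and the flags are just example
//! values):
//!
//! ```no_run
//! use webdav_handler::{localfs::LocalFs, DavHandler};
//!
//! // Serve "/tmp": private file modes, case-sensitive, no macOS quirks.
//! let handler = DavHandler::builder()
//!     .filesystem(LocalFs::new("/tmp", false, false, false))
//!     .build_handler();
//! ```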
use std::any::Any;
use std::collections::VecDeque;
use std::future::Future;
use std::io::{self, ErrorKind, Read, Seek, SeekFrom, Write};
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::DirBuilderExt;
use std::os::unix::fs::MetadataExt;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use bytes::{Buf, Bytes, BytesMut};
use futures::{future, future::BoxFuture, FutureExt, Stream};
use pin_utils::pin_mut;
use tokio::task;
use libc;
use crate::davpath::DavPath;
use crate::fs::*;
use crate::localfs_macos::DUCacheBuilder;
const RUNTIME_TYPE_BASIC: u32 = 1;
const RUNTIME_TYPE_THREADPOOL: u32 = 2;
static RUNTIME_TYPE: AtomicU32 = AtomicU32::new(0);
#[derive(Clone, Copy)]
#[repr(u32)]
enum RuntimeType {
Basic = RUNTIME_TYPE_BASIC,
ThreadPool = RUNTIME_TYPE_THREADPOOL,
}
impl RuntimeType {
#[inline]
fn get() -> RuntimeType {
match RUNTIME_TYPE.load(Ordering::Relaxed) {
RUNTIME_TYPE_BASIC => RuntimeType::Basic,
RUNTIME_TYPE_THREADPOOL => RuntimeType::ThreadPool,
_ => {
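                // Not determined yet: sniff the runtime flavor from the Debug
                // output of the current handle (a heuristic), then cache it.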
let dbg = format!("{:?}", tokio::runtime::Handle::current());
let rt = if dbg.contains("ThreadPool") {
RuntimeType::ThreadPool
} else {
RuntimeType::Basic
};
RUNTIME_TYPE.store(rt as u32, Ordering::SeqCst);
rt
},
}
}
}
// Run some code via block_in_place() or spawn_blocking().
//
// There's also a method on LocalFs for this, use the freestanding
// function if you do not want the fs_access_guard() closure to be used.
#[inline]
async fn blocking<F, R>(func: F) -> R
where
F: FnOnce() -> R,
F: Send + 'static,
R: Send + 'static,
{
match RuntimeType::get() {
RuntimeType::Basic => task::spawn_blocking(func).await.unwrap(),
RuntimeType::ThreadPool => task::block_in_place(func),
}
}
#[derive(Debug, Clone)]
struct LocalFsMetaData(std::fs::Metadata);
/// Local Filesystem implementation.
#[derive(Clone)]
pub struct LocalFs {
pub(crate) inner: Arc<LocalFsInner>,
}
// inner struct.
pub(crate) struct LocalFsInner {
pub basedir: PathBuf,
pub public: bool,
pub case_insensitive: bool,
pub macos: bool,
pub is_file: bool,
pub fs_access_guard: Option<Box<dyn Fn() -> Box<dyn Any> + Send + Sync + 'static>>,
}
#[derive(Debug)]
struct LocalFsFile(Option<std::fs::File>);
struct LocalFsReadDir {
fs: LocalFs,
do_meta: ReadDirMeta,
buffer: VecDeque<io::Result<LocalFsDirEntry>>,
dir_cache: Option<DUCacheBuilder>,
iterator: Option<std::fs::ReadDir>,
fut: Option<BoxFuture<'static, ReadDirBatch>>,
}
// a DirEntry either already has the metadata available, or a handle
// to the filesystem so it can call fs.blocking()
enum Meta {
Data(io::Result<std::fs::Metadata>),
Fs(LocalFs),
}
// Items from the readdir stream.
struct LocalFsDirEntry {
meta: Meta,
entry: std::fs::DirEntry,
}
impl LocalFs {
/// Create a new LocalFs DavFileSystem, serving "base".
///
/// If "public" is set to true, all files and directories created will be
    /// publicly readable (mode 644/755), otherwise they will be private
    /// (mode 600/700). Umask still overrides this.
///
/// If "case_insensitive" is set to true, all filesystem lookups will
/// be case insensitive. Note that this has a _lot_ of overhead!
pub fn new<P: AsRef<Path>>(base: P, public: bool, case_insensitive: bool, macos: bool) -> Box<LocalFs> {
let inner = LocalFsInner {
basedir: base.as_ref().to_path_buf(),
public: public,
macos: macos,
case_insensitive: case_insensitive,
is_file: false,
fs_access_guard: None,
};
Box::new({
LocalFs {
inner: Arc::new(inner),
}
})
}
/// Create a new LocalFs DavFileSystem, serving "file".
///
/// This is like `new()`, but it always serves this single file.
/// The request path is ignored.
pub fn new_file<P: AsRef<Path>>(file: P, public: bool) -> Box<LocalFs> {
let inner = LocalFsInner {
basedir: file.as_ref().to_path_buf(),
public: public,
macos: false,
case_insensitive: false,
is_file: true,
fs_access_guard: None,
};
Box::new({
LocalFs {
inner: Arc::new(inner),
}
})
}
// Like new() but pass in a fs_access_guard hook.
#[doc(hidden)]
pub fn new_with_fs_access_guard<P: AsRef<Path>>(
base: P,
public: bool,
case_insensitive: bool,
macos: bool,
fs_access_guard: Option<Box<dyn Fn() -> Box<dyn Any> + Send + Sync + 'static>>,
) -> Box<LocalFs>
{
let inner = LocalFsInner {
basedir: base.as_ref().to_path_buf(),
public: public,
macos: macos,
case_insensitive: case_insensitive,
is_file: false,
fs_access_guard: fs_access_guard,
};
Box::new({
LocalFs {
inner: Arc::new(inner),
}
})
}
fn fspath_dbg(&self, path: &DavPath) -> PathBuf {
let mut pathbuf = self.inner.basedir.clone();
if !self.inner.is_file {
pathbuf.push(path.as_rel_ospath());
}
pathbuf
}
fn fspath(&self, path: &DavPath) -> PathBuf {
if self.inner.case_insensitive {
crate::localfs_windows::resolve(&self.inner.basedir, &path)
} else {
let mut pathbuf = self.inner.basedir.clone();
if !self.inner.is_file {
pathbuf.push(path.as_rel_ospath());
}
pathbuf
}
}
// threadpool::blocking() adapter, also runs the before/after hooks.
#[doc(hidden)]
pub async fn blocking<F, R>(&self, func: F) -> R
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
let this = self.clone();
blocking(move || {
let _guard = this.inner.fs_access_guard.as_ref().map(|f| f());
func()
})
.await
}
}
// This implementation is basically a bunch of boilerplate to
// wrap the std::fs call in self.blocking() calls.
impl DavFileSystem for LocalFs {
fn metadata<'a>(&'a self, davpath: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
async move {
if let Some(meta) = self.is_virtual(davpath) {
return Ok(meta);
}
let path = self.fspath(davpath);
if self.is_notfound(&path) {
return Err(FsError::NotFound);
}
self.blocking(move || {
match std::fs::metadata(path) {
Ok(meta) => Ok(Box::new(LocalFsMetaData(meta)) as Box<dyn DavMetaData>),
Err(e) => Err(e.into()),
}
})
.await
}
.boxed()
}
fn symlink_metadata<'a>(&'a self, davpath: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
async move {
if let Some(meta) = self.is_virtual(davpath) {
return Ok(meta);
}
let path = self.fspath(davpath);
if self.is_notfound(&path) {
return Err(FsError::NotFound);
}
self.blocking(move || {
match std::fs::symlink_metadata(path) {
Ok(meta) => Ok(Box::new(LocalFsMetaData(meta)) as Box<dyn DavMetaData>),
Err(e) => Err(e.into()),
}
})
.await
}
.boxed()
}
// read_dir is a bit more involved - but not much - than a simple wrapper,
// because it returns a stream.
fn read_dir<'a>(
&'a self,
davpath: &'a DavPath,
meta: ReadDirMeta,
) -> FsFuture<FsStream<Box<dyn DavDirEntry>>>
{
async move {
trace!("FS: read_dir {:?}", self.fspath_dbg(davpath));
let path = self.fspath(davpath);
let path2 = path.clone();
let iter = self.blocking(move || std::fs::read_dir(&path)).await;
match iter {
Ok(iterator) => {
let strm = LocalFsReadDir {
fs: self.clone(),
do_meta: meta,
buffer: VecDeque::new(),
dir_cache: self.dir_cache_builder(path2),
iterator: Some(iterator),
fut: None,
};
Ok(Box::pin(strm) as FsStream<Box<dyn DavDirEntry>>)
},
Err(e) => Err(e.into()),
}
}
.boxed()
}
fn open<'a>(&'a self, path: &'a DavPath, options: OpenOptions) -> FsFuture<Box<dyn DavFile>> {
async move {
trace!("FS: open {:?}", self.fspath_dbg(path));
if self.is_forbidden(path) {
return Err(FsError::Forbidden);
}
let mode = if self.inner.public { 0o644 } else { 0o600 };
let path = self.fspath(path);
self.blocking(move || {
let res = std::fs::OpenOptions::new()
.read(options.read)
.write(options.write)
.append(options.append)
.truncate(options.truncate)
.create(options.create)
.create_new(options.create_new)
.mode(mode)
.open(path);
match res {
Ok(file) => Ok(Box::new(LocalFsFile(Some(file))) as Box<dyn DavFile>),
Err(e) => Err(e.into()),
}
})
.await
}
.boxed()
}
fn create_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
async move {
trace!("FS: create_dir {:?}", self.fspath_dbg(path));
if self.is_forbidden(path) {
return Err(FsError::Forbidden);
}
let mode = if self.inner.public { 0o755 } else { 0o700 };
let path = self.fspath(path);
self.blocking(move || {
std::fs::DirBuilder::new()
.mode(mode)
.create(path)
.map_err(|e| e.into())
})
.await
}
.boxed()
}
fn remove_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
async move {
trace!("FS: remove_dir {:?}", self.fspath_dbg(path));
let path = self.fspath(path);
self.blocking(move || std::fs::remove_dir(path).map_err(|e| e.into()))
.await
}
.boxed()
}
fn remove_file<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
async move {
trace!("FS: remove_file {:?}", self.fspath_dbg(path));
if self.is_forbidden(path) {
return Err(FsError::Forbidden);
}
let path = self.fspath(path);
self.blocking(move || std::fs::remove_file(path).map_err(|e| e.into()))
.await
}
.boxed()
}
fn rename<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
async move {
trace!("FS: rename {:?} {:?}", self.fspath_dbg(from), self.fspath_dbg(to));
if self.is_forbidden(from) || self.is_forbidden(to) {
return Err(FsError::Forbidden);
}
let frompath = self.fspath(from);
let topath = self.fspath(to);
self.blocking(move || {
match std::fs::rename(&frompath, &topath) {
Ok(v) => Ok(v),
Err(e) => {
// webdav allows a rename from a directory to a file.
// note that this check is racy, and I'm not quite sure what
// we should do if the source is a symlink. anyway ...
if e.raw_os_error() == Some(libc::ENOTDIR) && frompath.is_dir() {
// remove and try again.
let _ = std::fs::remove_file(&topath);
std::fs::rename(frompath, topath).map_err(|e| e.into())
} else {
Err(e.into())
}
},
}
})
.await
}
.boxed()
}
fn copy<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
async move {
trace!("FS: copy {:?} {:?}", self.fspath_dbg(from), self.fspath_dbg(to));
if self.is_forbidden(from) || self.is_forbidden(to) {
return Err(FsError::Forbidden);
}
let path_from = self.fspath(from);
let path_to = self.fspath(to);
match self.blocking(move || std::fs::copy(path_from, path_to)).await {
Ok(_) => Ok(()),
Err(e) => {
debug!(
"copy({:?}, {:?}) failed: {}",
self.fspath_dbg(from),
self.fspath_dbg(to),
e
);
Err(e.into())
},
}
}
.boxed()
}
}
// read_batch() result.
struct ReadDirBatch {
iterator: Option<std::fs::ReadDir>,
buffer: VecDeque<io::Result<LocalFsDirEntry>>,
}
// Read the next batch of LocalFsDirEntry structs (up to 256).
// This is sync code, must be run in `blocking()`.
fn read_batch(iterator: Option<std::fs::ReadDir>, fs: LocalFs, do_meta: ReadDirMeta) -> ReadDirBatch {
let mut buffer = VecDeque::new();
let mut iterator = match iterator {
Some(i) => i,
None => {
return ReadDirBatch {
buffer,
iterator: None,
}
},
};
let _guard = match do_meta {
ReadDirMeta::None => None,
_ => fs.inner.fs_access_guard.as_ref().map(|f| f()),
};
for _ in 0..256 {
match iterator.next() {
Some(Ok(entry)) => {
let meta = match do_meta {
ReadDirMeta::Data => Meta::Data(std::fs::metadata(entry.path())),
ReadDirMeta::DataSymlink => Meta::Data(entry.metadata()),
ReadDirMeta::None => Meta::Fs(fs.clone()),
};
let d = LocalFsDirEntry {
meta: meta,
entry: entry,
};
buffer.push_back(Ok(d))
},
Some(Err(e)) => {
buffer.push_back(Err(e));
break;
},
None => break,
}
}
ReadDirBatch {
buffer,
iterator: Some(iterator),
}
}
impl LocalFsReadDir {
// Create a future that calls read_batch().
//
// The 'iterator' is moved into the future, and returned when it completes,
// together with a list of directory entries.
fn read_batch(&mut self) -> BoxFuture<'static, ReadDirBatch> {
let iterator = self.iterator.take();
let fs = self.fs.clone();
let do_meta = self.do_meta;
let fut: BoxFuture<ReadDirBatch> = blocking(move || read_batch(iterator, fs, do_meta)).boxed();
fut
}
}
// The stream implementation tries to be smart and batch I/O operations
impl<'a> Stream for LocalFsReadDir {
type Item = Box<dyn DavDirEntry>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = Pin::into_inner(self);
// If the buffer is empty, fill it.
if this.buffer.len() == 0 {
// If we have no pending future, create one.
if this.fut.is_none() {
if this.iterator.is_none() {
return Poll::Ready(None);
}
this.fut = Some(this.read_batch());
}
// Poll the future.
let fut = this.fut.as_mut().unwrap();
pin_mut!(fut);
match Pin::new(&mut fut).poll(cx) {
Poll::Ready(batch) => {
this.fut.take();
if let Some(ref mut nb) = this.dir_cache {
for e in &batch.buffer {
if let Ok(ref e) = e {
nb.add(e.entry.file_name());
}
}
}
this.buffer = batch.buffer;
this.iterator = batch.iterator;
},
Poll::Pending => return Poll::Pending,
}
}
// we filled the buffer, now pop from the buffer.
match this.buffer.pop_front() {
Some(Ok(item)) => Poll::Ready(Some(Box::new(item))),
Some(Err(_)) | None => {
// fuse the iterator.
this.iterator.take();
// finish the cache.
if let Some(ref mut nb) = this.dir_cache {
nb.finish();
}
// return end-of-stream.
Poll::Ready(None)
},
}
}
}
enum Is {
File,
Dir,
Symlink,
}
impl LocalFsDirEntry {
async fn is_a(&self, is: Is) -> FsResult<bool> {
match self.meta {
Meta::Data(Ok(ref meta)) => {
Ok(match is {
Is::File => meta.file_type().is_file(),
Is::Dir => meta.file_type().is_dir(),
Is::Symlink => meta.file_type().is_symlink(),
})
},
Meta::Data(Err(ref e)) => Err(e.into()),
Meta::Fs(ref fs) => {
let fullpath = self.entry.path();
let ft = fs
.blocking(move || std::fs::metadata(&fullpath))
.await?
.file_type();
Ok(match is {
Is::File => ft.is_file(),
Is::Dir => ft.is_dir(),
Is::Symlink => ft.is_symlink(),
})
},
}
}
}
impl DavDirEntry for LocalFsDirEntry {
fn metadata<'a>(&'a self) -> FsFuture<Box<dyn DavMetaData>> {
match self.meta {
Meta::Data(ref meta) => {
let m = match meta {
Ok(meta) => Ok(Box::new(LocalFsMetaData(meta.clone())) as Box<dyn DavMetaData>),
Err(e) => Err(e.into()),
};
Box::pin(future::ready(m))
},
Meta::Fs(ref fs) => {
let fullpath = self.entry.path();
fs.blocking(move || {
match std::fs::metadata(&fullpath) {
Ok(meta) => Ok(Box::new(LocalFsMetaData(meta)) as Box<dyn DavMetaData>),
Err(e) => Err(e.into()),
}
})
.boxed()
},
}
}
fn name(&self) -> Vec<u8> {
self.entry.file_name().as_bytes().to_vec()
}
fn is_dir<'a>(&'a self) -> FsFuture<bool> {
Box::pin(self.is_a(Is::Dir))
}
fn is_file<'a>(&'a self) -> FsFuture<bool> {
Box::pin(self.is_a(Is::File))
}
fn is_symlink<'a>(&'a self) -> FsFuture<bool> {
Box::pin(self.is_a(Is::Symlink))
}
}
impl DavFile for LocalFsFile {
fn metadata<'a>(&'a mut self) -> FsFuture<Box<dyn DavMetaData>> {
async move {
let file = self.0.take().unwrap();
let (meta, file) = blocking(move || (file.metadata(), file)).await;
self.0 = Some(file);
Ok(Box::new(LocalFsMetaData(meta?)) as Box<dyn DavMetaData>)
}
.boxed()
}
fn write_bytes<'a>(&'a mut self, buf: Bytes) -> FsFuture<()> {
async move {
let mut file = self.0.take().unwrap();
let (res, file) = blocking(move || (file.write_all(&buf), file)).await;
self.0 = Some(file);
res.map_err(|e| e.into())
}
.boxed()
}
fn write_buf<'a>(&'a mut self, mut buf: Box<dyn Buf + Send>) -> FsFuture<()> {
async move {
let mut file = self.0.take().unwrap();
let (res, file) = blocking(move || {
while buf.remaining() > 0 {
let n = match file.write(buf.chunk()) {
Ok(n) => n,
Err(e) => return (Err(e), file),
};
buf.advance(n);
}
(Ok(()), file)
})
.await;
self.0 = Some(file);
res.map_err(|e| e.into())
}
.boxed()
}
fn read_bytes<'a>(&'a mut self, count: usize) -> FsFuture<Bytes> {
async move {
let mut file = self.0.take().unwrap();
let (res, file) = blocking(move || {
let mut buf = BytesMut::with_capacity(count);
let res = unsafe {
buf.set_len(count);
file.read(&mut buf).map(|n| {
buf.set_len(n);
buf.freeze()
})
};
(res, file)
})
.await;
self.0 = Some(file);
res.map_err(|e| e.into())
}
.boxed()
}
fn seek<'a>(&'a mut self, pos: SeekFrom) -> FsFuture<u64> {
async move {
let mut file = self.0.take().unwrap();
let (res, file) = blocking(move || (file.seek(pos), file)).await;
self.0 = Some(file);
res.map_err(|e| e.into())
}
.boxed()
}
fn flush<'a>(&'a mut self) -> FsFuture<()> {
async move {
let mut file = self.0.take().unwrap();
let (res, file) = blocking(move || (file.flush(), file)).await;
self.0 = Some(file);
res.map_err(|e| e.into())
}
.boxed()
}
}
impl DavMetaData for LocalFsMetaData {
fn len(&self) -> u64 {
self.0.len()
}
fn created(&self) -> FsResult<SystemTime> {
self.0.created().map_err(|e| e.into())
}
fn modified(&self) -> FsResult<SystemTime> {
self.0.modified().map_err(|e| e.into())
}
fn accessed(&self) -> FsResult<SystemTime> {
self.0.accessed().map_err(|e| e.into())
}
fn status_changed(&self) -> FsResult<SystemTime> {
Ok(UNIX_EPOCH + Duration::new(self.0.ctime() as u64, 0))
}
fn is_dir(&self) -> bool {
self.0.is_dir()
}
fn is_file(&self) -> bool {
self.0.is_file()
}
fn is_symlink(&self) -> bool {
self.0.file_type().is_symlink()
}
fn executable(&self) -> FsResult<bool> {
if self.0.is_file() {
return Ok((self.0.permissions().mode() & 0o100) > 0);
}
Err(FsError::NotImplemented)
}
// same as the default apache etag.
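    // Format: "{inode:x}-{len:x}-{mtime_us:x}" for files, "{inode:x}-{mtime_us:x}" for directories.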
fn etag(&self) -> Option<String> {
let modified = self.0.modified().ok()?;
let t = modified.duration_since(UNIX_EPOCH).ok()?;
let t = t.as_secs() * 1000000 + t.subsec_nanos() as u64 / 1000;
if self.is_file() {
Some(format!("{:x}-{:x}-{:x}", self.0.ino(), self.0.len(), t))
} else {
Some(format!("{:x}-{:x}", self.0.ino(), t))
}
}
}
impl From<&io::Error> for FsError {
fn from(e: &io::Error) -> Self {
if let Some(errno) = e.raw_os_error() {
// specific errors.
match errno {
libc::EMLINK | libc::ENOSPC | libc::EDQUOT => return FsError::InsufficientStorage,
libc::EFBIG => return FsError::TooLarge,
libc::EACCES | libc::EPERM => return FsError::Forbidden,
libc::ENOTEMPTY | libc::EEXIST => return FsError::Exists,
libc::ELOOP => return FsError::LoopDetected,
libc::ENAMETOOLONG => return FsError::PathTooLong,
libc::ENOTDIR => return FsError::Forbidden,
libc::EISDIR => return FsError::Forbidden,
libc::EROFS => return FsError::Forbidden,
libc::ENOENT => return FsError::NotFound,
libc::ENOSYS => return FsError::NotImplemented,
libc::EXDEV => return FsError::IsRemote,
_ => {},
}
} else {
// not an OS error - must be "not implemented"
// (e.g. metadata().created() on systems without st_crtime)
return FsError::NotImplemented;
}
        // generic mappings for whatever is left.
match e.kind() {
ErrorKind::NotFound => FsError::NotFound,
ErrorKind::PermissionDenied => FsError::Forbidden,
_ => FsError::GeneralFailure,
}
}
}
impl From<io::Error> for FsError {
fn from(e: io::Error) -> Self {
(&e).into()
}
}

282
src/localfs_macos.rs Normal file
View File

@@ -0,0 +1,282 @@
// Optimizations for macOS and the macOS finder.
//
// - after it reads a directory, macOS likes to do a PROPFIND of all
//   files in the directory with "._" prefixed. So after each PROPFIND
// with Depth: 1 we keep a cache of "._" files we've seen, so that
// we can easily tell which ones did _not_ exist.
// - deny existence of ".localized" files
// - fake a ".metadata_never_index" in the root
// - fake a ".ql_disablethumbnails" file in the root.
//
use std::ffi::OsString;
use std::os::unix::ffi::OsStrExt;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use lru::LruCache;
use parking_lot::Mutex;
use crate::davpath::DavPath;
use crate::fs::*;
use crate::localfs::LocalFs;
const DU_CACHE_ENTRIES: usize = 4096;
const DU_CACHE_MAX_AGE: u64 = 60;
const DU_CACHE_SLEEP_MS: u64 = 10037;
lazy_static! {
static ref DU_CACHE: Arc<DUCache> = Arc::new(DUCache::new(DU_CACHE_ENTRIES));
}
static DIR_ID: AtomicUsize = AtomicUsize::new(1);
// Dot underscore cache entry.
struct Entry {
// Time the entry in the cache was created.
time: SystemTime,
// Modification time of the parent directory.
dir_modtime: SystemTime,
// Unique ID of the parent entry.
dir_id: usize,
}
// Dot underscore cache.
struct DUCache {
cache: Mutex<LruCache<PathBuf, Entry>>,
}
impl DUCache {
// return a new instance.
fn new(size: usize) -> DUCache {
thread::spawn(move || {
loop {
// House keeping. Every 10 seconds, remove entries older than
// DU_CACHE_MAX_AGE seconds from the LRU cache.
thread::sleep(Duration::from_millis(DU_CACHE_SLEEP_MS));
{
let mut cache = DU_CACHE.cache.lock();
let now = SystemTime::now();
while let Some((_k, e)) = cache.peek_lru() {
if let Ok(age) = now.duration_since(e.time) {
trace!(target: "webdav_cache", "DUCache: purge check {:?}", _k);
if age.as_secs() <= DU_CACHE_MAX_AGE {
break;
}
if let Some((_k, _)) = cache.pop_lru() {
trace!(target: "webdav_cache", "DUCache: purging {:?} (age {})", _k, age.as_secs());
} else {
break;
}
} else {
break;
}
}
}
}
});
DUCache {
cache: Mutex::new(LruCache::new(size)),
}
}
// Lookup a "._filename" entry in the cache. If we are sure the path
// does _not_ exist, return `true`.
//
// Note that it's assumed the file_name() DOES start with "._".
fn negative(&self, path: &PathBuf) -> bool {
// parent directory must be present in the cache.
let mut dir = match path.parent() {
Some(d) => d.to_path_buf(),
None => return false,
};
dir.push(".");
let (dir_id, dir_modtime) = {
let cache = self.cache.lock();
match cache.peek(&dir) {
Some(t) => (t.dir_id, t.dir_modtime),
None => {
trace!(target: "webdav_cache", "DUCache::negative({:?}): parent not in cache", path);
return false;
},
}
};
// Get the metadata of the parent to see if it changed.
// This is pretty cheap, since it's most likely in the kernel cache.
let valid = match std::fs::metadata(&dir) {
Ok(m) => m.modified().map(|m| m == dir_modtime).unwrap_or(false),
Err(_) => false,
};
let mut cache = self.cache.lock();
if !valid {
trace!(target: "webdav_cache", "DUCache::negative({:?}): parent in cache but stale", path);
cache.pop(&dir);
return false;
}
        // Now if there is _no_ entry in the cache for this file,
        // or it is not valid (its dir_id differs), it did not exist
        // the last time we did a readdir().
match cache.peek(path) {
Some(t) => {
trace!(target: "webdav_cache", "DUCache::negative({:?}): in cache, valid: {}", path, t.dir_id != dir_id);
t.dir_id != dir_id
},
None => {
trace!(target: "webdav_cache", "DUCache::negative({:?}): not in cache", path);
true
},
}
}
}
// Storage for the entries of one dir while we're collecting them.
#[derive(Default)]
pub(crate) struct DUCacheBuilder {
dir: PathBuf,
entries: Vec<OsString>,
done: bool,
}
impl DUCacheBuilder {
// return a new instance.
pub fn start(dir: PathBuf) -> DUCacheBuilder {
DUCacheBuilder {
dir: dir,
entries: Vec::new(),
done: false,
}
}
// add a filename to the list we have
pub fn add(&mut self, filename: OsString) {
if let Some(f) = Path::new(&filename).file_name() {
if f.as_bytes().starts_with(b"._") {
self.entries.push(filename);
}
}
}
// Process the "._" files we collected.
//
// We add all the "._" files we saw in the directory, and the
// directory itself (with "/." added).
pub fn finish(&mut self) {
if self.done {
return;
}
self.done = true;
// Get parent directory modification time.
let meta = match std::fs::metadata(&self.dir) {
Ok(m) => m,
Err(_) => return,
};
let dir_modtime = match meta.modified() {
Ok(t) => t,
Err(_) => return,
};
let dir_id = DIR_ID.fetch_add(1, Ordering::SeqCst);
let now = SystemTime::now();
let mut cache = DU_CACHE.cache.lock();
// Add "/." to directory and store it.
let mut path = self.dir.clone();
path.push(".");
let entry = Entry {
time: now,
dir_modtime: dir_modtime,
dir_id: dir_id,
};
cache.put(path, entry);
// Now add the "._" files.
for filename in self.entries.drain(..) {
// create full path and add it to the cache.
let mut path = self.dir.clone();
path.push(filename);
let entry = Entry {
time: now,
dir_modtime: dir_modtime,
dir_id: dir_id,
};
cache.put(path, entry);
}
}
}
// Fake metadata for an empty file.
#[derive(Debug, Clone)]
struct EmptyMetaData;
impl DavMetaData for EmptyMetaData {
fn len(&self) -> u64 {
0
}
fn is_dir(&self) -> bool {
false
}
fn modified(&self) -> FsResult<SystemTime> {
// Tue May 30 04:00:00 CEST 2000
Ok(UNIX_EPOCH + Duration::new(959652000, 0))
}
fn created(&self) -> FsResult<SystemTime> {
self.modified()
}
}
impl LocalFs {
    // Is this a virtual file?
#[inline]
pub(crate) fn is_virtual(&self, path: &DavPath) -> Option<Box<dyn DavMetaData>> {
if !self.inner.macos {
return None;
}
match path.as_bytes() {
b"/.metadata_never_index" => {},
b"/.ql_disablethumbnails" => {},
_ => return None,
}
Some(Box::new(EmptyMetaData {}))
}
// This file can never exist.
#[inline]
pub(crate) fn is_forbidden(&self, path: &DavPath) -> bool {
if !self.inner.macos {
return false;
}
match path.as_bytes() {
b"/.metadata_never_index" => return true,
b"/.ql_disablethumbnails" => return true,
_ => {},
}
path.file_name() == b".localized"
}
    // File might not exist because of a negative cache entry.
#[inline]
pub(crate) fn is_notfound(&self, path: &PathBuf) -> bool {
if !self.inner.macos {
return false;
}
match path.file_name().map(|p| p.as_bytes()) {
Some(b".localized") => true,
Some(name) if name.starts_with(b"._") => DU_CACHE.negative(path),
_ => false,
}
}
// Return a "directory cache builder".
#[inline]
pub(crate) fn dir_cache_builder(&self, path: PathBuf) -> Option<DUCacheBuilder> {
if self.inner.macos {
Some(DUCacheBuilder::start(path))
} else {
None
}
}
}

244
src/localfs_windows.rs Normal file
View File

@@ -0,0 +1,244 @@
// Optimizations for windows and the windows webdav mini-redirector.
//
// The main thing here is case-insensitive path lookups,
// and caching that.
//
use std::ffi::{OsStr, OsString};
use std::fs;
use std::io::ErrorKind;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use lru::LruCache;
use parking_lot::Mutex;
use crate::davpath::DavPath;
const CACHE_ENTRIES: usize = 4096;
const CACHE_MAX_AGE: u64 = 15 * 60;
const CACHE_SLEEP_MS: u64 = 30059;
lazy_static! {
static ref CACHE: Arc<Cache> = Arc::new(Cache::new(CACHE_ENTRIES));
}
// Do a case-insensitive path lookup.
pub(crate) fn resolve<'a>(base: impl Into<PathBuf>, path: &DavPath) -> PathBuf {
let base = base.into();
let path = path.as_rel_ospath();
// must be rooted, and valid UTF-8.
let mut fullpath = base.clone();
fullpath.push(&path);
if !fullpath.has_root() || fullpath.to_str().is_none() {
return fullpath;
}
// must have a parent.
let parent = match fullpath.parent() {
Some(p) => p,
None => return fullpath,
};
// deref in advance: first lazy_static, then Arc.
let cache = &*(&*CACHE);
// In the cache?
if let Some((path, _)) = cache.get(&fullpath) {
return path;
}
// if the file exists, fine.
if fullpath.metadata().is_ok() {
return fullpath;
}
// we need the path as a list of segments.
let segs = path.iter().collect::<Vec<_>>();
if segs.len() == 0 {
return fullpath;
}
// if the parent exists, do a lookup there straight away
// instead of starting from the root.
let (parent, parent_exists) = if segs.len() > 1 {
match cache.get(parent) {
Some((path, _)) => (path, true),
None => {
let exists = parent.exists();
if exists {
cache.insert(parent);
}
(parent.to_path_buf(), exists)
},
}
} else {
(parent.to_path_buf(), true)
};
if parent_exists {
let (newpath, stop) = lookup(parent, segs[segs.len() - 1], true);
if !stop {
cache.insert(&newpath);
}
return newpath;
}
// start from the root, then add segments one by one.
let mut stop = false;
let mut newpath = base;
let lastseg = segs.len() - 1;
for (idx, seg) in segs.into_iter().enumerate() {
if !stop {
if idx == lastseg {
// Save the path leading up to this file or dir.
cache.insert(&newpath);
}
let (n, s) = lookup(newpath, seg, false);
newpath = n;
stop = s;
} else {
newpath.push(seg);
}
}
if !stop {
        // resolved successfully. save in cache.
cache.insert(&newpath);
}
newpath
}
// lookup a filename in a directory in a case insensitive way.
fn lookup(mut path: PathBuf, seg: &OsStr, no_init_check: bool) -> (PathBuf, bool) {
// does it exist as-is?
let mut path2 = path.clone();
path2.push(seg);
if !no_init_check {
match path2.metadata() {
Ok(_) => return (path2, false),
Err(ref e) if e.kind() != ErrorKind::NotFound => {
// stop on errors other than "NotFound".
return (path2, true);
},
Err(_) => {},
}
}
// first, lowercase filename.
let filename = match seg.to_str() {
Some(s) => s.to_lowercase(),
None => return (path2, true),
};
// we have to read the entire directory.
let dir = match path.read_dir() {
Ok(dir) => dir,
Err(_) => return (path2, true),
};
for entry in dir.into_iter() {
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
let entry_name = entry.file_name();
let name = match entry_name.to_str() {
Some(n) => n,
None => continue,
};
if name.to_lowercase() == filename {
path.push(&name);
return (path, false);
}
}
(path2, true)
}
// The cache stores a mapping of lowercased path -> actual path.
pub struct Cache {
cache: Mutex<LruCache<PathBuf, Entry>>,
}
#[derive(Clone)]
struct Entry {
// Full case-sensitive pathname.
path: PathBuf,
// Unix timestamp.
time: u64,
}
// helper
fn pathbuf_to_lowercase(path: PathBuf) -> PathBuf {
let s = match OsString::from(path).into_string() {
Ok(s) => OsString::from(s.to_lowercase()),
Err(s) => s,
};
PathBuf::from(s)
}
impl Cache {
pub fn new(size: usize) -> Cache {
thread::spawn(move || {
// House keeping. Every 30 seconds, remove entries older than
// CACHE_MAX_AGE seconds from the LRU cache.
loop {
thread::sleep(Duration::from_millis(CACHE_SLEEP_MS));
if let Ok(d) = SystemTime::now().duration_since(UNIX_EPOCH) {
let now = d.as_secs();
let mut cache = CACHE.cache.lock();
while let Some((_k, e)) = cache.peek_lru() {
trace!(target: "webdav_cache", "Cache: purge check: {:?}", _k);
if e.time + CACHE_MAX_AGE > now {
break;
}
let _age = now - e.time;
if let Some((_k, _)) = cache.pop_lru() {
trace!(target: "webdav_cache", "Cache: purging {:?} (age {})", _k, _age);
} else {
break;
}
}
drop(cache);
}
}
});
Cache {
cache: Mutex::new(LruCache::new(size)),
}
}
// Insert an entry into the cache.
pub fn insert(&self, path: &Path) {
let lc_path = pathbuf_to_lowercase(PathBuf::from(path));
if let Ok(d) = SystemTime::now().duration_since(UNIX_EPOCH) {
let e = Entry {
path: PathBuf::from(path),
time: d.as_secs(),
};
let mut cache = self.cache.lock();
cache.put(lc_path, e);
}
}
// Get an entry from the cache, and validate it. If it's valid
// return the actual pathname and metadata. If it's invalid remove
// it from the cache and return None.
pub fn get(&self, path: &Path) -> Option<(PathBuf, fs::Metadata)> {
// First lowercase the entire path.
let lc_path = pathbuf_to_lowercase(PathBuf::from(path));
// Lookup.
let e = {
let mut cache = self.cache.lock();
cache.get(&lc_path)?.clone()
};
// Found, validate.
match fs::metadata(&e.path) {
Err(_) => {
let mut cache = self.cache.lock();
cache.pop(&lc_path);
None
},
Ok(m) => Some((e.path, m)),
}
}
}

93
src/ls.rs Normal file
View File

@@ -0,0 +1,93 @@
//! Contains the structs and traits that define a `locksystem` backend.
//!
//! Note that the methods DO NOT return futures, they are synchronous.
//! This is because currently only two locksystems exist, `MemLs` and `FakeLs`.
//! Both of them do not do any I/O, all methods return instantly.
//!
//! If ever a locksystem gets built that does I/O (to a filesystem,
//! a database, or over the network) we'll need to revisit this.
//!
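//! A locksystem is plugged into the handler next to a filesystem. A minimal
//! sketch using the bundled `FakeLs` (any `DavLockSystem` is wired up the same way):
//!
//! ```no_run
//! use webdav_handler::{fakels::FakeLs, memfs::MemFs, DavHandler};
//!
//! let handler = DavHandler::builder()
//!     .filesystem(MemFs::new())
//!     .locksystem(FakeLs::new())
//!     .build_handler();
//! ```
//!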
use crate::davpath::DavPath;
use std::fmt::Debug;
use std::time::{Duration, SystemTime};
use xmltree::Element;
/// Type of the locks returned by DavLockSystem methods.
#[derive(Debug, Clone)]
pub struct DavLock {
/// Token.
pub token: String,
    /// Path.
pub path: DavPath,
/// Principal.
pub principal: Option<String>,
/// Owner.
pub owner: Option<Element>,
/// When the lock turns stale (absolute).
pub timeout_at: Option<SystemTime>,
/// When the lock turns stale (relative).
pub timeout: Option<Duration>,
/// Shared.
pub shared: bool,
/// Deep.
pub deep: bool,
}
/// The trait that defines a locksystem.
pub trait DavLockSystem: Debug + Sync + Send + BoxCloneLs {
/// Lock a node. Returns `Ok(new_lock)` if succeeded,
/// or `Err(conflicting_lock)` if failed.
fn lock(
&self,
path: &DavPath,
principal: Option<&str>,
owner: Option<&Element>,
timeout: Option<Duration>,
shared: bool,
deep: bool,
) -> Result<DavLock, DavLock>;
    /// Unlock a node. Returns `Ok(())` if succeeded, `Err(())` if failed
    /// (because the lock doesn't exist).
fn unlock(&self, path: &DavPath, token: &str) -> Result<(), ()>;
/// Refresh lock. Returns updated lock if succeeded.
fn refresh(&self, path: &DavPath, token: &str, timeout: Option<Duration>) -> Result<DavLock, ()>;
/// Check if node is locked and if so, if we own all the locks.
/// If not, returns as Err one conflicting lock.
fn check(
&self,
path: &DavPath,
principal: Option<&str>,
ignore_principal: bool,
deep: bool,
submitted_tokens: Vec<&str>,
) -> Result<(), DavLock>;
/// Find and return all locks that cover a given path.
fn discover(&self, path: &DavPath) -> Vec<DavLock>;
/// Delete all locks at this path and below (after MOVE or DELETE)
fn delete(&self, path: &DavPath) -> Result<(), ()>;
}
#[doc(hidden)]
pub trait BoxCloneLs {
fn box_clone(&self) -> Box<dyn DavLockSystem>;
}
// generic Clone, calls implementation-specific box_clone().
impl Clone for Box<dyn DavLockSystem> {
fn clone(&self) -> Box<dyn DavLockSystem> {
self.box_clone()
}
}
// implementation-specific clone.
#[doc(hidden)]
impl<LS: Clone + DavLockSystem + 'static> BoxCloneLs for LS {
fn box_clone(&self) -> Box<dyn DavLockSystem> {
Box::new((*self).clone())
}
}

587
src/memfs.rs Normal file
View File

@@ -0,0 +1,587 @@
//! Simple in-memory filesystem.
//!
//! This implementation has state, so if you create a
//! new instance in a handler(), it will be empty every time.
//!
//! This means you have to create the instance once, using `MemFs::new`, store
//! it in your handler struct, and clone() it every time you pass
//! it to the DavHandler. As a MemFs struct is just a handle, cloning is cheap.
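//!
//! A minimal sketch of that pattern (`MyServer` is just an example name):
//!
//! ```no_run
//! use webdav_handler::{memfs::MemFs, DavHandler};
//!
//! struct MyServer {
//!     dav: DavHandler,
//! }
//!
//! // Build the handler once; clone it (cheaply) wherever a request is served.
//! let server = MyServer {
//!     dav: DavHandler::builder()
//!         .filesystem(MemFs::new())
//!         .build_handler(),
//! };
//! ```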
use std::collections::HashMap;
use std::io::{Error, ErrorKind, SeekFrom};
use std::sync::{Arc, Mutex};
use std::time::SystemTime;
use bytes::{Buf, Bytes};
use futures::{
future,
future::{BoxFuture, FutureExt},
};
use http::StatusCode;
use crate::davpath::DavPath;
use crate::fs::*;
use crate::tree;
type Tree = tree::Tree<Vec<u8>, MemFsNode>;
/// Ephemeral in-memory filesystem.
#[derive(Debug)]
pub struct MemFs {
tree: Arc<Mutex<Tree>>,
}
#[derive(Debug, Clone)]
enum MemFsNode {
Dir(MemFsDirNode),
File(MemFsFileNode),
}
#[derive(Debug, Clone)]
struct MemFsDirNode {
props: HashMap<String, DavProp>,
mtime: SystemTime,
crtime: SystemTime,
}
#[derive(Debug, Clone)]
struct MemFsFileNode {
props: HashMap<String, DavProp>,
mtime: SystemTime,
crtime: SystemTime,
data: Vec<u8>,
}
#[derive(Debug, Clone)]
struct MemFsDirEntry {
mtime: SystemTime,
crtime: SystemTime,
is_dir: bool,
name: Vec<u8>,
size: u64,
}
#[derive(Debug)]
struct MemFsFile {
tree: Arc<Mutex<Tree>>,
node_id: u64,
pos: usize,
append: bool,
}
impl MemFs {
/// Create a new "memfs" filesystem.
pub fn new() -> Box<MemFs> {
let root = MemFsNode::new_dir();
Box::new(MemFs {
tree: Arc::new(Mutex::new(Tree::new(root))),
})
}
fn do_open(&self, tree: &mut Tree, path: &[u8], options: OpenOptions) -> FsResult<Box<dyn DavFile>> {
let node_id = match tree.lookup(path) {
Ok(n) => {
if options.create_new {
return Err(FsError::Exists);
}
n
},
Err(FsError::NotFound) => {
if !options.create {
return Err(FsError::NotFound);
}
let parent_id = tree.lookup_parent(path)?;
tree.add_child(parent_id, file_name(path), MemFsNode::new_file(), true)?
},
Err(e) => return Err(e),
};
let node = tree.get_node_mut(node_id).unwrap();
if node.is_dir() {
return Err(FsError::Forbidden);
}
if options.truncate {
node.as_file_mut()?.data.truncate(0);
node.update_mtime(SystemTime::now());
}
Ok(Box::new(MemFsFile {
tree: self.tree.clone(),
node_id: node_id,
pos: 0,
append: options.append,
}))
}
}
impl Clone for MemFs {
fn clone(&self) -> Self {
MemFs {
tree: Arc::clone(&self.tree),
}
}
}
impl DavFileSystem for MemFs {
fn metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
async move {
let tree = &*self.tree.lock().unwrap();
let node_id = tree.lookup(path.as_bytes())?;
let meta = tree.get_node(node_id)?.as_dirent(path.as_bytes());
Ok(Box::new(meta) as Box<dyn DavMetaData>)
}
.boxed()
}
fn read_dir<'a>(
&'a self,
path: &'a DavPath,
_meta: ReadDirMeta,
) -> FsFuture<FsStream<Box<dyn DavDirEntry>>>
{
async move {
let tree = &*self.tree.lock().unwrap();
let node_id = tree.lookup(path.as_bytes())?;
if !tree.get_node(node_id)?.is_dir() {
return Err(FsError::Forbidden);
}
let mut v: Vec<Box<dyn DavDirEntry>> = Vec::new();
for (name, dnode_id) in tree.get_children(node_id)? {
if let Ok(node) = tree.get_node(dnode_id) {
v.push(Box::new(node.as_dirent(&name)));
}
}
let strm = futures::stream::iter(v.into_iter());
Ok(Box::pin(strm) as FsStream<Box<dyn DavDirEntry>>)
}
.boxed()
}
fn open<'a>(&'a self, path: &'a DavPath, options: OpenOptions) -> FsFuture<Box<dyn DavFile>> {
async move {
let tree = &mut *self.tree.lock().unwrap();
self.do_open(tree, path.as_bytes(), options)
}
.boxed()
}
fn create_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
async move {
trace!("FS: create_dir {:?}", path);
let tree = &mut *self.tree.lock().unwrap();
let path = path.as_bytes();
let parent_id = tree.lookup_parent(path)?;
tree.add_child(parent_id, file_name(path), MemFsNode::new_dir(), false)?;
tree.get_node_mut(parent_id)?.update_mtime(SystemTime::now());
Ok(())
}
.boxed()
}
fn remove_file<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
async move {
let tree = &mut *self.tree.lock().unwrap();
let parent_id = tree.lookup_parent(path.as_bytes())?;
let node_id = tree.lookup(path.as_bytes())?;
tree.delete_node(node_id)?;
tree.get_node_mut(parent_id)?.update_mtime(SystemTime::now());
Ok(())
}
.boxed()
}
fn remove_dir<'a>(&'a self, path: &'a DavPath) -> FsFuture<()> {
async move {
let tree = &mut *self.tree.lock().unwrap();
let parent_id = tree.lookup_parent(path.as_bytes())?;
let node_id = tree.lookup(path.as_bytes())?;
tree.delete_node(node_id)?;
tree.get_node_mut(parent_id)?.update_mtime(SystemTime::now());
Ok(())
}
.boxed()
}
fn rename<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
async move {
let tree = &mut *self.tree.lock().unwrap();
let node_id = tree.lookup(from.as_bytes())?;
let parent_id = tree.lookup_parent(from.as_bytes())?;
let dst_id = tree.lookup_parent(to.as_bytes())?;
tree.move_node(node_id, dst_id, file_name(to.as_bytes()), true)?;
tree.get_node_mut(parent_id)?.update_mtime(SystemTime::now());
tree.get_node_mut(dst_id)?.update_mtime(SystemTime::now());
Ok(())
}
.boxed()
}
fn copy<'a>(&'a self, from: &'a DavPath, to: &'a DavPath) -> FsFuture<()> {
async move {
let tree = &mut *self.tree.lock().unwrap();
// source must exist.
let snode_id = tree.lookup(from.as_bytes())?;
// make sure destination exists, create if needed.
{
let mut oo = OpenOptions::write();
oo.create = true;
self.do_open(tree, to.as_bytes(), oo)?;
}
let dnode_id = tree.lookup(to.as_bytes())?;
// copy.
let mut data = (*tree.get_node_mut(snode_id)?).clone();
match data {
MemFsNode::Dir(ref mut d) => d.crtime = SystemTime::now(),
MemFsNode::File(ref mut f) => f.crtime = SystemTime::now(),
}
*tree.get_node_mut(dnode_id)? = data;
Ok(())
}
.boxed()
}
fn have_props<'a>(&'a self, _path: &'a DavPath) -> BoxFuture<'a, bool> {
future::ready(true).boxed()
}
fn patch_props<'a>(
&'a self,
path: &'a DavPath,
        patch: Vec<(bool, DavProp)>,
) -> FsFuture<Vec<(StatusCode, DavProp)>>
{
async move {
let tree = &mut *self.tree.lock().unwrap();
let node_id = tree.lookup(path.as_bytes())?;
let node = tree.get_node_mut(node_id)?;
let props = node.get_props_mut();
let mut res = Vec::new();
            for (set, p) in patch.into_iter() {
let prop = cloneprop(&p);
let status = if set {
props.insert(propkey(&p.namespace, &p.name), p);
StatusCode::OK
} else {
props.remove(&propkey(&p.namespace, &p.name));
                    // The commented-out `.map()` below would distinguish between removing
                    // an existing and a non-existent property, but removing a non-existent
                    // property should always succeed, so just return success.
// .map(|_| StatusCode::OK).unwrap_or(StatusCode::NOT_FOUND)
StatusCode::OK
};
res.push((status, prop));
}
Ok(res)
}
.boxed()
}
fn get_props<'a>(&'a self, path: &'a DavPath, do_content: bool) -> FsFuture<Vec<DavProp>> {
async move {
let tree = &mut *self.tree.lock().unwrap();
let node_id = tree.lookup(path.as_bytes())?;
let node = tree.get_node(node_id)?;
let mut res = Vec::new();
for (_, p) in node.get_props() {
res.push(if do_content { p.clone() } else { cloneprop(p) });
}
Ok(res)
}
.boxed()
}
fn get_prop<'a>(&'a self, path: &'a DavPath, prop: DavProp) -> FsFuture<Vec<u8>> {
async move {
let tree = &mut *self.tree.lock().unwrap();
let node_id = tree.lookup(path.as_bytes())?;
let node = tree.get_node(node_id)?;
let p = node
.get_props()
.get(&propkey(&prop.namespace, &prop.name))
.ok_or(FsError::NotFound)?;
Ok(p.xml.clone().ok_or(FsError::NotFound)?)
}
.boxed()
}
}
// Small helper: build the property hash key from namespace + name.
fn propkey(ns: &Option<String>, name: &str) -> String {
    ns.as_deref().unwrap_or("").to_string() + name
}
// Small helper: clone a DavProp without its XML content.
fn cloneprop(p: &DavProp) -> DavProp {
DavProp {
name: p.name.clone(),
namespace: p.namespace.clone(),
prefix: p.prefix.clone(),
xml: None,
}
}
impl DavDirEntry for MemFsDirEntry {
fn metadata<'a>(&'a self) -> FsFuture<Box<dyn DavMetaData>> {
let meta = (*self).clone();
Box::pin(future::ok(Box::new(meta) as Box<dyn DavMetaData>))
}
fn name(&self) -> Vec<u8> {
self.name.clone()
}
}
impl DavFile for MemFsFile {
fn metadata<'a>(&'a mut self) -> FsFuture<Box<dyn DavMetaData>> {
async move {
let tree = &*self.tree.lock().unwrap();
let node = tree.get_node(self.node_id)?;
let meta = node.as_dirent(b"");
Ok(Box::new(meta) as Box<dyn DavMetaData>)
}
.boxed()
}
fn read_bytes<'a>(&'a mut self, count: usize) -> FsFuture<Bytes> {
async move {
let tree = &*self.tree.lock().unwrap();
let node = tree.get_node(self.node_id)?;
let file = node.as_file()?;
let curlen = file.data.len();
let mut start = self.pos;
let mut end = self.pos + count;
if start > curlen {
start = curlen
}
if end > curlen {
end = curlen
}
let cnt = end - start;
self.pos += cnt;
Ok(Bytes::copy_from_slice(&file.data[start..end]))
}
.boxed()
}
fn write_bytes<'a>(&'a mut self, buf: Bytes) -> FsFuture<()> {
async move {
let tree = &mut *self.tree.lock().unwrap();
let node = tree.get_node_mut(self.node_id)?;
let file = node.as_file_mut()?;
if self.append {
self.pos = file.data.len();
}
let end = self.pos + buf.len();
if end > file.data.len() {
file.data.resize(end, 0);
}
file.data[self.pos..end].copy_from_slice(&buf);
self.pos = end;
Ok(())
}
.boxed()
}
fn write_buf<'a>(&'a mut self, mut buf: Box<dyn Buf + Send>) -> FsFuture<()> {
async move {
let tree = &mut *self.tree.lock().unwrap();
let node = tree.get_node_mut(self.node_id)?;
let file = node.as_file_mut()?;
if self.append {
self.pos = file.data.len();
}
let end = self.pos + buf.remaining();
if end > file.data.len() {
file.data.resize(end, 0);
}
while buf.has_remaining() {
let b = buf.chunk();
let len = b.len();
file.data[self.pos..self.pos + len].copy_from_slice(b);
buf.advance(len);
self.pos += len;
}
Ok(())
}
.boxed()
}
fn flush<'a>(&'a mut self) -> FsFuture<()> {
future::ok(()).boxed()
}
fn seek<'a>(&'a mut self, pos: SeekFrom) -> FsFuture<u64> {
async move {
let (start, offset): (u64, i64) = match pos {
SeekFrom::Start(npos) => {
self.pos = npos as usize;
return Ok(npos);
},
SeekFrom::Current(npos) => (self.pos as u64, npos),
SeekFrom::End(npos) => {
let tree = &*self.tree.lock().unwrap();
let node = tree.get_node(self.node_id)?;
let curlen = node.as_file()?.data.len() as u64;
(curlen, npos)
},
};
if offset < 0 {
if -offset as u64 > start {
return Err(Error::new(ErrorKind::InvalidInput, "invalid seek").into());
}
self.pos = (start - (-offset as u64)) as usize;
} else {
self.pos = (start + offset as u64) as usize;
}
Ok(self.pos as u64)
}
.boxed()
}
}
impl DavMetaData for MemFsDirEntry {
fn len(&self) -> u64 {
self.size
}
fn created(&self) -> FsResult<SystemTime> {
Ok(self.crtime)
}
fn modified(&self) -> FsResult<SystemTime> {
Ok(self.mtime)
}
fn is_dir(&self) -> bool {
self.is_dir
}
}
impl MemFsNode {
fn new_dir() -> MemFsNode {
MemFsNode::Dir(MemFsDirNode {
crtime: SystemTime::now(),
mtime: SystemTime::now(),
props: HashMap::new(),
})
}
fn new_file() -> MemFsNode {
MemFsNode::File(MemFsFileNode {
crtime: SystemTime::now(),
mtime: SystemTime::now(),
props: HashMap::new(),
data: Vec::new(),
})
}
// helper to create MemFsDirEntry from a node.
fn as_dirent(&self, name: &[u8]) -> MemFsDirEntry {
let (is_dir, size, mtime, crtime) = match self {
&MemFsNode::File(ref file) => (false, file.data.len() as u64, file.mtime, file.crtime),
&MemFsNode::Dir(ref dir) => (true, 0, dir.mtime, dir.crtime),
};
MemFsDirEntry {
name: name.to_vec(),
mtime: mtime,
crtime: crtime,
is_dir: is_dir,
size: size as u64,
}
}
fn update_mtime(&mut self, tm: std::time::SystemTime) {
match self {
&mut MemFsNode::Dir(ref mut d) => d.mtime = tm,
&mut MemFsNode::File(ref mut f) => f.mtime = tm,
}
}
fn is_dir(&self) -> bool {
match self {
&MemFsNode::Dir(_) => true,
&MemFsNode::File(_) => false,
}
}
fn as_file(&self) -> FsResult<&MemFsFileNode> {
match self {
&MemFsNode::File(ref n) => Ok(n),
_ => Err(FsError::Forbidden),
}
}
fn as_file_mut(&mut self) -> FsResult<&mut MemFsFileNode> {
match self {
&mut MemFsNode::File(ref mut n) => Ok(n),
_ => Err(FsError::Forbidden),
}
}
fn get_props(&self) -> &HashMap<String, DavProp> {
match self {
&MemFsNode::File(ref n) => &n.props,
&MemFsNode::Dir(ref d) => &d.props,
}
}
fn get_props_mut(&mut self) -> &mut HashMap<String, DavProp> {
match self {
&mut MemFsNode::File(ref mut n) => &mut n.props,
&mut MemFsNode::Dir(ref mut d) => &mut d.props,
}
}
}
trait TreeExt {
fn lookup_segs(&self, segs: Vec<&[u8]>) -> FsResult<u64>;
fn lookup(&self, path: &[u8]) -> FsResult<u64>;
fn lookup_parent(&self, path: &[u8]) -> FsResult<u64>;
}
impl TreeExt for Tree {
fn lookup_segs(&self, segs: Vec<&[u8]>) -> FsResult<u64> {
let mut node_id = tree::ROOT_ID;
let mut is_dir = true;
for seg in segs.into_iter() {
if !is_dir {
return Err(FsError::Forbidden);
}
if self.get_node(node_id)?.is_dir() {
node_id = self.get_child(node_id, seg)?;
} else {
is_dir = false;
}
}
Ok(node_id)
}
fn lookup(&self, path: &[u8]) -> FsResult<u64> {
self.lookup_segs(path.split(|&c| c == b'/').filter(|s| s.len() > 0).collect())
}
// pop the last segment off the path, do a lookup, then
// check if the result is a directory.
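    // E.g. for b"/dir/sub/file.txt" this resolves the node for "/dir/sub".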
fn lookup_parent(&self, path: &[u8]) -> FsResult<u64> {
let mut segs: Vec<&[u8]> = path.split(|&c| c == b'/').filter(|s| s.len() > 0).collect();
segs.pop();
let node_id = self.lookup_segs(segs)?;
if !self.get_node(node_id)?.is_dir() {
return Err(FsError::Forbidden);
}
Ok(node_id)
}
}
// Helper: return the last segment of the path (the file name).
fn file_name(path: &[u8]) -> Vec<u8> {
path.split(|&c| c == b'/')
.filter(|s| s.len() > 0)
.last()
.unwrap_or(b"")
.to_vec()
}

393
src/memls.rs Normal file
View File

@@ -0,0 +1,393 @@
//! Simple in-memory locksystem.
//!
//! This implementation is stateful: if you create a new
//! instance inside a handler(), it will start out empty every time.
//!
//! This means you have to create the instance once, using `MemLs::new`, store
//! it in your handler struct, and clone() it every time you pass
//! it to the DavHandler. As a MemLs struct is just a handle, cloning is cheap.
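//!
//! A minimal sketch of that pattern (the builder calls mirror the crate's own
//! adapters; the in-memory filesystem is only an illustrative companion):
//!
//! ```no_run
//! use webdav_handler::{memfs::MemFs, memls::MemLs, DavHandler};
//!
//! // Created once; the resulting handler clones the cheap handle internally.
//! let dav = DavHandler::builder()
//!     .filesystem(MemFs::new())
//!     .locksystem(MemLs::new())
//!     .build_handler();
//! ```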
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::{Duration, SystemTime};
use uuid::Uuid;
use xmltree::Element;
use crate::davpath::DavPath;
use crate::fs::FsResult;
use crate::ls::*;
use crate::tree;
type Tree = tree::Tree<Vec<u8>, Vec<DavLock>>;
/// Ephemeral in-memory LockSystem.
#[derive(Debug, Clone)]
pub struct MemLs(Arc<Mutex<MemLsInner>>);
#[derive(Debug)]
struct MemLsInner {
tree: Tree,
locks: HashMap<Vec<u8>, u64>,
}
impl MemLs {
/// Create a new "memls" locksystem.
pub fn new() -> Box<MemLs> {
let inner = MemLsInner {
tree: Tree::new(Vec::new()),
locks: HashMap::new(),
};
Box::new(MemLs(Arc::new(Mutex::new(inner))))
}
}
impl DavLockSystem for MemLs {
fn lock(
&self,
path: &DavPath,
principal: Option<&str>,
owner: Option<&Element>,
timeout: Option<Duration>,
shared: bool,
deep: bool,
) -> Result<DavLock, DavLock>
{
let inner = &mut *self.0.lock().unwrap();
// any locks in the path?
let rc = check_locks_to_path(&inner.tree, path, None, true, &Vec::new(), shared);
trace!("lock: check_locks_to_path: {:?}", rc);
rc?;
        // If it's a deep lock, we also need to check for locks further down the path.
if deep {
let rc = check_locks_from_path(&inner.tree, path, None, true, &Vec::new(), shared);
trace!("lock: check_locks_from_path: {:?}", rc);
rc?;
}
// create lock.
let node = get_or_create_path_node(&mut inner.tree, path);
let timeout_at = match timeout {
None => None,
Some(d) => Some(SystemTime::now() + d),
};
let lock = DavLock {
token: Uuid::new_v4().to_urn().to_string(),
path: path.clone(),
principal: principal.map(|s| s.to_string()),
owner: owner.cloned(),
timeout_at: timeout_at,
timeout: timeout,
shared: shared,
deep: deep,
};
trace!("lock {} created", &lock.token);
let slock = lock.clone();
node.push(slock);
Ok(lock)
}
fn unlock(&self, path: &DavPath, token: &str) -> Result<(), ()> {
let inner = &mut *self.0.lock().unwrap();
let node_id = match lookup_lock(&inner.tree, path, token) {
None => {
trace!("unlock: {} not found at {}", token, path);
return Err(());
},
Some(n) => n,
};
let len = {
let node = inner.tree.get_node_mut(node_id).unwrap();
let idx = node.iter().position(|n| n.token.as_str() == token).unwrap();
node.remove(idx);
node.len()
};
if len == 0 {
inner.tree.delete_node(node_id).ok();
}
Ok(())
}
fn refresh(&self, path: &DavPath, token: &str, timeout: Option<Duration>) -> Result<DavLock, ()> {
trace!("refresh lock {}", token);
let inner = &mut *self.0.lock().unwrap();
let node_id = match lookup_lock(&inner.tree, path, token) {
None => {
trace!("lock not found");
return Err(());
},
Some(n) => n,
};
        let node = inner.tree.get_node_mut(node_id).unwrap();
let idx = node.iter().position(|n| n.token.as_str() == token).unwrap();
let lock = &mut node[idx];
let timeout_at = match timeout {
None => None,
Some(d) => Some(SystemTime::now() + d),
};
lock.timeout = timeout;
lock.timeout_at = timeout_at;
Ok(lock.clone())
}
fn check(
&self,
path: &DavPath,
principal: Option<&str>,
ignore_principal: bool,
deep: bool,
submitted_tokens: Vec<&str>,
) -> Result<(), DavLock>
{
let inner = &*self.0.lock().unwrap();
let _st = submitted_tokens.clone();
let rc = check_locks_to_path(
&inner.tree,
path,
principal,
ignore_principal,
&submitted_tokens,
false,
);
trace!("check: check_lock_to_path: {:?}: {:?}", _st, rc);
rc?;
        // If it's a deep lock, we also need to check for locks further down the path.
if deep {
let rc = check_locks_from_path(
&inner.tree,
path,
principal,
ignore_principal,
&submitted_tokens,
false,
);
trace!("check: check_locks_from_path: {:?}", rc);
rc?;
}
Ok(())
}
fn discover(&self, path: &DavPath) -> Vec<DavLock> {
let inner = &*self.0.lock().unwrap();
list_locks(&inner.tree, path)
}
fn delete(&self, path: &DavPath) -> Result<(), ()> {
let inner = &mut *self.0.lock().unwrap();
if let Some(node_id) = lookup_node(&inner.tree, path) {
            inner.tree.delete_subtree(node_id).ok();
}
Ok(())
}
}
// check if there are any locks along the path.
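// Walks from the root towards `path`; locks on ancestor collections only count
// when they are deep locks, while locks on the final segment always count.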
fn check_locks_to_path(
tree: &Tree,
path: &DavPath,
principal: Option<&str>,
ignore_principal: bool,
submitted_tokens: &Vec<&str>,
shared_ok: bool,
) -> Result<(), DavLock>
{
// path segments
let segs = path_to_segs(path, true);
let last_seg = segs.len() - 1;
// state
let mut holds_lock = false;
let mut first_lock_seen: Option<&DavLock> = None;
// walk over path segments starting at root.
let mut node_id = tree::ROOT_ID;
for (i, seg) in segs.into_iter().enumerate() {
node_id = match get_child(tree, node_id, seg) {
Ok(n) => n,
Err(_) => break,
};
let node_locks = match tree.get_node(node_id) {
Ok(n) => n,
Err(_) => break,
};
for nl in node_locks {
if i < last_seg && !nl.deep {
continue;
}
if submitted_tokens.iter().any(|t| &nl.token == t) &&
(ignore_principal || principal == nl.principal.as_ref().map(|p| p.as_str()))
{
// fine, we hold this lock.
holds_lock = true;
} else {
// exclusive locks are fatal.
if !nl.shared {
return Err(nl.to_owned());
}
// remember first shared lock seen.
if !shared_ok {
first_lock_seen.get_or_insert(nl);
}
}
}
}
// return conflicting lock on error.
if !holds_lock && first_lock_seen.is_some() {
return Err(first_lock_seen.unwrap().to_owned());
}
Ok(())
}
// See if there are locks in any path below this collection.
fn check_locks_from_path(
tree: &Tree,
path: &DavPath,
principal: Option<&str>,
ignore_principal: bool,
submitted_tokens: &Vec<&str>,
shared_ok: bool,
) -> Result<(), DavLock>
{
let node_id = match lookup_node(tree, path) {
Some(id) => id,
None => return Ok(()),
};
check_locks_from_node(
tree,
node_id,
principal,
ignore_principal,
submitted_tokens,
shared_ok,
)
}
// See if there are locks in any nodes below this node.
fn check_locks_from_node(
tree: &Tree,
node_id: u64,
principal: Option<&str>,
ignore_principal: bool,
submitted_tokens: &Vec<&str>,
shared_ok: bool,
) -> Result<(), DavLock>
{
let node_locks = match tree.get_node(node_id) {
Ok(n) => n,
Err(_) => return Ok(()),
};
for nl in node_locks {
if !nl.shared || !shared_ok {
if !submitted_tokens.iter().any(|t| t == &nl.token) ||
(!ignore_principal && principal != nl.principal.as_ref().map(|p| p.as_str()))
{
return Err(nl.to_owned());
}
}
}
if let Ok(children) = tree.get_children(node_id) {
for (_, node_id) in children {
if let Err(l) = check_locks_from_node(
tree,
node_id,
principal,
ignore_principal,
submitted_tokens,
shared_ok,
) {
return Err(l);
}
}
}
Ok(())
}
// Find or create node.
fn get_or_create_path_node<'a>(tree: &'a mut Tree, path: &DavPath) -> &'a mut Vec<DavLock> {
let mut node_id = tree::ROOT_ID;
for seg in path_to_segs(path, false) {
node_id = match tree.get_child(node_id, seg) {
Ok(n) => n,
Err(_) => tree.add_child(node_id, seg.to_vec(), Vec::new(), false).unwrap(),
};
}
tree.get_node_mut(node_id).unwrap()
}
// Find lock in path.
fn lookup_lock(tree: &Tree, path: &DavPath, token: &str) -> Option<u64> {
trace!("lookup_lock: {}", token);
let mut node_id = tree::ROOT_ID;
for seg in path_to_segs(path, true) {
trace!(
"lookup_lock: node {} seg {}",
node_id,
String::from_utf8_lossy(seg)
);
node_id = match get_child(tree, node_id, seg) {
Ok(n) => n,
Err(_) => break,
};
let node = tree.get_node(node_id).unwrap();
trace!("lookup_lock: locks here: {:?}", &node);
if node.iter().any(|n| n.token == token) {
return Some(node_id);
}
}
trace!("lookup_lock: fail");
None
}
// Find node ID for path.
fn lookup_node(tree: &Tree, path: &DavPath) -> Option<u64> {
let mut node_id = tree::ROOT_ID;
for seg in path_to_segs(path, false) {
node_id = match tree.get_child(node_id, seg) {
Ok(n) => n,
Err(_) => return None,
};
}
Some(node_id)
}
// Find all locks in a path
fn list_locks(tree: &Tree, path: &DavPath) -> Vec<DavLock> {
let mut locks = Vec::new();
let mut node_id = tree::ROOT_ID;
if let Ok(node) = tree.get_node(node_id) {
locks.extend_from_slice(node);
}
for seg in path_to_segs(path, false) {
node_id = match tree.get_child(node_id, seg) {
Ok(n) => n,
Err(_) => break,
};
if let Ok(node) = tree.get_node(node_id) {
locks.extend_from_slice(node);
}
}
locks
}
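// Split a DavPath into its non-empty path segments. With `include_root` set, an
// empty segment is prepended so that a walk starts at the tree root.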
fn path_to_segs(path: &DavPath, include_root: bool) -> Vec<&[u8]> {
let path = path.as_bytes();
let mut segs: Vec<&[u8]> = path.split(|&c| c == b'/').filter(|s| s.len() > 0).collect();
if include_root {
segs.insert(0, b"");
}
segs
}
fn get_child(tree: &Tree, node_id: u64, seg: &[u8]) -> FsResult<u64> {
if seg.len() == 0 {
return Ok(node_id);
}
tree.get_child(node_id, seg)
}

148
src/multierror.rs Normal file
View File

@@ -0,0 +1,148 @@
use std::io;
use futures::{Stream, StreamExt};
use http::{Response, StatusCode};
use xml;
use xml::common::XmlVersion;
use xml::writer::EventWriter;
use xml::writer::XmlEvent as XmlWEvent;
use xml::EmitterConfig;
use crate::async_stream::AsyncStream;
use crate::body::Body;
use crate::davpath::DavPath;
use crate::util::MemBuffer;
use crate::DavError;
type Sender = crate::async_stream::Sender<(DavPath, StatusCode), DavError>;
pub(crate) struct MultiError(Sender);
impl MultiError {
pub fn new(sender: Sender) -> MultiError {
MultiError(sender)
}
pub async fn add_status<'a>(
&'a mut self,
path: &'a DavPath,
status: impl Into<DavError> + 'static,
) -> Result<(), futures::channel::mpsc::SendError>
{
let status = status.into().statuscode();
self.0.send((path.clone(), status)).await;
Ok(())
}
}
type XmlWriter = EventWriter<MemBuffer>;
fn write_elem<'b, S>(xw: &mut XmlWriter, name: S, text: &str) -> Result<(), DavError>
where S: Into<xml::name::Name<'b>> {
let n = name.into();
xw.write(XmlWEvent::start_element(n))?;
if text.len() > 0 {
xw.write(XmlWEvent::characters(text))?;
}
xw.write(XmlWEvent::end_element())?;
Ok(())
}
fn write_response(mut w: &mut XmlWriter, path: &DavPath, sc: StatusCode) -> Result<(), DavError> {
w.write(XmlWEvent::start_element("D:response"))?;
let p = path.with_prefix().as_url_string();
write_elem(&mut w, "D:href", &p)?;
write_elem(&mut w, "D:status", &format!("HTTP/1.1 {}", sc))?;
w.write(XmlWEvent::end_element())?;
Ok(())
}
pub(crate) async fn multi_error<S>(req_path: DavPath, status_stream: S) -> Result<Response<Body>, DavError>
where S: Stream<Item = Result<(DavPath, StatusCode), DavError>> + Send + 'static {
// read the first path/status item
let mut status_stream = Box::pin(status_stream);
let (path, status) = match status_stream.next().await {
None => {
debug!("multi_error: empty status_stream");
return Err(DavError::ChanError);
},
Some(Err(e)) => return Err(e),
Some(Ok(item)) => item,
};
let mut items = Vec::new();
if path == req_path {
// the first path/status item was for the request path.
// see if there is a next item.
match status_stream.next().await {
None => {
// No, this was the first and only item.
let resp = Response::builder().status(status).body(Body::empty()).unwrap();
return Ok(resp);
},
Some(Err(e)) => return Err(e),
Some(Ok(item)) => {
// Yes, more than one response.
items.push(Ok((path, status)));
items.push(Ok(item));
},
}
} else {
items.push(Ok((path, status)));
}
// Transform path/status items to XML.
let body = AsyncStream::new(|mut tx| {
async move {
// Write initial header.
let mut xw = EventWriter::new_with_config(
MemBuffer::new(),
EmitterConfig {
perform_indent: true,
..EmitterConfig::default()
},
);
xw.write(XmlWEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("utf-8"),
standalone: None,
})
.map_err(DavError::from)?;
xw.write(XmlWEvent::start_element("D:multistatus").ns("D", "DAV:"))
.map_err(DavError::from)?;
let data = xw.inner_mut().take();
tx.send(data).await;
// now write the items.
let mut status_stream = futures::stream::iter(items).chain(status_stream);
while let Some(res) = status_stream.next().await {
let (path, status) = res?;
let status = if status == StatusCode::NO_CONTENT {
StatusCode::OK
} else {
status
};
write_response(&mut xw, &path, status)?;
let data = xw.inner_mut().take();
tx.send(data).await;
}
// and finally write the trailer.
xw.write(XmlWEvent::end_element()).map_err(DavError::from)?;
let data = xw.inner_mut().take();
tx.send(data).await;
Ok::<_, io::Error>(())
}
});
// return response.
let resp = Response::builder()
.header("content-type", "application/xml; charset=utf-8")
.status(StatusCode::MULTI_STATUS)
.body(Body::from(body))
.unwrap();
Ok(resp)
}

199
src/tree.rs Normal file
View File

@@ -0,0 +1,199 @@
use std::borrow::Borrow;
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use crate::FsError;
use crate::FsResult;
#[derive(Debug)]
/// A tree of nodes addressed by numeric ids; each node holds data `D` and
/// keeps its children keyed by `K`.
pub struct Tree<K: Eq + Hash, D> {
nodes: HashMap<u64, Node<K, D>>,
node_id: u64,
}
/// id of the root node of the tree.
pub const ROOT_ID: u64 = 1;
#[derive(Debug)]
/// A single node in the tree; "data" holds the user-modifiable payload.
pub struct Node<K: Eq + Hash, D> {
pub data: D,
id: u64,
parent_id: u64,
children: HashMap<K, u64>,
}
#[derive(Debug)]
// Iterator over the children of a node.
pub struct Children<K>(std::vec::IntoIter<(K, u64)>);
impl<K: Eq + Hash + Debug + Clone, D: Debug> Tree<K, D> {
/// Get new tree and initialize the root with 'data'.
pub fn new(data: D) -> Tree<K, D> {
let mut t = Tree {
nodes: HashMap::new(),
node_id: ROOT_ID,
};
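        // The root node gets a sentinel parent id, since it has no real parent.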
t.new_node(99999999, data);
t
}
fn new_node(&mut self, parent: u64, data: D) -> u64 {
let id = self.node_id;
self.node_id += 1;
let node = Node {
id: id,
parent_id: parent,
data: data,
children: HashMap::new(),
};
self.nodes.insert(id, node);
id
}
/// add a child node to an existing node.
pub fn add_child(&mut self, parent: u64, key: K, data: D, overwrite: bool) -> FsResult<u64> {
{
let pnode = self.nodes.get(&parent).ok_or(FsError::NotFound)?;
if !overwrite && pnode.children.contains_key(&key) {
return Err(FsError::Exists);
}
}
let id = self.new_node(parent, data);
let pnode = self.nodes.get_mut(&parent).unwrap();
pnode.children.insert(key, id);
Ok(id)
}
/*
* unused ...
pub fn remove_child(&mut self, parent: u64, key: &K) -> FsResult<()> {
let id = {
let pnode = self.nodes.get(&parent).ok_or(FsError::NotFound)?;
let id = *pnode.children.get(key).ok_or(FsError::NotFound)?;
let node = self.nodes.get(&id).unwrap();
if node.children.len() > 0 {
return Err(FsError::Forbidden);
}
id
};
{
let pnode = self.nodes.get_mut(&parent).unwrap();
pnode.children.remove(key);
}
self.nodes.remove(&id);
Ok(())
}*/
/// Get a child node by key K.
pub fn get_child<Q: ?Sized>(&self, parent: u64, key: &Q) -> FsResult<u64>
where
K: Borrow<Q>,
Q: Hash + Eq,
{
let pnode = self.nodes.get(&parent).ok_or(FsError::NotFound)?;
let id = pnode.children.get(key).ok_or(FsError::NotFound)?;
Ok(*id)
}
    /// Get all children of this node. Returns an iterator over `(K, node_id)` pairs.
pub fn get_children(&self, parent: u64) -> FsResult<Children<K>> {
let pnode = self.nodes.get(&parent).ok_or(FsError::NotFound)?;
let mut v = Vec::new();
for (k, i) in &pnode.children {
v.push(((*k).clone(), *i));
}
Ok(Children(v.into_iter()))
}
/// Get reference to a node.
pub fn get_node(&self, id: u64) -> FsResult<&D> {
let n = self.nodes.get(&id).ok_or(FsError::NotFound)?;
Ok(&n.data)
}
/// Get mutable reference to a node.
pub fn get_node_mut(&mut self, id: u64) -> FsResult<&mut D> {
let n = self.nodes.get_mut(&id).ok_or(FsError::NotFound)?;
Ok(&mut n.data)
}
fn delete_node_from_parent(&mut self, id: u64) -> FsResult<()> {
let parent_id = self.nodes.get(&id).ok_or(FsError::NotFound)?.parent_id;
let key = {
let pnode = self.nodes.get(&parent_id).unwrap();
let mut key = None;
for (k, i) in &pnode.children {
if i == &id {
key = Some((*k).clone());
break;
}
}
key
};
let key = key.unwrap();
let pnode = self.nodes.get_mut(&parent_id).unwrap();
pnode.children.remove(&key);
Ok(())
}
    /// Delete a node. Fails if the node has children. Returns the removed node.
pub fn delete_node(&mut self, id: u64) -> FsResult<Node<K, D>> {
{
let n = self.nodes.get(&id).ok_or(FsError::NotFound)?;
if n.children.len() > 0 {
return Err(FsError::Forbidden);
}
}
self.delete_node_from_parent(id)?;
Ok(self.nodes.remove(&id).unwrap())
}
/// Delete a subtree.
pub fn delete_subtree(&mut self, id: u64) -> FsResult<()> {
let children = {
let n = self.nodes.get(&id).ok_or(FsError::NotFound)?;
n.children.iter().map(|(_, &v)| v).collect::<Vec<u64>>()
};
for c in children.into_iter() {
self.delete_subtree(c)?;
}
self.delete_node_from_parent(id)
}
/// Move a node to a new position and new name in the tree.
/// If "overwrite" is true, will replace an existing
/// node, but only if it doesn't have any children.
pub fn move_node(&mut self, id: u64, new_parent: u64, new_name: K, overwrite: bool) -> FsResult<()> {
let dest = {
let pnode = self.nodes.get(&new_parent).ok_or(FsError::NotFound)?;
if let Some(cid) = pnode.children.get(&new_name) {
let cnode = self.nodes.get(cid).unwrap();
if !overwrite || cnode.children.len() > 0 {
return Err(FsError::Exists);
}
Some(*cid)
} else {
None
}
};
self.delete_node_from_parent(id)?;
self.nodes.get_mut(&id).unwrap().parent_id = new_parent;
if let Some(dest) = dest {
self.nodes.remove(&dest);
}
let pnode = self.nodes.get_mut(&new_parent).unwrap();
pnode.children.insert(new_name, id);
Ok(())
}
}
impl<K> Iterator for Children<K> {
type Item = (K, u64);
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
}
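// A small added test (a sketch, not part of the original code) that exercises the
// basic Tree API defined above: add a child under the root, look it up, delete it.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn add_lookup_delete_child() {
        let mut t: Tree<Vec<u8>, u32> = Tree::new(0);
        let id = t.add_child(ROOT_ID, b"child".to_vec(), 1, false).unwrap();
        assert_eq!(t.get_child(ROOT_ID, &b"child"[..]).unwrap(), id);
        assert_eq!(*t.get_node(id).unwrap(), 1);
        t.delete_node(id).unwrap();
        assert!(t.get_child(ROOT_ID, &b"child"[..]).is_err());
    }
}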

208
src/util.rs Normal file
View File

@@ -0,0 +1,208 @@
use std::io::{Cursor, Write};
use std::time::{SystemTime, UNIX_EPOCH};
use bytes::Bytes;
use headers::Header;
use http::method::InvalidMethod;
use crate::body::Body;
use crate::errors::DavError;
use crate::DavResult;
/// HTTP Methods supported by DavHandler.
#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)]
#[repr(u32)]
pub enum DavMethod {
Head = 0x0001,
Get = 0x0002,
Put = 0x0004,
Patch = 0x0008,
Options = 0x0010,
PropFind = 0x0020,
PropPatch = 0x0040,
MkCol = 0x0080,
Copy = 0x0100,
Move = 0x0200,
Delete = 0x0400,
Lock = 0x0800,
Unlock = 0x1000,
}
// Translate an http::Method into our own enum, which also covers the WebDAV methods.
pub(crate) fn dav_method(m: &http::Method) -> DavResult<DavMethod> {
let m = match m {
&http::Method::HEAD => DavMethod::Head,
&http::Method::GET => DavMethod::Get,
&http::Method::PUT => DavMethod::Put,
&http::Method::PATCH => DavMethod::Patch,
&http::Method::DELETE => DavMethod::Delete,
&http::Method::OPTIONS => DavMethod::Options,
_ => {
match m.as_str() {
"PROPFIND" => DavMethod::PropFind,
"PROPPATCH" => DavMethod::PropPatch,
"MKCOL" => DavMethod::MkCol,
"COPY" => DavMethod::Copy,
"MOVE" => DavMethod::Move,
"LOCK" => DavMethod::Lock,
"UNLOCK" => DavMethod::Unlock,
_ => {
return Err(DavError::UnknownDavMethod);
},
}
},
};
Ok(m)
}
// for external use.
impl std::convert::TryFrom<&http::Method> for DavMethod {
type Error = InvalidMethod;
fn try_from(value: &http::Method) -> Result<Self, Self::Error> {
dav_method(value).map_err(|_| {
// A trick to get at the value of http::method::InvalidMethod.
http::method::Method::from_bytes(b"").unwrap_err()
})
}
}
/// A set of allowed [`DavMethod`]s.
///
/// [`DavMethod`]: enum.DavMethod.html
#[derive(Clone, Copy, Debug)]
pub struct DavMethodSet(u32);
impl DavMethodSet {
pub const HTTP_RO: DavMethodSet =
DavMethodSet(DavMethod::Get as u32 | DavMethod::Head as u32 | DavMethod::Options as u32);
pub const HTTP_RW: DavMethodSet = DavMethodSet(Self::HTTP_RO.0 | DavMethod::Put as u32);
pub const WEBDAV_RO: DavMethodSet = DavMethodSet(Self::HTTP_RO.0 | DavMethod::PropFind as u32);
pub const WEBDAV_RW: DavMethodSet = DavMethodSet(0xffffffff);
/// New set, all methods allowed.
pub fn all() -> DavMethodSet {
DavMethodSet(0xffffffff)
}
/// New empty set.
pub fn none() -> DavMethodSet {
DavMethodSet(0)
}
/// Add a method.
pub fn add(&mut self, m: DavMethod) -> &Self {
self.0 |= m as u32;
self
}
/// Remove a method.
pub fn remove(&mut self, m: DavMethod) -> &Self {
self.0 &= !(m as u32);
self
}
/// Check if a method is in the set.
pub fn contains(&self, m: DavMethod) -> bool {
self.0 & (m as u32) > 0
}
    /// Generate a DavMethodSet from a list of method names.
pub fn from_vec(v: Vec<impl AsRef<str>>) -> Result<DavMethodSet, InvalidMethod> {
let mut m: u32 = 0;
for w in &v {
m |= match w.as_ref().to_lowercase().as_str() {
"head" => DavMethod::Head as u32,
"get" => DavMethod::Get as u32,
"put" => DavMethod::Put as u32,
"patch" => DavMethod::Patch as u32,
"delete" => DavMethod::Delete as u32,
"options" => DavMethod::Options as u32,
"propfind" => DavMethod::PropFind as u32,
"proppatch" => DavMethod::PropPatch as u32,
"mkcol" => DavMethod::MkCol as u32,
"copy" => DavMethod::Copy as u32,
"move" => DavMethod::Move as u32,
"lock" => DavMethod::Lock as u32,
"unlock" => DavMethod::Unlock as u32,
"http-ro" => Self::HTTP_RO.0,
"http-rw" => Self::HTTP_RW.0,
"webdav-ro" => Self::WEBDAV_RO.0,
"webdav-rw" => Self::WEBDAV_RW.0,
_ => {
// A trick to get at the value of http::method::InvalidMethod.
let invalid_method = http::method::Method::from_bytes(b"").unwrap_err();
return Err(invalid_method);
},
};
}
Ok(DavMethodSet(m))
}
}
pub(crate) fn dav_xml_error(body: &str) -> Body {
let xml = format!(
"{}\n{}\n{}\n{}\n",
r#"<?xml version="1.0" encoding="utf-8" ?>"#, r#"<D:error xmlns:D="DAV:">"#, body, r#"</D:error>"#
);
Body::from(xml)
}
pub(crate) fn systemtime_to_offsetdatetime(t: SystemTime) -> time::OffsetDateTime {
match t.duration_since(UNIX_EPOCH) {
Ok(t) => {
let tm = time::OffsetDateTime::from_unix_timestamp(t.as_secs() as i64);
tm.to_offset(time::offset!(UTC))
},
Err(_) => time::OffsetDateTime::unix_epoch().to_offset(time::offset!(UTC)),
}
}
pub(crate) fn systemtime_to_httpdate(t: SystemTime) -> String {
let d = headers::Date::from(t);
let mut v = Vec::new();
d.encode(&mut v);
v[0].to_str().unwrap().to_owned()
}
pub(crate) fn systemtime_to_rfc3339(t: SystemTime) -> String {
// 1996-12-19T16:39:57Z
systemtime_to_offsetdatetime(t).format("%FT%H:%M:%SZ")
}
// A buffer that implements "Write".
#[derive(Clone)]
pub(crate) struct MemBuffer(Cursor<Vec<u8>>);
impl MemBuffer {
pub fn new() -> MemBuffer {
MemBuffer(Cursor::new(Vec::new()))
}
pub fn take(&mut self) -> Bytes {
let buf = std::mem::replace(self.0.get_mut(), Vec::new());
self.0.set_position(0);
Bytes::from(buf)
}
}
impl Write for MemBuffer {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.0.write(buf)
}
fn flush(&mut self) -> std::io::Result<()> {
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::time::UNIX_EPOCH;
#[test]
fn test_rfc3339() {
assert!(systemtime_to_rfc3339(UNIX_EPOCH) == "1970-01-01T00:00:00Z");
}
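    // An added check (a sketch, not part of the original code) exercising the
    // DavMethodSet bit operations and `from_vec` defined above.
    #[test]
    fn test_method_set() {
        let mut set = DavMethodSet::none();
        set.add(DavMethod::Get);
        assert!(set.contains(DavMethod::Get));
        assert!(!set.contains(DavMethod::Put));
        set.remove(DavMethod::Get);
        assert!(!set.contains(DavMethod::Get));

        let set = DavMethodSet::from_vec(vec!["GET", "propfind"]).unwrap();
        assert!(set.contains(DavMethod::Get));
        assert!(set.contains(DavMethod::PropFind));
        assert!(!set.contains(DavMethod::Lock));
    }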
}

51
src/voidfs.rs Normal file
View File

@@ -0,0 +1,51 @@
//! Placeholder filesystem. Returns FsError::NotImplemented on every method.
//!
use crate::davpath::DavPath;
use crate::fs::*;
use std::any::Any;
/// Placeholder filesystem.
#[derive(Debug, Clone)]
pub struct VoidFs;
pub fn is_voidfs(fs: &dyn Any) -> bool {
fs.is::<Box<VoidFs>>()
}
impl VoidFs {
pub fn new() -> Box<VoidFs> {
Box::new(VoidFs)
}
}
impl DavFileSystem for VoidFs {
fn metadata<'a>(&'a self, _path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
Box::pin(async { Err(FsError::NotImplemented) })
}
fn read_dir<'a>(
&'a self,
_path: &'a DavPath,
_meta: ReadDirMeta,
) -> FsFuture<FsStream<Box<dyn DavDirEntry>>>
{
Box::pin(async { Err(FsError::NotImplemented) })
}
fn open<'a>(&'a self, _path: &'a DavPath, _options: OpenOptions) -> FsFuture<Box<dyn DavFile>> {
Box::pin(async { Err(FsError::NotImplemented) })
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::memfs::MemFs;
#[test]
fn test_is_void() {
assert!(is_voidfs(&VoidFs::new()));
assert!(!is_voidfs(&MemFs::new()));
}
}

94
src/warp.rs Normal file
View File

@@ -0,0 +1,94 @@
//! Adapter for the `warp` HTTP server framework.
//!
//! The filters in this module will always succeed and never
//! return an error. For example, if a file is not found, the
//! filter will return a 404 reply, and not an internal
//! rejection.
//!
use std::convert::Infallible;
use std::path::Path;
use crate::{fakels::FakeLs, localfs::LocalFs, DavHandler};
use warp::{filters::BoxedFilter, Filter, Reply};
/// Reply-filter that runs a DavHandler.
///
/// Just pass in a pre-configured DavHandler. If no prefix was configured,
/// the prefix will be the request path up to the point where this filter matched.
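///
/// A hedged usage sketch (assumes the `warp-compat` feature and the crate's public
/// module layout; the filesystem and locksystem choices are illustrative only):
///
/// ```no_run
/// use webdav_handler::{fakels::FakeLs, localfs::LocalFs, DavHandler};
///
/// let dav = DavHandler::builder()
///     .filesystem(LocalFs::new("/data", false, false, false))
///     .locksystem(FakeLs::new())
///     .build_handler();
/// // `route` can be passed to `warp::serve(...)`.
/// let route = webdav_handler::warp::dav_handler(dav);
/// ```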
pub fn dav_handler(handler: DavHandler) -> BoxedFilter<(impl Reply,)> {
use http::header::HeaderMap;
use http::uri::Uri;
use http::Response;
use warp::path::{FullPath, Tail};
warp::method()
.and(warp::path::full())
.and(warp::path::tail())
.and(warp::header::headers_cloned())
.and(warp::body::stream())
.and_then(
move |method, path_full: FullPath, path_tail: Tail, headers: HeaderMap, body| {
let handler = handler.clone();
async move {
// rebuild an http::Request struct.
let path_str = path_full.as_str();
let uri = path_str.parse::<Uri>().unwrap();
let mut builder = http::Request::builder().method(method).uri(uri);
for (k, v) in headers.iter() {
builder = builder.header(k, v);
}
let request = builder.body(body).unwrap();
let response = if handler.config.prefix.is_some() {
// Run a handler with the configured path prefix.
handler.handle_stream(request).await
} else {
// Run a handler with the current path prefix.
let path_len = path_str.len();
let tail_len = path_tail.as_str().len();
let prefix = path_str[..path_len - tail_len].to_string();
let config = DavHandler::builder().strip_prefix(prefix);
handler.handle_stream_with(config, request).await
};
// Need to remap the http_body::Body to a hyper::Body.
let (parts, body) = response.into_parts();
let response = Response::from_parts(parts, hyper::Body::wrap_stream(body));
Ok::<_, Infallible>(response)
}
},
)
.boxed()
}
/// Creates a Filter that serves files and directories at the
/// base path joined with the remainder of the request path,
/// like `warp::filters::fs::dir`.
///
/// The behaviour for serving a directory depends on the flags:
///
/// - `index_html`: if an `index.html` file is found, serve it.
/// - `auto_index`: create a directory listing.
/// - no flags set: 404.
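///
/// A hedged sketch (the path and flag values are illustrative only):
///
/// ```no_run
/// // Serve "./public" with auto-generated directory listings, no index.html handling.
/// let route = webdav_handler::warp::dav_dir("./public", false, true);
/// ```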
pub fn dav_dir(base: impl AsRef<Path>, index_html: bool, auto_index: bool) -> BoxedFilter<(impl Reply,)> {
let mut builder = DavHandler::builder()
.filesystem(LocalFs::new(base, false, false, false))
.locksystem(FakeLs::new())
.autoindex(auto_index);
if index_html {
builder = builder.indexfile("index.html".to_string())
}
let handler = builder.build_handler();
dav_handler(handler)
}
/// Creates a Filter that serves a single file, ignoring the request path,
/// like `warp::filters::fs::file`.
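///
/// A hedged sketch (the file path is illustrative only):
///
/// ```no_run
/// let route = webdav_handler::warp::dav_file("./static/index.html");
/// ```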
pub fn dav_file(file: impl AsRef<Path>) -> BoxedFilter<(impl Reply,)> {
let handler = DavHandler::builder()
.filesystem(LocalFs::new_file(file, false))
.locksystem(FakeLs::new())
.build_handler();
dav_handler(handler)
}

186
src/xmltree_ext.rs Normal file
View File

@@ -0,0 +1,186 @@
use std::borrow::Cow;
use std::io::{Read, Write};
use xml;
use xml::common::XmlVersion;
use xml::writer::EventWriter;
use xml::writer::XmlEvent as XmlWEvent;
use xml::EmitterConfig;
use xmltree::{self, Element, XMLNode};
use crate::{DavError, DavResult};
pub(crate) trait ElementExt {
    /// Builder: create an element from a "prefix:name" string.
fn new2<'a, E: Into<&'a str>>(e: E) -> Self;
    /// Builder: add a namespace declaration (prefix, URI).
fn ns<S: Into<String>>(self, prefix: S, namespace: S) -> Self;
    /// Builder: replace any text children with the given text content.
fn text<'a, T: Into<String>>(self, t: T) -> Self;
/// Like parse, but returns DavError.
fn parse2<R: Read>(r: R) -> Result<Element, DavError>;
/// Add a child element.
fn push_element(&mut self, e: Element);
/// Iterator over the children that are Elements.
fn child_elems_into_iter(self) -> Box<dyn Iterator<Item = Element>>;
/// Iterator over the children that are Elements.
fn child_elems_iter<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Element> + 'a>;
/// Vec of the children that are Elements.
fn take_child_elems(self) -> Vec<Element>;
/// Does the element have children that are also Elements.
fn has_child_elems(&self) -> bool;
/// Write the element using an EventWriter.
fn write_ev<W: Write>(&self, emitter: &mut EventWriter<W>) -> xml::writer::Result<()>;
}
impl ElementExt for Element {
fn ns<S: Into<String>>(mut self, prefix: S, namespace: S) -> Element {
let mut ns = self.namespaces.unwrap_or(xmltree::Namespace::empty());
ns.force_put(prefix.into(), namespace.into());
self.namespaces = Some(ns);
self
}
fn new2<'a, N: Into<&'a str>>(n: N) -> Element {
let v: Vec<&str> = n.into().splitn(2, ':').collect();
if v.len() == 1 {
Element::new(v[0])
} else {
let mut e = Element::new(v[1]);
e.prefix = Some(v[0].to_string());
e
}
}
fn text<S: Into<String>>(mut self, t: S) -> Element {
let nodes = self
.children
.drain(..)
.filter(|n| n.as_text().is_none())
.collect();
self.children = nodes;
self.children.push(XMLNode::Text(t.into()));
self
}
fn push_element(&mut self, e: Element) {
self.children.push(XMLNode::Element(e));
}
fn child_elems_into_iter(self) -> Box<dyn Iterator<Item = Element>> {
let iter = self.children.into_iter().filter_map(|n| {
match n {
XMLNode::Element(e) => Some(e),
_ => None,
}
});
Box::new(iter)
}
fn child_elems_iter<'a>(&'a self) -> Box<dyn Iterator<Item = &'a Element> + 'a> {
let iter = self.children.iter().filter_map(|n| n.as_element());
Box::new(iter)
}
fn take_child_elems(self) -> Vec<Element> {
self.children
.into_iter()
.filter_map(|n| {
match n {
XMLNode::Element(e) => Some(e),
_ => None,
}
})
.collect()
}
fn has_child_elems(&self) -> bool {
self.children.iter().find_map(|n| n.as_element()).is_some()
}
fn parse2<R: Read>(r: R) -> Result<Element, DavError> {
let res = Element::parse(r);
match res {
Ok(elems) => Ok(elems),
Err(xmltree::ParseError::MalformedXml(_)) => Err(DavError::XmlParseError),
Err(_) => Err(DavError::XmlReadError),
}
}
fn write_ev<W: Write>(&self, emitter: &mut EventWriter<W>) -> xml::writer::Result<()> {
use xml::attribute::Attribute;
use xml::name::Name;
use xml::namespace::Namespace;
use xml::writer::events::XmlEvent;
let mut name = Name::local(&self.name);
if let Some(ref ns) = self.namespace {
name.namespace = Some(ns);
}
if let Some(ref p) = self.prefix {
name.prefix = Some(p);
}
let mut attributes = Vec::with_capacity(self.attributes.len());
for (k, v) in &self.attributes {
attributes.push(Attribute {
name: Name::local(k),
value: v,
});
}
let empty_ns = Namespace::empty();
let namespace = if let Some(ref ns) = self.namespaces {
Cow::Borrowed(ns)
} else {
Cow::Borrowed(&empty_ns)
};
emitter.write(XmlEvent::StartElement {
name,
attributes: Cow::Owned(attributes),
namespace,
})?;
for node in &self.children {
match node {
XMLNode::Element(elem) => elem.write_ev(emitter)?,
XMLNode::Text(text) => emitter.write(XmlEvent::Characters(text))?,
XMLNode::Comment(comment) => emitter.write(XmlEvent::Comment(comment))?,
XMLNode::CData(comment) => emitter.write(XmlEvent::CData(comment))?,
XMLNode::ProcessingInstruction(name, data) => {
match data.to_owned() {
Some(string) => {
emitter.write(XmlEvent::ProcessingInstruction {
name,
data: Some(&string),
})?
},
None => emitter.write(XmlEvent::ProcessingInstruction { name, data: None })?,
}
},
}
}
emitter.write(XmlEvent::EndElement { name: Some(name) })?;
Ok(())
}
}
pub(crate) fn emitter<W: Write>(w: W) -> DavResult<EventWriter<W>> {
let mut emitter = EventWriter::new_with_config(
w,
EmitterConfig {
perform_indent: false,
indent_string: Cow::Borrowed(""),
..Default::default()
},
);
emitter.write(XmlWEvent::StartDocument {
version: XmlVersion::Version10,
encoding: Some("utf-8"),
standalone: None,
})?;
Ok(emitter)
}
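
// A small added test (a sketch, not part of the original code) that exercises the
// `ElementExt` builder helpers defined above.
#[cfg(test)]
mod tests {
    use super::*;
    use xmltree::Element;

    #[test]
    fn elementext_builders() {
        let e = Element::new2("D:response").ns("D", "DAV:").text("hello");
        assert_eq!(e.name, "response");
        assert_eq!(e.prefix.as_deref(), Some("D"));
        assert!(e.namespaces.is_some());
        assert!(!e.has_child_elems());
    }
}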