Compare commits
4 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
286c55d5b4 | ||
|
6ec35bdbbd | ||
|
82417de8a8 | ||
|
cc553060e0 |
5
.cargo/config
Normal file
5
.cargo/config
Normal file
@ -0,0 +1,5 @@
|
||||
[source]
|
||||
[source.debian-packages]
|
||||
directory = "/usr/share/cargo/registry"
|
||||
[source.crates-io]
|
||||
replace-with = "debian-packages"
|
@ -1,5 +0,0 @@
|
||||
# [source]
|
||||
# [source.debian-packages]
|
||||
# directory = "/usr/share/cargo/registry"
|
||||
# [source.crates-io]
|
||||
# replace-with = "debian-packages"
|
88
Cargo.toml
88
Cargo.toml
@ -1,68 +1,41 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"proxmox-access-control",
|
||||
"proxmox-acme",
|
||||
"proxmox-acme-api",
|
||||
"proxmox-api-macro",
|
||||
"proxmox-apt",
|
||||
"proxmox-apt-api-types",
|
||||
"proxmox-async",
|
||||
"proxmox-auth-api",
|
||||
"proxmox-borrow",
|
||||
"proxmox-client",
|
||||
"proxmox-compression",
|
||||
"proxmox-config-digest",
|
||||
"proxmox-daemon",
|
||||
"proxmox-dns-api",
|
||||
"proxmox-http",
|
||||
"proxmox-http-error",
|
||||
"proxmox-human-byte",
|
||||
"proxmox-io",
|
||||
"proxmox-lang",
|
||||
"proxmox-ldap",
|
||||
"proxmox-log",
|
||||
"proxmox-login",
|
||||
"proxmox-metrics",
|
||||
"proxmox-network-api",
|
||||
"proxmox-notify",
|
||||
"proxmox-openid",
|
||||
"proxmox-product-config",
|
||||
"proxmox-rest-server",
|
||||
"proxmox-router",
|
||||
"proxmox-rrd",
|
||||
"proxmox-rrd-api-types",
|
||||
"proxmox-schema",
|
||||
"proxmox-section-config",
|
||||
"proxmox-sendmail",
|
||||
"proxmox-serde",
|
||||
"proxmox-shared-cache",
|
||||
"proxmox-shared-memory",
|
||||
"proxmox-simple-config",
|
||||
"proxmox-sortable-macro",
|
||||
"proxmox-subscription",
|
||||
"proxmox-sys",
|
||||
"proxmox-syslog-api",
|
||||
"proxmox-systemd",
|
||||
"proxmox-tfa",
|
||||
"proxmox-time",
|
||||
"proxmox-time-api",
|
||||
"proxmox-uuid",
|
||||
"proxmox-worker-task",
|
||||
"pbs-api-types",
|
||||
]
|
||||
exclude = [
|
||||
"build",
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
authors = ["Proxmox Support Team <support@proxmox.com>"]
|
||||
edition = "2021"
|
||||
license = "AGPL-3"
|
||||
repository = "https://git.proxmox.com/?p=proxmox.git"
|
||||
homepage = "https://proxmox.com"
|
||||
exclude = [ "debian" ]
|
||||
rust-version = "1.82"
|
||||
homepage = "https://www.proxmox.com"
|
||||
|
||||
[workspace.dependencies]
|
||||
# any features enabled here are enabled on all members using 'workspace = true'!
|
||||
@ -71,29 +44,24 @@ rust-version = "1.82"
|
||||
anyhow = "1.0"
|
||||
base32 = "0.4"
|
||||
base64 = "0.13"
|
||||
bitflags = "2.4"
|
||||
bytes = "1.0"
|
||||
const_format = "0.2"
|
||||
crc32fast = "1"
|
||||
crossbeam-channel = "0.5"
|
||||
endian_trait = "0.6"
|
||||
env_logger = "0.11"
|
||||
flate2 = "1.0"
|
||||
foreign-types = "0.3"
|
||||
form_urlencoded = "1.1"
|
||||
futures = "0.3"
|
||||
handlebars = "3.0"
|
||||
hex = "0.4"
|
||||
http = "0.2"
|
||||
hyper = "0.14.5"
|
||||
lazy_static = "1.4"
|
||||
ldap3 = { version = "0.11", default-features = false }
|
||||
lettre = "0.11.1"
|
||||
libc = "0.2.107"
|
||||
log = "0.4.17"
|
||||
mail-parser = "0.8.2"
|
||||
native-tls = "0.2"
|
||||
nix = "0.26.1"
|
||||
once_cell = "1.3.1"
|
||||
openssl = "0.10"
|
||||
pam = "0.7"
|
||||
pam-sys = "0.5"
|
||||
percent-encoding = "2.1"
|
||||
pin-utils = "0.1.0"
|
||||
@ -101,52 +69,32 @@ proc-macro2 = "1.0"
|
||||
quote = "1.0"
|
||||
regex = "1.5"
|
||||
serde = "1.0"
|
||||
serde_cbor = "0.11.1"
|
||||
serde_json = "1.0"
|
||||
serde_plain = "1.0"
|
||||
syn = { version = "2", features = [ "full", "visit-mut" ] }
|
||||
syn = { version = "1.0", features = [ "full", "visit-mut" ] }
|
||||
tar = "0.4"
|
||||
tokio = "1.6"
|
||||
tokio-openssl = "0.6.1"
|
||||
tokio-stream = "0.1.0"
|
||||
tower-service = "0.3.0"
|
||||
tracing = "0.1"
|
||||
tracing-journald = "0.3.0"
|
||||
tracing-log = { version = "0.2", default-features = false }
|
||||
tracing-subscriber = "0.3.16"
|
||||
url = "2.2"
|
||||
walkdir = "2"
|
||||
webauthn-rs = "0.3"
|
||||
zstd = { version = "0.12", features = [ "bindgen" ] }
|
||||
zstd = { version = "0.6", features = [ "bindgen" ] }
|
||||
|
||||
# workspace dependencies
|
||||
proxmox-acme = { version = "0.5.3", path = "proxmox-acme", default-features = false }
|
||||
proxmox-api-macro = { version = "1.3.2", path = "proxmox-api-macro" }
|
||||
proxmox-apt-api-types = { version = "1.0.2", path = "proxmox-apt-api-types" }
|
||||
proxmox-auth-api = { version = "0.4.0", path = "proxmox-auth-api" }
|
||||
proxmox-api-macro = { version = "1.0.4", path = "proxmox-api-macro" }
|
||||
proxmox-async = { version = "0.4.1", path = "proxmox-async" }
|
||||
proxmox-compression = { version = "0.2.4", path = "proxmox-compression" }
|
||||
proxmox-daemon = { version = "0.1.0", path = "proxmox-daemon" }
|
||||
proxmox-http = { version = "0.9.4", path = "proxmox-http" }
|
||||
proxmox-http-error = { version = "0.1.0", path = "proxmox-http-error" }
|
||||
proxmox-human-byte = { version = "0.1.0", path = "proxmox-human-byte" }
|
||||
proxmox-io = { version = "1.1.0", path = "proxmox-io" }
|
||||
proxmox-lang = { version = "1.3", path = "proxmox-lang" }
|
||||
proxmox-log= { version = "0.2.5", path = "proxmox-log" }
|
||||
proxmox-login = { version = "0.2.0", path = "proxmox-login" }
|
||||
proxmox-product-config = { version = "0.2.0", path = "proxmox-product-config" }
|
||||
proxmox-config-digest = { version = "0.1.0", path = "proxmox-config-digest" }
|
||||
proxmox-rest-server = { version = "0.8.8", path = "proxmox-rest-server" }
|
||||
proxmox-router = { version = "3.1.1", path = "proxmox-router" }
|
||||
proxmox-schema = { version = "4.0.0", path = "proxmox-schema" }
|
||||
proxmox-section-config = { version = "2.1.0", path = "proxmox-section-config" }
|
||||
proxmox-sendmail = { version = "0.1.0", path = "proxmox-sendmail" }
|
||||
proxmox-compression = { version = "0.1.1", path = "proxmox-compression" }
|
||||
proxmox-http = { version = "0.8.0", path = "proxmox-http" }
|
||||
proxmox-io = { version = "1.0.0", path = "proxmox-io" }
|
||||
proxmox-lang = { version = "1.1", path = "proxmox-lang" }
|
||||
proxmox-rest-server = { version = "0.3.0", path = "proxmox-rest-server" }
|
||||
proxmox-router = { version = "1.3.1", path = "proxmox-router" }
|
||||
proxmox-schema = { version = "1.3.7", path = "proxmox-schema" }
|
||||
proxmox-serde = { version = "0.1.1", path = "proxmox-serde", features = [ "serde_json" ] }
|
||||
proxmox-shared-memory = { version = "0.3.0", path = "proxmox-shared-memory" }
|
||||
proxmox-sortable-macro = { version = "0.1.3", path = "proxmox-sortable-macro" }
|
||||
proxmox-sys = { version = "0.6.6", path = "proxmox-sys" }
|
||||
proxmox-systemd = { version = "0.1.0", path = "proxmox-systemd" }
|
||||
proxmox-tfa = { version = "5.0.0", path = "proxmox-tfa" }
|
||||
proxmox-time = { version = "2.0.0", path = "proxmox-time" }
|
||||
proxmox-sortable-macro = { version = "0.1.2", path = "proxmox-sortable-macro" }
|
||||
proxmox-sys = { version = "0.4.2", path = "proxmox-sys" }
|
||||
proxmox-tfa = { version = "4.0.0", path = "proxmox-tfa" }
|
||||
proxmox-time = { version = "1.1.4", path = "proxmox-time" }
|
||||
proxmox-uuid = { version = "1.0.1", path = "proxmox-uuid" }
|
||||
proxmox-worker-task = { version = "0.1.0", path = "proxmox-worker-task" }
|
||||
|
81
Makefile
81
Makefile
@ -1,7 +1,6 @@
|
||||
# Shortcut for common operations:
|
||||
|
||||
# see proxmox-backup if we ever want to support other prefixes
|
||||
CRATES != echo proxmox-*/Cargo.toml | sed -e 's|/Cargo.toml||g'
|
||||
CRATES != cargo metadata --format-version=1 | jq -r .workspace_members'[]' | awk '{ print $$1 }'
|
||||
|
||||
# By default we just run checks:
|
||||
.PHONY: all
|
||||
@ -38,12 +37,6 @@ dinstall:
|
||||
autopkgtest build/$* build/*.deb -- null
|
||||
touch $@
|
||||
|
||||
.PHONY: list-packages
|
||||
list-packages:
|
||||
@for p in $(CRATES); do \
|
||||
echo "librust-$$p-dev"; \
|
||||
done
|
||||
|
||||
.PHONY: check
|
||||
check:
|
||||
cargo test
|
||||
@ -68,7 +61,7 @@ doc:
|
||||
clean:
|
||||
cargo clean
|
||||
rm -rf build/
|
||||
rm -f -- *-deb *-dsc *-autopkgtest *.build *.buildinfo *.changes
|
||||
rm -f -- *-deb *-dsc *-autopkgtest *.buildinfo *.changes
|
||||
|
||||
.PHONY: update
|
||||
update:
|
||||
@ -79,72 +72,4 @@ update:
|
||||
dcmd --deb rust-$*_*.changes \
|
||||
| grep -v '.changes$$' \
|
||||
| tar -cf "$@.tar" -T-; \
|
||||
cat "$@.tar" | ssh -X repoman@repo.proxmox.com upload --product devel --dist bookworm
|
||||
|
||||
%-install:
|
||||
rm -rf build/install/$*
|
||||
mkdir -p build/install/$*
|
||||
BUILDDIR=build/install/$* BUILDCMD=/usr/bin/true NOCONTROL=1 ./build.sh "$*" || true
|
||||
version="$$(dpkg-parsechangelog -l $*/debian/changelog -SVersion | sed -e 's/-.*//')"; \
|
||||
install -m755 -Dd "$(DESTDIR)/usr/share/cargo/registry/$*-$${version}"; \
|
||||
rm -rf "$(DESTDIR)/usr/share/cargo/registry/$*-$${version}"; \
|
||||
mv "build/install/$*/$*" \
|
||||
"$(DESTDIR)/usr/share/cargo/registry/$*-$${version}"; \
|
||||
mv "$(DESTDIR)/usr/share/cargo/registry/$*-$${version}/debian/cargo-checksum.json" \
|
||||
"$(DESTDIR)/usr/share/cargo/registry/$*-$${version}/.cargo-checksum.json"; \
|
||||
rm -rf "$(DESTDIR)/usr/share/cargo/registry/$*-$${version}/debian" \
|
||||
|
||||
.PHONY: install
|
||||
install: $(foreach c,$(CRATES), $c-install)
|
||||
|
||||
%-install-overlay: %-install
|
||||
version="$$(dpkg-parsechangelog -l $*/debian/changelog -SVersion | sed -e 's/-.*//')"; \
|
||||
setfattr -n trusted.overlay.opaque -v y \
|
||||
"$(DESTDIR)/usr/share/cargo/registry/$*-$${version}"
|
||||
install -m755 -Dd $(DESTDIR)/usr/lib/extension-release.d
|
||||
echo 'ID=_any' >$(DESTDIR)/usr/lib/extension-release.d/extension-release.$*
|
||||
|
||||
.PHONY: install-overlay
|
||||
install-overlay: $(foreach c,$(CRATES), $c-install-overlay)
|
||||
|
||||
# To make sure a sysext *replaces* a crate, rather than "merging" with it, we
|
||||
# need to be able to set the 'trusted.overlay.opaque' xattr. Since we cannot do
|
||||
# this as a user, we utilize `fakeroot` which keeps track of this for us, and
|
||||
# turn the final directory into an 'erofs' file system image.
|
||||
#
|
||||
# The reason is that if a crate gets changed like this:
|
||||
#
|
||||
# old:
|
||||
# src/foo.rs
|
||||
# new:
|
||||
# src/foo/mod.rs
|
||||
#
|
||||
# if its /usr/share/cargo/registry/$crate-$version directory was not marked as
|
||||
# "opaque", the merged file system would end up with both
|
||||
#
|
||||
# src/foo.rs
|
||||
# src/foo/mod.rs
|
||||
#
|
||||
# together.
|
||||
#
|
||||
# See https://docs.kernel.org/filesystems/overlayfs.html
|
||||
%-sysext:
|
||||
fakeroot $(MAKE) $*-sysext-do
|
||||
%-sysext-do:
|
||||
rm -f extensions/$*.raw
|
||||
rm -rf build/sysext/$*
|
||||
rm -rf build/install/$*
|
||||
$(MAKE) DESTDIR=build/sysext/$* $*-install-overlay
|
||||
mkdir -p extensions
|
||||
mkfs.erofs extensions/$*.raw build/sysext/$*
|
||||
|
||||
sysext:
|
||||
fakeroot $(MAKE) sysext-do
|
||||
sysext-do:
|
||||
rm -f extensions/proxmox-workspace.raw
|
||||
[ -n "$(NOCLEAN)" ] || rm -rf build/sysext/workspace
|
||||
$(MAKE) DESTDIR=build/sysext/workspace $(foreach c,$(CRATES), $c-install)
|
||||
install -m755 -Dd build/sysext/workspace/usr/lib/extension-release.d
|
||||
echo 'ID=_any' >build/sysext/workspace/usr/lib/extension-release.d/extension-release.proxmox-workspace
|
||||
mkdir -p extensions
|
||||
mkfs.erofs extensions/proxmox-workspace.raw build/sysext/workspace
|
||||
cat "$@.tar" | ssh -X repoman@repo.proxmox.com upload --product devel --dist bullseye
|
||||
|
153
README.md
153
README.md
@ -1,153 +0,0 @@
|
||||
# Local cargo config
|
||||
|
||||
This repository ships with a `.cargo/config.toml` that replaces the crates.io
|
||||
registry with packaged crates located in `/usr/share/cargo/registry`.
|
||||
|
||||
A similar config is also applied building with `dh_cargo`. Cargo.lock needs to
|
||||
be deleted when switching between packaged crates and crates.io, since the
|
||||
checksums are not compatible.
|
||||
|
||||
To reference new dependencies (or updated versions) that are not yet packaged,
|
||||
the dependency needs to point directly to a path or git source.
|
||||
|
||||
# Quickly installing all packages from apt
|
||||
|
||||
To avoid too many manual installations when `mk-build-deps` etc. fail, a quick
|
||||
way to install all the main packages of this workspace is to run:
|
||||
|
||||
# apt install $(make list-packages)
|
||||
|
||||
# Steps for Releases
|
||||
|
||||
- Run `./bump.sh <CRATE> [patch|minor|major|<VERSION>]`
|
||||
- Fill out changelog
|
||||
- Confirm bump commit
|
||||
- Build packages with `make <crate>-deb`.
|
||||
- Don't forget to commit updated d/control!
|
||||
|
||||
# Adding Crates
|
||||
|
||||
1. At the top level:
|
||||
- Generate the crate: `cargo new --lib the-name`
|
||||
- Sort the crate into `Cargo.toml`'s `workspace.members`
|
||||
|
||||
2. In the new crate's `Cargo.toml`:
|
||||
- In `[package]` set:
|
||||
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
exclude.workspace = true
|
||||
homepage.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
If a separate ``exclude`` is needed, separate it out as its own
|
||||
block above the inherited fields.
|
||||
- Add a meaningful `description`
|
||||
- Copy `debian/copyright` and `debian/debcargo.toml` from another subcrate.
|
||||
|
||||
3. In the new crate\'s `lib.rs`, add the following preamble on top:
|
||||
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
|
||||
4. Ideally (but optionally) in the new crate\'s `lib.rs`, add the following
|
||||
preamble on top as well:
|
||||
|
||||
#![deny(unsafe_op_in_unsafe_fn)]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
# Adding a new Dependency
|
||||
|
||||
1. At the top level:
|
||||
- Add it to `[workspace.dependencies]` specifying the version and any
|
||||
features that should be enabled throughout the workspace
|
||||
2. In each member\'s `Cargo.toml`:
|
||||
- Add it to the desired dependencies section with `workspace = true` and no
|
||||
version specified.
|
||||
- If this member requires additional features, add only the extra features
|
||||
to the member dependency.
|
||||
|
||||
# Updating a Dependency\'s Version
|
||||
|
||||
1. At the top level:
|
||||
- Bump the version in `[workspace.dependencies]` as desired.
|
||||
- Check for deprecations or breakage throughout the workspace.
|
||||
|
||||
# Notes on Workspace Inheritance
|
||||
|
||||
Common metadata (like authors, license, ..) are inherited throughout the
|
||||
workspace. If new fields are added that are identical for all crates, they
|
||||
should be defined in the top-level `Cargo.toml` file\'s `[workspace.package]`
|
||||
section, and inherited in all members explicitly by setting `FIELD.workspace =
|
||||
true` in the member\'s `[package]` section.
|
||||
|
||||
Dependency information is also inherited throughout the workspace, allowing a
|
||||
single dependency specification in the top-level `Cargo.toml` file to be used
|
||||
by all members.
|
||||
|
||||
Some restrictions apply:
|
||||
|
||||
- features can only be added in members, never removed (this includes
|
||||
`default_features = false`!)
|
||||
- the base feature set at the workspace level should be the minimum (possibly
|
||||
empty!) set required by all members
|
||||
- workspace dependency specifications cannot include `optional`
|
||||
- if needed, the `optional` flag needs to be set at the member level when
|
||||
using a workspace dependency
|
||||
|
||||
# Working with *other* projects while changing to *single crates here*
|
||||
|
||||
When crates from this workspace need changes caused by requirements in projects
|
||||
*outside* of this repository, it can often be annoying to keep building and
|
||||
installing `.deb` files.
|
||||
|
||||
Additionally, doing so often requires complete rebuilds as cargo will not pick
|
||||
up *file* changes of external dependencies.
|
||||
|
||||
One way to fix this is by actually changing the version. Since we cut away
|
||||
anything starting at the first hyphen in the version, we need to use a `+`
|
||||
(build metadata) version suffix.
|
||||
|
||||
Eg. turn `5.0.0` into `5.0.0+test8`.
|
||||
|
||||
There are 2 faster ways:
|
||||
|
||||
## Adding a `#[patch.crates-io]` section to the other project.
|
||||
|
||||
Note, however, that this requires *ALL* crates from this workspace to be listed,
|
||||
otherwise multiple conflicting versions of the same crate AND even the same
|
||||
numerical *version* might be built, causing *weird* errors.
|
||||
|
||||
The advantage, however, is that `cargo` will pick up on file changes and rebuild
|
||||
the crate on changes.
|
||||
|
||||
## An in-between: system extensions
|
||||
|
||||
An easy way to quickly get the new package "installed" *temporarily*, such that
|
||||
real apt package upgrades are unaffected is as a system-extension.
|
||||
|
||||
The easiest way — if no other extensions are used — is to just symlink the
|
||||
`extensions/` directory to `/run` as root via:
|
||||
|
||||
```
|
||||
# ln -s ${THIS_DIR}/extensions /run/extensions
|
||||
```
|
||||
|
||||
This does not persist across reboots.
|
||||
(Note: that the `extensions/` directory does not need to exist for the above to
|
||||
work.)
|
||||
|
||||
Once this is done, trying a new version of a crate works by:
|
||||
|
||||
1. Bump the version: eg. `5.0.0+test8` -> `5.0.0+test9`
|
||||
While this is technically optional (the sysext would then *replace*
|
||||
(temporarily) the installed version as long as the sysext is active), just
|
||||
like with `.deb` files, not doing this causes `cargo` to consider the crate
|
||||
to be unchanged and it will not rebuild its code.
|
||||
2. here: `$ make ${crate}-sysext` (rebuilds `extensions/${crate}.raw`)
|
||||
3. as root: `# systemd-sysext refresh` (activates current extensions images)
|
||||
4. in the other project: `$ cargo update && cargo build`
|
||||
|
||||
In the last step, cargo sees that there's a newer version of the crate available
|
||||
and uses that.
|
79
README.rst
Normal file
79
README.rst
Normal file
@ -0,0 +1,79 @@
|
||||
Local cargo config
|
||||
==================
|
||||
|
||||
This repository ships with a ``.cargo/config`` that replaces the crates.io
|
||||
registry with packaged crates located in ``/usr/share/cargo/registry``.
|
||||
|
||||
A similar config is also applied building with dh_cargo. Cargo.lock needs to be
|
||||
deleted when switching between packaged crates and crates.io, since the
|
||||
checksums are not compatible.
|
||||
|
||||
To reference new dependencies (or updated versions) that are not yet packaged,
|
||||
the dependency needs to point directly to a path or git source.
|
||||
|
||||
Steps for Releases
|
||||
==================
|
||||
|
||||
- Run ./bump.sh <CRATE> [patch|minor|major|<VERSION>]
|
||||
-- Fill out changelog
|
||||
-- Confirm bump commit
|
||||
- Build packages with `make deb`.
|
||||
-- Don't forget to commit updated d/control!
|
||||
|
||||
Adding Crates
|
||||
=============
|
||||
|
||||
1) At the top level:
|
||||
- Generate the crate: ``cargo new --lib the-name``
|
||||
- Sort the crate into ``Cargo.toml``'s ``workspace.members``
|
||||
|
||||
2) In the new crate's ``Cargo.toml``:
|
||||
- In ``[package]`` set:
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
edition.workspace = true
|
||||
exclude.workspace = true
|
||||
- Add a meaningful ``description``
|
||||
- Copy ``debian/copyright`` and ``debian/debcargo.toml`` from another subcrate.
|
||||
|
||||
Adding a new Dependency
|
||||
=======================
|
||||
|
||||
1) At the top level:
|
||||
- Add it to ``[workspace.dependencies]`` specifying the version and any
|
||||
features that should be enabled throughout the workspace
|
||||
|
||||
2) In each member's ``Cargo.toml``:
|
||||
- Add it to the desired dependencies section with ``workspace = true`` and no
|
||||
version specified.
|
||||
- If this member requires additional features, add only the extra features to
|
||||
the member dependency.
|
||||
|
||||
Updating a Dependency's Version
|
||||
===============================
|
||||
|
||||
1) At the top level:
|
||||
- Bump the version in ``[workspace.dependencies]`` as desired.
|
||||
- Check for deprecations or breakage throughout the workspace.
|
||||
|
||||
Notes on Workspace Inheritance
|
||||
==============================
|
||||
|
||||
Common metadata (like authors, license, ..) are inherited throughout the
|
||||
workspace. If new fields are added that are identical for all crates, they
|
||||
should be defined in the top-level ``Cargo.toml`` file's
|
||||
``[workspace.package]`` section, and inherited in all members explicitly by
|
||||
setting ``FIELD.workspace = true`` in the member's ``[package]`` section.
|
||||
|
||||
Dependency information is also inherited throughout the workspace, allowing a
|
||||
single dependency specification in the top-level Cargo.toml file to be used by
|
||||
all members.
|
||||
|
||||
Some restrictions apply:
|
||||
- features can only be added in members, never removed (this includes
|
||||
``default_features = false``!)
|
||||
- the base feature set at the workspace level should be the minimum (possibly
|
||||
empty!) set required by all members
|
||||
- workspace dependency specifications cannot include ``optional``
|
||||
- if needed, the ``optional`` flag needs to be set at the member level when
|
||||
using a workspace dependency
|
23
build.sh
23
build.sh
@ -7,30 +7,21 @@ export RUSTC=/usr/bin/rustc
|
||||
|
||||
CRATE=$1
|
||||
BUILDCMD=${BUILDCMD:-"dpkg-buildpackage -b -uc -us"}
|
||||
BUILDDIR="${BUILDDIR:-"build"}"
|
||||
|
||||
mkdir -p "${BUILDDIR}"
|
||||
echo system >"${BUILDDIR}"/rust-toolchain
|
||||
rm -rf ""${BUILDDIR}"/${CRATE}"
|
||||
mkdir -p build
|
||||
echo system >build/rust-toolchain
|
||||
rm -rf "build/${CRATE}"
|
||||
|
||||
CONTROL="$PWD/${CRATE}/debian/control"
|
||||
|
||||
if [ -e "$CONTROL" ]; then
|
||||
# check but only warn, debcargo fails anyway if crates are missing
|
||||
dpkg-checkbuilddeps $PWD/${CRATE}/debian/control || true
|
||||
[ "x$NOCONTROL" = 'x' ] && rm -f "$PWD/${CRATE}/debian/control"
|
||||
rm -f "$PWD/${CRATE}/debian/control"
|
||||
fi
|
||||
|
||||
debcargo package \
|
||||
--config "$PWD/${CRATE}/debian/debcargo.toml" \
|
||||
--changelog-ready \
|
||||
--no-overlay-write-back \
|
||||
--directory "$PWD/"${BUILDDIR}"/${CRATE}" \
|
||||
"${CRATE}" \
|
||||
"$(dpkg-parsechangelog -l "${CRATE}/debian/changelog" -SVersion | sed -e 's/-.*//')"
|
||||
|
||||
cd ""${BUILDDIR}"/${CRATE}"
|
||||
rm -f debian/source/format.debcargo.hint
|
||||
debcargo package --config "$PWD/${CRATE}/debian/debcargo.toml" --changelog-ready --no-overlay-write-back --directory "$PWD/build/${CRATE}" "${CRATE}" "$(dpkg-parsechangelog -l "${CRATE}/debian/changelog" -SVersion | sed -e 's/-.*//')"
|
||||
cd "build/${CRATE}"
|
||||
${BUILDCMD}
|
||||
|
||||
[ "x$NOCONTROL" = "x" ] && cp debian/control "$CONTROL"
|
||||
cp debian/control "$CONTROL"
|
||||
|
@ -1,26 +0,0 @@
|
||||
[package]
|
||||
name = "pbs-api-types"
|
||||
version = "0.2.0"
|
||||
license.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
description = "API types for Proxmox Backup Server"
|
||||
exclude.workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow.workspace = true
|
||||
const_format.workspace = true
|
||||
hex.workspace = true
|
||||
percent-encoding.workspace = true
|
||||
regex.workspace = true
|
||||
serde.workspace = true
|
||||
serde_plain.workspace = true
|
||||
|
||||
proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
|
||||
proxmox-apt-api-types.workspace = true
|
||||
proxmox-human-byte.workspace = true
|
||||
proxmox-lang.workspace=true
|
||||
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
|
||||
proxmox-serde.workspace = true
|
||||
proxmox-time.workspace = true
|
||||
proxmox-uuid = { workspace = true, features = [ "serde" ] }
|
@ -1,7 +0,0 @@
|
||||
rust-pbs-api-types (0.2.0) bookworm; urgency=medium
|
||||
|
||||
* imported from proxmox-backup repository
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 22 Jan 2025 09:40:51 +0100
|
||||
|
||||
|
@ -1,68 +0,0 @@
|
||||
Source: rust-pbs-api-types
|
||||
Section: rust
|
||||
Priority: optional
|
||||
Build-Depends: debhelper-compat (= 13),
|
||||
dh-sequence-cargo,
|
||||
cargo:native <!nocheck>,
|
||||
rustc:native <!nocheck>,
|
||||
libstd-rust-dev <!nocheck>,
|
||||
librust-anyhow-1+default-dev <!nocheck>,
|
||||
librust-const-format-0.2+default-dev <!nocheck>,
|
||||
librust-hex-0.4+default-dev <!nocheck>,
|
||||
librust-percent-encoding-2+default-dev (>= 2.1-~~) <!nocheck>,
|
||||
librust-proxmox-apt-api-types-1+default-dev (>= 1.0.2-~~) <!nocheck>,
|
||||
librust-proxmox-auth-api-0.4+api-types-dev <!nocheck>,
|
||||
librust-proxmox-auth-api-0.4+default-dev <!nocheck>,
|
||||
librust-proxmox-human-byte-0.1+default-dev <!nocheck>,
|
||||
librust-proxmox-lang-1+default-dev (>= 1.3-~~) <!nocheck>,
|
||||
librust-proxmox-schema-4+api-macro-dev <!nocheck>,
|
||||
librust-proxmox-schema-4+default-dev <!nocheck>,
|
||||
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~) <!nocheck>,
|
||||
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~) <!nocheck>,
|
||||
librust-proxmox-time-2+default-dev <!nocheck>,
|
||||
librust-proxmox-uuid-1+default-dev (>= 1.0.1-~~) <!nocheck>,
|
||||
librust-proxmox-uuid-1+serde-dev (>= 1.0.1-~~) <!nocheck>,
|
||||
librust-regex-1+default-dev (>= 1.5-~~) <!nocheck>,
|
||||
librust-serde-1+default-dev <!nocheck>,
|
||||
librust-serde-plain-1+default-dev <!nocheck>
|
||||
Maintainer: Proxmox Support Team <support@proxmox.com>
|
||||
Standards-Version: 4.7.0
|
||||
Vcs-Git: git://git.proxmox.com/git/proxmox.git
|
||||
Vcs-Browser: https://git.proxmox.com/?p=proxmox.git
|
||||
X-Cargo-Crate: pbs-api-types
|
||||
Rules-Requires-Root: no
|
||||
|
||||
Package: librust-pbs-api-types-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-anyhow-1+default-dev,
|
||||
librust-const-format-0.2+default-dev,
|
||||
librust-hex-0.4+default-dev,
|
||||
librust-percent-encoding-2+default-dev (>= 2.1-~~),
|
||||
librust-proxmox-apt-api-types-1+default-dev (>= 1.0.2-~~),
|
||||
librust-proxmox-auth-api-0.4+api-types-dev,
|
||||
librust-proxmox-auth-api-0.4+default-dev,
|
||||
librust-proxmox-human-byte-0.1+default-dev,
|
||||
librust-proxmox-lang-1+default-dev (>= 1.3-~~),
|
||||
librust-proxmox-schema-4+api-macro-dev,
|
||||
librust-proxmox-schema-4+default-dev,
|
||||
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
|
||||
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
|
||||
librust-proxmox-time-2+default-dev,
|
||||
librust-proxmox-uuid-1+default-dev (>= 1.0.1-~~),
|
||||
librust-proxmox-uuid-1+serde-dev (>= 1.0.1-~~),
|
||||
librust-regex-1+default-dev (>= 1.5-~~),
|
||||
librust-serde-1+default-dev,
|
||||
librust-serde-plain-1+default-dev
|
||||
Provides:
|
||||
librust-pbs-api-types+default-dev (= ${binary:Version}),
|
||||
librust-pbs-api-types-0-dev (= ${binary:Version}),
|
||||
librust-pbs-api-types-0+default-dev (= ${binary:Version}),
|
||||
librust-pbs-api-types-0.2-dev (= ${binary:Version}),
|
||||
librust-pbs-api-types-0.2+default-dev (= ${binary:Version}),
|
||||
librust-pbs-api-types-0.2.0-dev (= ${binary:Version}),
|
||||
librust-pbs-api-types-0.2.0+default-dev (= ${binary:Version})
|
||||
Description: API types for Proxmox Backup Server - Rust source code
|
||||
Source code for Debianized Rust crate "pbs-api-types"
|
@ -1,18 +0,0 @@
|
||||
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
|
||||
Files:
|
||||
*
|
||||
Copyright: 2019 - 2024 Proxmox Server Solutions GmbH <support@proxmox.com>
|
||||
License: AGPL-3.0-or-later
|
||||
This program is free software: you can redistribute it and/or modify it under
|
||||
the terms of the GNU Affero General Public License as published by the Free
|
||||
Software Foundation, either version 3 of the License, or (at your option) any
|
||||
later version.
|
||||
.
|
||||
This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
|
||||
details.
|
||||
.
|
||||
You should have received a copy of the GNU Affero General Public License along
|
||||
with this program. If not, see <https://www.gnu.org/licenses/>.
|
@ -1,7 +0,0 @@
|
||||
overlay = "."
|
||||
crate_src_path = ".."
|
||||
maintainer = "Proxmox Support Team <support@proxmox.com>"
|
||||
|
||||
[source]
|
||||
vcs_git = "git://git.proxmox.com/git/proxmox.git"
|
||||
vcs_browser = "https://git.proxmox.com/?p=proxmox.git"
|
@ -1 +0,0 @@
|
||||
3.0 (native)
|
@ -1,332 +0,0 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use const_format::concatcp;
|
||||
use serde::de::{value, IntoDeserializer};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_lang::constnamedbitmap;
|
||||
use proxmox_schema::{
|
||||
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
|
||||
};
|
||||
|
||||
use crate::PROXMOX_SAFE_ID_REGEX_STR;
|
||||
|
||||
const_regex! {
|
||||
pub ACL_PATH_REGEX = concatcp!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR, ")+", r")$");
|
||||
}
|
||||
|
||||
// define Privilege bitfield
|
||||
|
||||
constnamedbitmap! {
|
||||
/// Contains a list of privilege name to privilege value mappings.
|
||||
///
|
||||
/// The names are used when displaying/persisting privileges anywhere, the values are used to
|
||||
/// allow easy matching of privileges as bitflags.
|
||||
PRIVILEGES: u64 => {
|
||||
/// Sys.Audit allows knowing about the system and its status
|
||||
PRIV_SYS_AUDIT("Sys.Audit");
|
||||
/// Sys.Modify allows modifying system-level configuration
|
||||
PRIV_SYS_MODIFY("Sys.Modify");
|
||||
/// Sys.Modify allows to poweroff/reboot/.. the system
|
||||
PRIV_SYS_POWER_MANAGEMENT("Sys.PowerManagement");
|
||||
|
||||
/// Datastore.Audit allows knowing about a datastore,
|
||||
/// including reading the configuration entry and listing its contents
|
||||
PRIV_DATASTORE_AUDIT("Datastore.Audit");
|
||||
/// Datastore.Allocate allows creating or deleting datastores
|
||||
PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
|
||||
/// Datastore.Modify allows modifying a datastore and its contents
|
||||
PRIV_DATASTORE_MODIFY("Datastore.Modify");
|
||||
/// Datastore.Read allows reading arbitrary backup contents
|
||||
PRIV_DATASTORE_READ("Datastore.Read");
|
||||
/// Allows verifying a datastore
|
||||
PRIV_DATASTORE_VERIFY("Datastore.Verify");
|
||||
|
||||
/// Datastore.Backup allows Datastore.Read|Verify and creating new snapshots,
|
||||
/// but also requires backup ownership
|
||||
PRIV_DATASTORE_BACKUP("Datastore.Backup");
|
||||
/// Datastore.Prune allows deleting snapshots,
|
||||
/// but also requires backup ownership
|
||||
PRIV_DATASTORE_PRUNE("Datastore.Prune");
|
||||
|
||||
/// Permissions.Modify allows modifying ACLs
|
||||
PRIV_PERMISSIONS_MODIFY("Permissions.Modify");
|
||||
|
||||
/// Remote.Audit allows reading remote.cfg and sync.cfg entries
|
||||
PRIV_REMOTE_AUDIT("Remote.Audit");
|
||||
/// Remote.Modify allows modifying remote.cfg
|
||||
PRIV_REMOTE_MODIFY("Remote.Modify");
|
||||
/// Remote.Read allows reading data from a configured `Remote`
|
||||
PRIV_REMOTE_READ("Remote.Read");
|
||||
/// Remote.DatastoreBackup allows creating new snapshots on remote datastores
|
||||
PRIV_REMOTE_DATASTORE_BACKUP("Remote.DatastoreBackup");
|
||||
/// Remote.DatastoreModify allows to modify remote datastores
|
||||
PRIV_REMOTE_DATASTORE_MODIFY("Remote.DatastoreModify");
|
||||
/// Remote.DatastorePrune allows deleting snapshots on remote datastores
|
||||
PRIV_REMOTE_DATASTORE_PRUNE("Remote.DatastorePrune");
|
||||
|
||||
/// Sys.Console allows access to the system's console
|
||||
PRIV_SYS_CONSOLE("Sys.Console");
|
||||
|
||||
/// Tape.Audit allows reading tape backup configuration and status
|
||||
PRIV_TAPE_AUDIT("Tape.Audit");
|
||||
/// Tape.Modify allows modifying tape backup configuration
|
||||
PRIV_TAPE_MODIFY("Tape.Modify");
|
||||
/// Tape.Write allows writing tape media
|
||||
PRIV_TAPE_WRITE("Tape.Write");
|
||||
/// Tape.Read allows reading tape backup configuration and media contents
|
||||
PRIV_TAPE_READ("Tape.Read");
|
||||
|
||||
/// Realm.Allocate allows viewing, creating, modifying and deleting realms
|
||||
PRIV_REALM_ALLOCATE("Realm.Allocate");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
|
||||
PRIVILEGES
|
||||
.iter()
|
||||
.fold(Vec::new(), |mut priv_names, (name, value)| {
|
||||
if value & privs != 0 {
|
||||
priv_names.push(name);
|
||||
}
|
||||
priv_names
|
||||
})
|
||||
}
|
||||
|
||||
/// Admin always has all privileges. It can do everything except a few actions
|
||||
/// which are limited to the 'root@pam` superuser
|
||||
pub const ROLE_ADMIN: u64 = u64::MAX;
|
||||
|
||||
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
||||
pub const ROLE_NO_ACCESS: u64 = 0;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Audit can view configuration and status information, but not modify it.
|
||||
pub const ROLE_AUDIT: u64 = 0
|
||||
| PRIV_SYS_AUDIT
|
||||
| PRIV_DATASTORE_AUDIT;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Datastore.Admin can do anything on the datastore.
|
||||
pub const ROLE_DATASTORE_ADMIN: u64 = 0
|
||||
| PRIV_DATASTORE_AUDIT
|
||||
| PRIV_DATASTORE_MODIFY
|
||||
| PRIV_DATASTORE_READ
|
||||
| PRIV_DATASTORE_VERIFY
|
||||
| PRIV_DATASTORE_BACKUP
|
||||
| PRIV_DATASTORE_PRUNE;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Datastore.Reader can read/verify datastore content and do restore
|
||||
pub const ROLE_DATASTORE_READER: u64 = 0
|
||||
| PRIV_DATASTORE_AUDIT
|
||||
| PRIV_DATASTORE_VERIFY
|
||||
| PRIV_DATASTORE_READ;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Datastore.Backup can do backup and restore, but no prune.
|
||||
pub const ROLE_DATASTORE_BACKUP: u64 = 0
|
||||
| PRIV_DATASTORE_BACKUP;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Datastore.PowerUser can do backup, restore, and prune.
|
||||
pub const ROLE_DATASTORE_POWERUSER: u64 = 0
|
||||
| PRIV_DATASTORE_PRUNE
|
||||
| PRIV_DATASTORE_BACKUP;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Datastore.Audit can audit the datastore.
|
||||
pub const ROLE_DATASTORE_AUDIT: u64 = 0
|
||||
| PRIV_DATASTORE_AUDIT;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Remote.Audit can audit the remote
|
||||
pub const ROLE_REMOTE_AUDIT: u64 = 0
|
||||
| PRIV_REMOTE_AUDIT;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Remote.Admin can do anything on the remote.
|
||||
pub const ROLE_REMOTE_ADMIN: u64 = 0
|
||||
| PRIV_REMOTE_AUDIT
|
||||
| PRIV_REMOTE_MODIFY
|
||||
| PRIV_REMOTE_READ;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Remote.SyncOperator can do read and prune on the remote.
|
||||
pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0
|
||||
| PRIV_REMOTE_AUDIT
|
||||
| PRIV_REMOTE_READ;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Remote.SyncPushOperator can read and push snapshots to the remote.
|
||||
pub const ROLE_REMOTE_SYNC_PUSH_OPERATOR: u64 = 0
|
||||
| PRIV_REMOTE_AUDIT
|
||||
| PRIV_REMOTE_DATASTORE_BACKUP;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Remote.DatastorePowerUser can read and push snapshots to the remote, and prune owned snapshots
|
||||
/// and groups but not create or remove namespaces.
|
||||
pub const ROLE_REMOTE_DATASTORE_POWERUSER: u64 = 0
|
||||
| PRIV_REMOTE_AUDIT
|
||||
| PRIV_REMOTE_DATASTORE_BACKUP
|
||||
| PRIV_REMOTE_DATASTORE_PRUNE;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Remote.DatastoreAdmin can read and push snapshots to the remote, prune owned snapshots
|
||||
/// and groups, as well as create or remove namespaces.
|
||||
pub const ROLE_REMOTE_DATASTORE_ADMIN: u64 = 0
|
||||
| PRIV_REMOTE_AUDIT
|
||||
| PRIV_REMOTE_DATASTORE_BACKUP
|
||||
| PRIV_REMOTE_DATASTORE_MODIFY
|
||||
| PRIV_REMOTE_DATASTORE_PRUNE;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Tape.Audit can audit the tape backup configuration and media content
|
||||
pub const ROLE_TAPE_AUDIT: u64 = 0
|
||||
| PRIV_TAPE_AUDIT;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Tape.Admin can do anything on the tape backup
|
||||
pub const ROLE_TAPE_ADMIN: u64 = 0
|
||||
| PRIV_TAPE_AUDIT
|
||||
| PRIV_TAPE_MODIFY
|
||||
| PRIV_TAPE_READ
|
||||
| PRIV_TAPE_WRITE;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Tape.Operator can do tape backup and restore (but no configuration changes)
|
||||
pub const ROLE_TAPE_OPERATOR: u64 = 0
|
||||
| PRIV_TAPE_AUDIT
|
||||
| PRIV_TAPE_READ
|
||||
| PRIV_TAPE_WRITE;
|
||||
|
||||
#[rustfmt::skip]
|
||||
#[allow(clippy::identity_op)]
|
||||
/// Tape.Reader can do read and inspect tape content
|
||||
pub const ROLE_TAPE_READER: u64 = 0
|
||||
| PRIV_TAPE_AUDIT
|
||||
| PRIV_TAPE_READ;
|
||||
|
||||
/// NoAccess can be used to remove privileges from specific (sub-)paths
|
||||
pub const ROLE_NAME_NO_ACCESS: &str = "NoAccess";
|
||||
|
||||
#[api(
|
||||
type_text: "<role>",
|
||||
)]
|
||||
#[repr(u64)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
/// Enum representing roles via their [PRIVILEGES] combination.
|
||||
///
|
||||
/// Since privileges are implemented as bitflags, each unique combination of privileges maps to a
|
||||
/// single, unique `u64` value that is used in this enum definition.
|
||||
pub enum Role {
|
||||
/// Administrator
|
||||
Admin = ROLE_ADMIN,
|
||||
/// Auditor
|
||||
Audit = ROLE_AUDIT,
|
||||
/// Disable Access
|
||||
NoAccess = ROLE_NO_ACCESS,
|
||||
/// Datastore Administrator
|
||||
DatastoreAdmin = ROLE_DATASTORE_ADMIN,
|
||||
/// Datastore Reader (inspect datastore content and do restores)
|
||||
DatastoreReader = ROLE_DATASTORE_READER,
|
||||
/// Datastore Backup (backup and restore owned backups)
|
||||
DatastoreBackup = ROLE_DATASTORE_BACKUP,
|
||||
/// Datastore PowerUser (backup, restore and prune owned backup)
|
||||
DatastorePowerUser = ROLE_DATASTORE_POWERUSER,
|
||||
/// Datastore Auditor
|
||||
DatastoreAudit = ROLE_DATASTORE_AUDIT,
|
||||
/// Remote Auditor
|
||||
RemoteAudit = ROLE_REMOTE_AUDIT,
|
||||
/// Remote Administrator
|
||||
RemoteAdmin = ROLE_REMOTE_ADMIN,
|
||||
/// Synchronization Operator
|
||||
RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
|
||||
/// Synchronisation Operator (push direction)
|
||||
RemoteSyncPushOperator = ROLE_REMOTE_SYNC_PUSH_OPERATOR,
|
||||
/// Remote Datastore Prune
|
||||
RemoteDatastorePowerUser = ROLE_REMOTE_DATASTORE_POWERUSER,
|
||||
/// Remote Datastore Admin
|
||||
RemoteDatastoreAdmin = ROLE_REMOTE_DATASTORE_ADMIN,
|
||||
/// Tape Auditor
|
||||
TapeAudit = ROLE_TAPE_AUDIT,
|
||||
/// Tape Administrator
|
||||
TapeAdmin = ROLE_TAPE_ADMIN,
|
||||
/// Tape Operator
|
||||
TapeOperator = ROLE_TAPE_OPERATOR,
|
||||
/// Tape Reader
|
||||
TapeReader = ROLE_TAPE_READER,
|
||||
}
|
||||
|
||||
impl FromStr for Role {
|
||||
type Err = value::Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Self::deserialize(s.into_deserializer())
|
||||
}
|
||||
}
|
||||
|
||||
pub const ACL_PATH_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&ACL_PATH_REGEX);
|
||||
|
||||
pub const ACL_PATH_SCHEMA: Schema = StringSchema::new("Access control path.")
|
||||
.format(&ACL_PATH_FORMAT)
|
||||
.min_length(1)
|
||||
.max_length(128)
|
||||
.schema();
|
||||
|
||||
pub const ACL_PROPAGATE_SCHEMA: Schema =
|
||||
BooleanSchema::new("Allow to propagate (inherit) permissions.")
|
||||
.default(true)
|
||||
.schema();
|
||||
|
||||
pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' property.")
|
||||
.format(&ApiStringFormat::Enum(&[
|
||||
EnumEntry::new("user", "User"),
|
||||
EnumEntry::new("group", "Group"),
|
||||
]))
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
propagate: {
|
||||
schema: ACL_PROPAGATE_SCHEMA,
|
||||
},
|
||||
path: {
|
||||
schema: ACL_PATH_SCHEMA,
|
||||
},
|
||||
ugid_type: {
|
||||
schema: ACL_UGID_TYPE_SCHEMA,
|
||||
},
|
||||
ugid: {
|
||||
type: String,
|
||||
description: "User or Group ID.",
|
||||
},
|
||||
roleid: {
|
||||
type: Role,
|
||||
}
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
||||
/// ACL list entry.
|
||||
pub struct AclListItem {
|
||||
pub path: String,
|
||||
pub ugid: String,
|
||||
pub ugid_type: String,
|
||||
pub propagate: bool,
|
||||
pub roleid: String,
|
||||
}
|
@ -1,98 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{api, Updater};
|
||||
|
||||
use super::{
|
||||
LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
|
||||
SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA,
|
||||
};
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"realm": {
|
||||
schema: REALM_ID_SCHEMA,
|
||||
},
|
||||
"comment": {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
"verify": {
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"sync-defaults-options": {
|
||||
schema: SYNC_DEFAULTS_STRING_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"sync-attributes": {
|
||||
schema: SYNC_ATTRIBUTES_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"user-classes" : {
|
||||
optional: true,
|
||||
schema: USER_CLASSES_SCHEMA,
|
||||
},
|
||||
"base-dn" : {
|
||||
schema: LDAP_DOMAIN_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"bind-dn" : {
|
||||
schema: LDAP_DOMAIN_SCHEMA,
|
||||
optional: true,
|
||||
}
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater, Clone)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// AD realm configuration properties.
|
||||
pub struct AdRealmConfig {
|
||||
#[updater(skip)]
|
||||
pub realm: String,
|
||||
/// AD server address
|
||||
pub server1: String,
|
||||
/// Fallback AD server address
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub server2: Option<String>,
|
||||
/// AD server Port
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub port: Option<u16>,
|
||||
/// Base domain name. Users are searched under this domain using a `subtree search`.
|
||||
/// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be
|
||||
/// overridden if the need arises.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub base_dn: Option<String>,
|
||||
/// Comment
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
/// Connection security
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mode: Option<LdapMode>,
|
||||
/// Verify server certificate
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub verify: Option<bool>,
|
||||
/// CA certificate to use for the server. The path can point to
|
||||
/// either a file, or a directory. If it points to a file,
|
||||
/// the PEM-formatted X.509 certificate stored at the path
|
||||
/// will be added as a trusted certificate.
|
||||
/// If the path points to a directory,
|
||||
/// the directory replaces the system's default certificate
|
||||
/// store at `/etc/ssl/certs` - Every file in the directory
|
||||
/// will be loaded as a trusted certificate.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub capath: Option<String>,
|
||||
/// Bind domain to use for looking up users
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bind_dn: Option<String>,
|
||||
/// Custom LDAP search filter for user sync
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub filter: Option<String>,
|
||||
/// Default options for AD sync
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub sync_defaults_options: Option<String>,
|
||||
/// List of LDAP attributes to sync from AD to user config
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub sync_attributes: Option<String>,
|
||||
/// User ``objectClass`` classes to sync
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub user_classes: Option<String>,
|
||||
}
|
@ -1,95 +0,0 @@
|
||||
use std::fmt::{self, Display};
|
||||
|
||||
use anyhow::Error;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::api;
|
||||
|
||||
#[api(default: "encrypt")]
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Defines whether data is encrypted (using an AEAD cipher), only signed, or neither.
|
||||
pub enum CryptMode {
|
||||
/// Don't encrypt.
|
||||
None,
|
||||
/// Encrypt.
|
||||
Encrypt,
|
||||
/// Only sign.
|
||||
SignOnly,
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Hash, Clone, Deserialize, Serialize)]
|
||||
#[serde(transparent)]
|
||||
/// 32-byte fingerprint, usually calculated with SHA256.
|
||||
pub struct Fingerprint {
|
||||
#[serde(with = "bytes_as_fingerprint")]
|
||||
bytes: [u8; 32],
|
||||
}
|
||||
|
||||
impl Fingerprint {
|
||||
pub fn new(bytes: [u8; 32]) -> Self {
|
||||
Self { bytes }
|
||||
}
|
||||
pub fn bytes(&self) -> &[u8; 32] {
|
||||
&self.bytes
|
||||
}
|
||||
pub fn signature(&self) -> String {
|
||||
as_fingerprint(&self.bytes)
|
||||
}
|
||||
}
|
||||
|
||||
/// Display as short key ID
|
||||
impl Display for Fingerprint {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", as_fingerprint(&self.bytes[0..8]))
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for Fingerprint {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Error> {
|
||||
let mut tmp = s.to_string();
|
||||
tmp.retain(|c| c != ':');
|
||||
let mut bytes = [0u8; 32];
|
||||
hex::decode_to_slice(&tmp, &mut bytes)?;
|
||||
Ok(Fingerprint::new(bytes))
|
||||
}
|
||||
}
|
||||
|
||||
/// Format raw bytes as a colon-separated, lowercase hex string, e.g. `de:ad:be:ef`.
///
/// `{:02x}` yields the same lowercase two-digit hex as `hex::encode`, so the
/// output is byte-for-byte identical to the previous implementation — but
/// without the `unsafe { from_utf8_unchecked }` block or the intermediate
/// `Vec<&str>` of chunk views.
fn as_fingerprint(bytes: &[u8]) -> String {
    bytes
        .iter()
        .map(|b| format!("{:02x}", b))
        .collect::<Vec<String>>()
        .join(":")
}
|
||||
|
||||
pub mod bytes_as_fingerprint {
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
use serde::{Deserialize, Deserializer, Serializer};
|
||||
|
||||
pub fn serialize<S>(bytes: &[u8; 32], serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let s = super::as_fingerprint(bytes);
|
||||
serializer.serialize_str(&s)
|
||||
}
|
||||
|
||||
pub fn deserialize<'de, D>(deserializer: D) -> Result<[u8; 32], D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
// TODO: more efficiently implement with a Visitor implementing visit_str using split() and
|
||||
// hex::decode by-byte
|
||||
let mut s = String::deserialize(deserializer)?;
|
||||
s.retain(|c| c != ':');
|
||||
let mut out = MaybeUninit::<[u8; 32]>::uninit();
|
||||
hex::decode_to_slice(s.as_bytes(), unsafe { &mut (*out.as_mut_ptr())[..] })
|
||||
.map_err(serde::de::Error::custom)?;
|
||||
Ok(unsafe { out.assume_init() })
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -1,30 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::api;
|
||||
|
||||
#[api]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// General status information about a running VM file-restore daemon
|
||||
pub struct RestoreDaemonStatus {
|
||||
/// VM uptime in seconds
|
||||
pub uptime: i64,
|
||||
/// time left until auto-shutdown, keep in mind that this is useless when 'keep-timeout' is
|
||||
/// not set, as then the status call will have reset the timer before returning the value
|
||||
pub timeout: i64,
|
||||
}
|
||||
|
||||
#[api]
|
||||
#[derive(Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// The desired format of the result.
|
||||
pub enum FileRestoreFormat {
|
||||
/// Plain file (only works for single files)
|
||||
Plain,
|
||||
/// PXAR archive
|
||||
Pxar,
|
||||
/// ZIP archive
|
||||
Zip,
|
||||
/// TAR archive
|
||||
Tar,
|
||||
}
|
@ -1,844 +0,0 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::bail;
|
||||
use const_format::concatcp;
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::*;
|
||||
|
||||
use crate::{
|
||||
Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid,
|
||||
BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA,
|
||||
DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
|
||||
PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
|
||||
};
|
||||
|
||||
const_regex! {
|
||||
|
||||
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
|
||||
pub VERIFICATION_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"):");
|
||||
/// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
|
||||
pub SYNC_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR, r"):(", PROXMOX_SAFE_ID_REGEX_STR, r")(?::(", BACKUP_NS_RE, r"))?:");
|
||||
}
|
||||
|
||||
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
pub const SYNC_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run sync job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(
|
||||
proxmox_time::verify_calendar_event,
|
||||
))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const GC_SCHEDULE_SCHEMA: Schema =
|
||||
StringSchema::new("Run garbage collection job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(
|
||||
proxmox_time::verify_calendar_event,
|
||||
))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const PRUNE_SCHEDULE_SCHEMA: Schema = StringSchema::new("Run prune job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(
|
||||
proxmox_time::verify_calendar_event,
|
||||
))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const VERIFICATION_SCHEDULE_SCHEMA: Schema =
|
||||
StringSchema::new("Run verify job at specified schedule.")
|
||||
.format(&ApiStringFormat::VerifyFn(
|
||||
proxmox_time::verify_calendar_event,
|
||||
))
|
||||
.type_text("<calendar-event>")
|
||||
.schema();
|
||||
|
||||
pub const REMOVE_VANISHED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||
"Delete vanished backups. This remove the local copy if the remote backup was deleted.",
|
||||
)
|
||||
.default(false)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"next-run": {
|
||||
description: "Estimated time of the next run (UNIX epoch).",
|
||||
optional: true,
|
||||
type: Integer,
|
||||
},
|
||||
"last-run-state": {
|
||||
description: "Result of the last run.",
|
||||
optional: true,
|
||||
type: String,
|
||||
},
|
||||
"last-run-upid": {
|
||||
description: "Task UPID of the last run.",
|
||||
optional: true,
|
||||
type: String,
|
||||
},
|
||||
"last-run-endtime": {
|
||||
description: "Endtime of the last run.",
|
||||
optional: true,
|
||||
type: Integer,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Default, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Job Scheduling Status
|
||||
pub struct JobScheduleStatus {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub next_run: Option<i64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub last_run_state: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub last_run_upid: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub last_run_endtime: Option<i64>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// When do we send notifications
|
||||
pub enum Notify {
|
||||
/// Never send notification
|
||||
Never,
|
||||
/// Send notifications for failed and successful jobs
|
||||
Always,
|
||||
/// Send notifications for failed jobs only
|
||||
Error,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
gc: {
|
||||
type: Notify,
|
||||
optional: true,
|
||||
},
|
||||
verify: {
|
||||
type: Notify,
|
||||
optional: true,
|
||||
},
|
||||
sync: {
|
||||
type: Notify,
|
||||
optional: true,
|
||||
},
|
||||
prune: {
|
||||
type: Notify,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
/// Datastore notify settings
|
||||
pub struct DatastoreNotify {
|
||||
/// Garbage collection settings
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub gc: Option<Notify>,
|
||||
/// Verify job setting
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub verify: Option<Notify>,
|
||||
/// Sync job setting
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub sync: Option<Notify>,
|
||||
/// Prune job setting
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub prune: Option<Notify>,
|
||||
}
|
||||
|
||||
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
|
||||
"Datastore notification setting, enum can be one of 'always', 'never', or 'error'.",
|
||||
)
|
||||
.format(&ApiStringFormat::PropertyString(
|
||||
&DatastoreNotify::API_SCHEMA,
|
||||
))
|
||||
.schema();
|
||||
|
||||
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
|
||||
"Do not verify backups that are already verified if their verification is not outdated.",
|
||||
)
|
||||
.default(true)
|
||||
.schema();
|
||||
|
||||
pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
|
||||
IntegerSchema::new("Days after that a verification becomes outdated. (0 is deprecated)'")
|
||||
.minimum(0)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
id: {
|
||||
schema: JOB_ID_SCHEMA,
|
||||
},
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
"ignore-verified": {
|
||||
optional: true,
|
||||
schema: IGNORE_VERIFIED_BACKUPS_SCHEMA,
|
||||
},
|
||||
"outdated-after": {
|
||||
optional: true,
|
||||
schema: VERIFICATION_OUTDATED_AFTER_SCHEMA,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
schedule: {
|
||||
optional: true,
|
||||
schema: VERIFICATION_SCHEDULE_SCHEMA,
|
||||
},
|
||||
ns: {
|
||||
optional: true,
|
||||
schema: BACKUP_NAMESPACE_SCHEMA,
|
||||
},
|
||||
"max-depth": {
|
||||
optional: true,
|
||||
schema: crate::NS_MAX_DEPTH_SCHEMA,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Verification Job
|
||||
pub struct VerificationJobConfig {
|
||||
/// unique ID to address this job
|
||||
#[updater(skip)]
|
||||
pub id: String,
|
||||
/// the datastore ID this verification job affects
|
||||
pub store: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// if not set to false, check the age of the last snapshot verification to filter
|
||||
/// out recent ones, depending on 'outdated_after' configuration.
|
||||
pub ignore_verified: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// Reverify snapshots after X days, never if 0. Ignored if 'ignore_verified' is false.
|
||||
pub outdated_after: Option<i64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// when to schedule this job in calendar event notation
|
||||
pub schedule: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
/// on which backup namespace to run the verification recursively
|
||||
pub ns: Option<BackupNamespace>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
/// how deep the verify should go from the `ns` level downwards. Passing 0 verifies only the
|
||||
/// snapshots on the same level as the passed `ns`, or the datastore root if none.
|
||||
pub max_depth: Option<usize>,
|
||||
}
|
||||
|
||||
impl VerificationJobConfig {
|
||||
pub fn acl_path(&self) -> Vec<&str> {
|
||||
match self.ns.as_ref() {
|
||||
Some(ns) => ns.acl_path(&self.store),
|
||||
None => vec!["datastore", &self.store],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
config: {
|
||||
type: VerificationJobConfig,
|
||||
},
|
||||
status: {
|
||||
type: JobScheduleStatus,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Status of Verification Job
|
||||
pub struct VerificationJobStatus {
|
||||
#[serde(flatten)]
|
||||
pub config: VerificationJobConfig,
|
||||
#[serde(flatten)]
|
||||
pub status: JobScheduleStatus,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
pool: {
|
||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||
},
|
||||
drive: {
|
||||
schema: DRIVE_NAME_SCHEMA,
|
||||
},
|
||||
"eject-media": {
|
||||
description: "Eject media upon job completion.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
"export-media-set": {
|
||||
description: "Export media set upon job completion.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
"latest-only": {
|
||||
description: "Backup latest snapshots only.",
|
||||
type: bool,
|
||||
optional: true,
|
||||
},
|
||||
"notify-user": {
|
||||
optional: true,
|
||||
type: Userid,
|
||||
},
|
||||
"group-filter": {
|
||||
schema: GROUP_FILTER_LIST_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
ns: {
|
||||
type: BackupNamespace,
|
||||
optional: true,
|
||||
},
|
||||
"max-depth": {
|
||||
schema: crate::NS_MAX_DEPTH_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Tape Backup Job Setup
|
||||
pub struct TapeBackupJobSetup {
|
||||
pub store: String,
|
||||
pub pool: String,
|
||||
pub drive: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub eject_media: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub export_media_set: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub latest_only: Option<bool>,
|
||||
/// Send job email notification to this user
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub notify_user: Option<Userid>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub notification_mode: Option<NotificationMode>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub group_filter: Option<Vec<GroupFilter>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub ns: Option<BackupNamespace>,
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub max_depth: Option<usize>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
id: {
|
||||
schema: JOB_ID_SCHEMA,
|
||||
},
|
||||
setup: {
|
||||
type: TapeBackupJobSetup,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
schedule: {
|
||||
optional: true,
|
||||
schema: SYNC_SCHEDULE_SCHEMA,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Tape Backup Job
|
||||
pub struct TapeBackupJobConfig {
|
||||
#[updater(skip)]
|
||||
pub id: String,
|
||||
#[serde(flatten)]
|
||||
pub setup: TapeBackupJobSetup,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub schedule: Option<String>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
config: {
|
||||
type: TapeBackupJobConfig,
|
||||
},
|
||||
status: {
|
||||
type: JobScheduleStatus,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Status of Tape Backup Job
|
||||
pub struct TapeBackupJobStatus {
|
||||
#[serde(flatten)]
|
||||
pub config: TapeBackupJobConfig,
|
||||
#[serde(flatten)]
|
||||
pub status: JobScheduleStatus,
|
||||
/// Next tape used (best guess)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub next_media_label: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
/// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
|
||||
pub enum FilterType {
|
||||
/// BackupGroup type - either `vm`, `ct`, or `host`.
|
||||
BackupType(BackupType),
|
||||
/// Full identifier of BackupGroup, including type
|
||||
Group(String),
|
||||
/// A regular expression matched against the full identifier of the BackupGroup
|
||||
Regex(Regex),
|
||||
}
|
||||
|
||||
impl PartialEq for FilterType {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
match (self, other) {
|
||||
(Self::BackupType(a), Self::BackupType(b)) => a == b,
|
||||
(Self::Group(a), Self::Group(b)) => a == b,
|
||||
(Self::Regex(a), Self::Regex(b)) => a.as_str() == b.as_str(),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for FilterType {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
Ok(match s.split_once(':') {
|
||||
Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| FilterType::Group(value.to_string()))?,
|
||||
Some(("type", value)) => FilterType::BackupType(value.parse()?),
|
||||
Some(("regex", value)) => FilterType::Regex(Regex::new(value)?),
|
||||
Some((ty, _value)) => bail!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty),
|
||||
None => bail!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// used for serializing below, caution!
|
||||
impl std::fmt::Display for FilterType {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
FilterType::BackupType(backup_type) => write!(f, "type:{}", backup_type),
|
||||
FilterType::Group(backup_group) => write!(f, "group:{}", backup_group),
|
||||
FilterType::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct GroupFilter {
|
||||
pub is_exclude: bool,
|
||||
pub filter_type: FilterType,
|
||||
}
|
||||
|
||||
impl PartialEq for GroupFilter {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.filter_type == other.filter_type && self.is_exclude == other.is_exclude
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for GroupFilter {}
|
||||
|
||||
impl std::str::FromStr for GroupFilter {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let (is_exclude, type_str) = match s.split_once(':') {
|
||||
Some(("include", value)) => (false, value),
|
||||
Some(("exclude", value)) => (true, value),
|
||||
_ => (false, s),
|
||||
};
|
||||
|
||||
Ok(GroupFilter {
|
||||
is_exclude,
|
||||
filter_type: type_str.parse()?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// used for serializing below, caution!
impl std::fmt::Display for GroupFilter {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Only the exclude polarity gets an explicit prefix; include is the
        // default and is rendered without one, matching what `FromStr` accepts.
        if self.is_exclude {
            f.write_str("exclude:")?;
        }
        std::fmt::Display::fmt(&self.filter_type, f)
    }
}
|
||||
|
||||
proxmox_serde::forward_deserialize_to_from_str!(GroupFilter);
|
||||
proxmox_serde::forward_serialize_to_display!(GroupFilter);
|
||||
|
||||
fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
|
||||
GroupFilter::from_str(input).map(|_| ())
|
||||
}
|
||||
|
||||
pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
|
||||
"Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE'). Can be inverted by prepending 'exclude:'.")
|
||||
.format(&ApiStringFormat::VerifyFn(verify_group_filter))
|
||||
.type_text("[<exclude:|include:>]<type:<vm|ct|host>|group:GROUP|regex:RE>")
|
||||
.schema();
|
||||
|
||||
pub const GROUP_FILTER_LIST_SCHEMA: Schema =
|
||||
ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
|
||||
|
||||
pub const TRANSFER_LAST_SCHEMA: Schema =
|
||||
IntegerSchema::new("Limit transfer to last N snapshots (per group), skipping others")
|
||||
.minimum(1)
|
||||
.schema();
|
||||
|
||||
#[api()]
|
||||
#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Direction of the sync job, push or pull
|
||||
pub enum SyncDirection {
|
||||
/// Sync direction pull
|
||||
#[default]
|
||||
Pull,
|
||||
/// Sync direction push
|
||||
Push,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for SyncDirection {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
SyncDirection::Pull => f.write_str("pull"),
|
||||
SyncDirection::Push => f.write_str("push"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub const RESYNC_CORRUPT_SCHEMA: Schema =
|
||||
BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.")
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
id: {
|
||||
schema: JOB_ID_SCHEMA,
|
||||
},
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
ns: {
|
||||
type: BackupNamespace,
|
||||
optional: true,
|
||||
},
|
||||
"owner": {
|
||||
type: Authid,
|
||||
optional: true,
|
||||
},
|
||||
remote: {
|
||||
schema: REMOTE_ID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"remote-store": {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
"remote-ns": {
|
||||
type: BackupNamespace,
|
||||
optional: true,
|
||||
},
|
||||
"remove-vanished": {
|
||||
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"max-depth": {
|
||||
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
limit: {
|
||||
type: RateLimitConfig,
|
||||
},
|
||||
schedule: {
|
||||
optional: true,
|
||||
schema: SYNC_SCHEDULE_SCHEMA,
|
||||
},
|
||||
"group-filter": {
|
||||
schema: GROUP_FILTER_LIST_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"transfer-last": {
|
||||
schema: TRANSFER_LAST_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"resync-corrupt": {
|
||||
schema: RESYNC_CORRUPT_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"sync-direction": {
|
||||
type: SyncDirection,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Sync Job
|
||||
pub struct SyncJobConfig {
|
||||
#[updater(skip)]
|
||||
pub id: String,
|
||||
pub store: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ns: Option<BackupNamespace>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub owner: Option<Authid>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// None implies local sync.
|
||||
pub remote: Option<String>,
|
||||
pub remote_store: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub remote_ns: Option<BackupNamespace>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub remove_vanished: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub max_depth: Option<usize>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub schedule: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub group_filter: Option<Vec<GroupFilter>>,
|
||||
#[serde(flatten)]
|
||||
pub limit: RateLimitConfig,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub transfer_last: Option<usize>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub resync_corrupt: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub sync_direction: Option<SyncDirection>,
|
||||
}
|
||||
|
||||
impl SyncJobConfig {
|
||||
pub fn acl_path(&self) -> Vec<&str> {
|
||||
match self.ns.as_ref() {
|
||||
Some(ns) => ns.acl_path(&self.store),
|
||||
None => vec!["datastore", &self.store],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn remote_acl_path(&self) -> Option<Vec<&str>> {
|
||||
let remote = self.remote.as_ref()?;
|
||||
match &self.remote_ns {
|
||||
Some(remote_ns) => Some(remote_ns.remote_acl_path(remote, &self.remote_store)),
|
||||
None => Some(vec!["remote", remote, &self.remote_store]),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
config: {
|
||||
type: SyncJobConfig,
|
||||
},
|
||||
status: {
|
||||
type: JobScheduleStatus,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Status of Sync Job
|
||||
pub struct SyncJobStatus {
|
||||
#[serde(flatten)]
|
||||
pub config: SyncJobConfig,
|
||||
#[serde(flatten)]
|
||||
pub status: JobScheduleStatus,
|
||||
}
|
||||
|
||||
/// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API
|
||||
/// call to prune a specific group, where `max-depth` makes no sense.
|
||||
#[api(
|
||||
properties: {
|
||||
"keep-last": {
|
||||
schema: crate::PRUNE_SCHEMA_KEEP_LAST,
|
||||
optional: true,
|
||||
},
|
||||
"keep-hourly": {
|
||||
schema: crate::PRUNE_SCHEMA_KEEP_HOURLY,
|
||||
optional: true,
|
||||
},
|
||||
"keep-daily": {
|
||||
schema: crate::PRUNE_SCHEMA_KEEP_DAILY,
|
||||
optional: true,
|
||||
},
|
||||
"keep-weekly": {
|
||||
schema: crate::PRUNE_SCHEMA_KEEP_WEEKLY,
|
||||
optional: true,
|
||||
},
|
||||
"keep-monthly": {
|
||||
schema: crate::PRUNE_SCHEMA_KEEP_MONTHLY,
|
||||
optional: true,
|
||||
},
|
||||
"keep-yearly": {
|
||||
schema: crate::PRUNE_SCHEMA_KEEP_YEARLY,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Common pruning options
|
||||
pub struct KeepOptions {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub keep_last: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub keep_hourly: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub keep_daily: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub keep_weekly: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub keep_monthly: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub keep_yearly: Option<u64>,
|
||||
}
|
||||
|
||||
impl KeepOptions {
|
||||
pub fn keeps_something(&self) -> bool {
|
||||
self.keep_last.unwrap_or(0)
|
||||
+ self.keep_hourly.unwrap_or(0)
|
||||
+ self.keep_daily.unwrap_or(0)
|
||||
+ self.keep_weekly.unwrap_or(0)
|
||||
+ self.keep_monthly.unwrap_or(0)
|
||||
+ self.keep_yearly.unwrap_or(0)
|
||||
> 0
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
keep: {
|
||||
type: KeepOptions,
|
||||
},
|
||||
ns: {
|
||||
type: BackupNamespace,
|
||||
optional: true,
|
||||
},
|
||||
"max-depth": {
|
||||
schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Common pruning options
|
||||
pub struct PruneJobOptions {
|
||||
#[serde(flatten)]
|
||||
pub keep: KeepOptions,
|
||||
|
||||
/// The (optional) recursion depth
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub max_depth: Option<usize>,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ns: Option<BackupNamespace>,
|
||||
}
|
||||
|
||||
impl PruneJobOptions {
|
||||
pub fn keeps_something(&self) -> bool {
|
||||
self.keep.keeps_something()
|
||||
}
|
||||
|
||||
pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
|
||||
match &self.ns {
|
||||
Some(ns) => ns.acl_path(store),
|
||||
None => vec!["datastore", store],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
disable: {
|
||||
type: Boolean,
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
id: {
|
||||
schema: JOB_ID_SCHEMA,
|
||||
},
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
schedule: {
|
||||
schema: PRUNE_SCHEDULE_SCHEMA,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
options: {
|
||||
type: PruneJobOptions,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Deserialize, Serialize, Updater, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Prune configuration.
|
||||
pub struct PruneJobConfig {
|
||||
/// unique ID to address this job
|
||||
#[updater(skip)]
|
||||
pub id: String,
|
||||
|
||||
pub store: String,
|
||||
|
||||
/// Disable this job.
|
||||
#[serde(default, skip_serializing_if = "is_false")]
|
||||
#[updater(serde(skip_serializing_if = "Option::is_none"))]
|
||||
pub disable: bool,
|
||||
|
||||
pub schedule: String,
|
||||
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
|
||||
#[serde(flatten)]
|
||||
pub options: PruneJobOptions,
|
||||
}
|
||||
|
||||
impl PruneJobConfig {
    /// ACL object path of the datastore (and optional namespace) this job prunes.
    pub fn acl_path(&self) -> Vec<&str> {
        self.options.acl_path(&self.store)
    }
}
|
||||
|
||||
/// Serde helper: skip serializing a `bool` field when it is `false`.
fn is_false(b: &bool) -> bool {
    !*b
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
config: {
|
||||
type: PruneJobConfig,
|
||||
},
|
||||
status: {
|
||||
type: JobScheduleStatus,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Status of prune job
|
||||
pub struct PruneJobStatus {
|
||||
#[serde(flatten)]
|
||||
pub config: PruneJobConfig,
|
||||
#[serde(flatten)]
|
||||
pub status: JobScheduleStatus,
|
||||
}
|
@ -1,55 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::api;
|
||||
|
||||
use crate::CERT_FINGERPRINT_SHA256_SCHEMA;
|
||||
|
||||
#[api(default: "scrypt")]
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Key derivation function for password protected encryption keys.
pub enum Kdf {
    /// Do not encrypt the key.
    None,
    /// Encrypt the key with a password using SCrypt.
    Scrypt,
    /// Encrypt the key with a password using PBKDF2.
    PBKDF2,
}
|
||||
|
||||
impl Default for Kdf {
    #[inline]
    fn default() -> Self {
        // SCrypt is the default KDF, matching `#[api(default: "scrypt")]` above.
        Kdf::Scrypt
    }
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
kdf: {
|
||||
type: Kdf,
|
||||
},
|
||||
fingerprint: {
|
||||
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Deserialize, Serialize)]
|
||||
/// Encryption Key Information
|
||||
pub struct KeyInfo {
|
||||
/// Path to key (if stored in a file)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub path: Option<String>,
|
||||
pub kdf: Kdf,
|
||||
/// Key creation time
|
||||
pub created: i64,
|
||||
/// Key modification time
|
||||
pub modified: i64,
|
||||
/// Key fingerprint
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub fingerprint: Option<String>,
|
||||
/// Password hint
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub hint: Option<String>,
|
||||
}
|
@ -1,208 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater};
|
||||
|
||||
use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
|
||||
|
||||
#[api()]
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
/// LDAP connection type
|
||||
pub enum LdapMode {
|
||||
/// Plaintext LDAP connection
|
||||
#[serde(rename = "ldap")]
|
||||
#[default]
|
||||
Ldap,
|
||||
/// Secure STARTTLS connection
|
||||
#[serde(rename = "ldap+starttls")]
|
||||
StartTls,
|
||||
/// Secure LDAPS connection
|
||||
#[serde(rename = "ldaps")]
|
||||
Ldaps,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"realm": {
|
||||
schema: REALM_ID_SCHEMA,
|
||||
},
|
||||
"comment": {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
"verify": {
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"sync-defaults-options": {
|
||||
schema: SYNC_DEFAULTS_STRING_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"sync-attributes": {
|
||||
schema: SYNC_ATTRIBUTES_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"user-classes" : {
|
||||
optional: true,
|
||||
schema: USER_CLASSES_SCHEMA,
|
||||
},
|
||||
"base-dn" : {
|
||||
schema: LDAP_DOMAIN_SCHEMA,
|
||||
},
|
||||
"bind-dn" : {
|
||||
schema: LDAP_DOMAIN_SCHEMA,
|
||||
optional: true,
|
||||
}
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater, Clone)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// LDAP configuration properties.
|
||||
pub struct LdapRealmConfig {
|
||||
#[updater(skip)]
|
||||
pub realm: String,
|
||||
/// LDAP server address
|
||||
pub server1: String,
|
||||
/// Fallback LDAP server address
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub server2: Option<String>,
|
||||
/// Port
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub port: Option<u16>,
|
||||
/// Base domain name. Users are searched under this domain using a `subtree search`.
|
||||
pub base_dn: String,
|
||||
/// Username attribute. Used to map a ``userid`` to LDAP to an LDAP ``dn``.
|
||||
pub user_attr: String,
|
||||
/// Comment
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
/// Connection security
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mode: Option<LdapMode>,
|
||||
/// Verify server certificate
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub verify: Option<bool>,
|
||||
/// CA certificate to use for the server. The path can point to
|
||||
/// either a file, or a directory. If it points to a file,
|
||||
/// the PEM-formatted X.509 certificate stored at the path
|
||||
/// will be added as a trusted certificate.
|
||||
/// If the path points to a directory,
|
||||
/// the directory replaces the system's default certificate
|
||||
/// store at `/etc/ssl/certs` - Every file in the directory
|
||||
/// will be loaded as a trusted certificate.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub capath: Option<String>,
|
||||
/// Bind domain to use for looking up users
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bind_dn: Option<String>,
|
||||
/// Custom LDAP search filter for user sync
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub filter: Option<String>,
|
||||
/// Default options for LDAP sync
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub sync_defaults_options: Option<String>,
|
||||
/// List of attributes to sync from LDAP to user config
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub sync_attributes: Option<String>,
|
||||
/// User ``objectClass`` classes to sync
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub user_classes: Option<String>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"remove-vanished": {
|
||||
optional: true,
|
||||
schema: REMOVE_VANISHED_SCHEMA,
|
||||
},
|
||||
},
|
||||
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Default options for LDAP synchronization runs
|
||||
pub struct SyncDefaultsOptions {
|
||||
/// How to handle vanished properties/users
|
||||
pub remove_vanished: Option<String>,
|
||||
/// Enable new users after sync
|
||||
pub enable_new: Option<bool>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// remove-vanished options
|
||||
pub enum RemoveVanished {
|
||||
/// Delete ACLs for vanished users
|
||||
Acl,
|
||||
/// Remove vanished users
|
||||
Entry,
|
||||
/// Remove vanished properties from users (e.g. email)
|
||||
Properties,
|
||||
}
|
||||
|
||||
pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain").schema();
|
||||
|
||||
pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options")
|
||||
.format(&ApiStringFormat::PropertyString(
|
||||
&SyncDefaultsOptions::API_SCHEMA,
|
||||
))
|
||||
.schema();
|
||||
|
||||
const REMOVE_VANISHED_DESCRIPTION: &str =
|
||||
"A semicolon-separated list of things to remove when they or the user \
|
||||
vanishes during user synchronization. The following values are possible: ``entry`` removes the \
|
||||
user when not returned from the sync; ``properties`` removes any \
|
||||
properties on existing user that do not appear in the source. \
|
||||
``acl`` removes ACLs when the user is not returned from the sync.";
|
||||
|
||||
pub const REMOVE_VANISHED_SCHEMA: Schema = StringSchema::new(REMOVE_VANISHED_DESCRIPTION)
|
||||
.format(&ApiStringFormat::PropertyString(&REMOVE_VANISHED_ARRAY))
|
||||
.schema();
|
||||
|
||||
pub const REMOVE_VANISHED_ARRAY: Schema = ArraySchema::new(
|
||||
"Array of remove-vanished options",
|
||||
&RemoveVanished::API_SCHEMA,
|
||||
)
|
||||
.min_length(1)
|
||||
.schema();
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize, Updater, Default, Debug)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Determine which LDAP attributes should be synced to which user attributes
|
||||
pub struct SyncAttributes {
|
||||
/// Name of the LDAP attribute containing the user's email address
|
||||
pub email: Option<String>,
|
||||
/// Name of the LDAP attribute containing the user's first name
|
||||
pub firstname: Option<String>,
|
||||
/// Name of the LDAP attribute containing the user's last name
|
||||
pub lastname: Option<String>,
|
||||
}
|
||||
|
||||
const SYNC_ATTRIBUTES_TEXT: &str = "Comma-separated list of key=value pairs for specifying \
|
||||
which LDAP attributes map to which PBS user field. For example, \
|
||||
to map the LDAP attribute ``mail`` to PBS's ``email``, write \
|
||||
``email=mail``.";
|
||||
|
||||
pub const SYNC_ATTRIBUTES_SCHEMA: Schema = StringSchema::new(SYNC_ATTRIBUTES_TEXT)
|
||||
.format(&ApiStringFormat::PropertyString(
|
||||
&SyncAttributes::API_SCHEMA,
|
||||
))
|
||||
.schema();
|
||||
|
||||
pub const USER_CLASSES_ARRAY: Schema = ArraySchema::new(
|
||||
"Array of user classes",
|
||||
&StringSchema::new("user class").schema(),
|
||||
)
|
||||
.min_length(1)
|
||||
.schema();
|
||||
|
||||
const USER_CLASSES_TEXT: &str = "Comma-separated list of allowed objectClass values for \
|
||||
user synchronization. For instance, if ``user-classes`` is set to ``person,user``, \
|
||||
then user synchronization will consider all LDAP entities \
|
||||
where ``objectClass: person`` `or` ``objectClass: user``.";
|
||||
|
||||
pub const USER_CLASSES_SCHEMA: Schema = StringSchema::new(USER_CLASSES_TEXT)
|
||||
.format(&ApiStringFormat::PropertyString(&USER_CLASSES_ARRAY))
|
||||
.default("inetorgperson,posixaccount,person,user")
|
||||
.schema();
|
@ -1,373 +0,0 @@
|
||||
//! Basic API types used by most of the PBS code.
|
||||
|
||||
use const_format::concatcp;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub mod percent_encoding;
|
||||
|
||||
use proxmox_schema::{
|
||||
api, const_regex, ApiStringFormat, ApiType, ArraySchema, ReturnType, Schema, StringSchema,
|
||||
};
|
||||
use proxmox_time::parse_daily_duration;
|
||||
|
||||
use proxmox_auth_api::types::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR};
|
||||
|
||||
pub use proxmox_schema::api_types::SAFE_ID_FORMAT as PROXMOX_SAFE_ID_FORMAT;
|
||||
pub use proxmox_schema::api_types::SAFE_ID_REGEX as PROXMOX_SAFE_ID_REGEX;
|
||||
pub use proxmox_schema::api_types::SAFE_ID_REGEX_STR as PROXMOX_SAFE_ID_REGEX_STR;
|
||||
pub use proxmox_schema::api_types::{
|
||||
BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX,
|
||||
};
|
||||
pub use proxmox_schema::api_types::{DNS_ALIAS_REGEX, DNS_NAME_OR_IP_REGEX, DNS_NAME_REGEX};
|
||||
pub use proxmox_schema::api_types::{FINGERPRINT_SHA256_REGEX, SHA256_HEX_REGEX};
|
||||
pub use proxmox_schema::api_types::{
|
||||
GENERIC_URI_REGEX, HOSTNAME_REGEX, HOST_PORT_REGEX, HTTP_URL_REGEX,
|
||||
};
|
||||
pub use proxmox_schema::api_types::{MULTI_LINE_COMMENT_REGEX, SINGLE_LINE_COMMENT_REGEX};
|
||||
pub use proxmox_schema::api_types::{PASSWORD_REGEX, SYSTEMD_DATETIME_REGEX, UUID_REGEX};
|
||||
|
||||
pub use proxmox_schema::api_types::{CIDR_FORMAT, CIDR_REGEX};
|
||||
pub use proxmox_schema::api_types::{CIDR_V4_FORMAT, CIDR_V4_REGEX};
|
||||
pub use proxmox_schema::api_types::{CIDR_V6_FORMAT, CIDR_V6_REGEX};
|
||||
pub use proxmox_schema::api_types::{IPRE_STR, IP_FORMAT, IP_REGEX};
|
||||
pub use proxmox_schema::api_types::{IPV4RE_STR, IP_V4_FORMAT, IP_V4_REGEX};
|
||||
pub use proxmox_schema::api_types::{IPV6RE_STR, IP_V6_FORMAT, IP_V6_REGEX};
|
||||
|
||||
pub use proxmox_schema::api_types::COMMENT_SCHEMA as SINGLE_LINE_COMMENT_SCHEMA;
|
||||
pub use proxmox_schema::api_types::HOSTNAME_SCHEMA;
|
||||
pub use proxmox_schema::api_types::HOST_PORT_SCHEMA;
|
||||
pub use proxmox_schema::api_types::HTTP_URL_SCHEMA;
|
||||
pub use proxmox_schema::api_types::MULTI_LINE_COMMENT_SCHEMA;
|
||||
pub use proxmox_schema::api_types::NODE_SCHEMA;
|
||||
pub use proxmox_schema::api_types::SINGLE_LINE_COMMENT_FORMAT;
|
||||
pub use proxmox_schema::api_types::{
|
||||
BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
|
||||
};
|
||||
pub use proxmox_schema::api_types::{CERT_FINGERPRINT_SHA256_SCHEMA, FINGERPRINT_SHA256_FORMAT};
|
||||
pub use proxmox_schema::api_types::{DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA};
|
||||
pub use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, DNS_NAME_OR_IP_SCHEMA};
|
||||
pub use proxmox_schema::api_types::{PASSWORD_FORMAT, PASSWORD_SCHEMA};
|
||||
pub use proxmox_schema::api_types::{SERVICE_ID_SCHEMA, UUID_FORMAT};
|
||||
pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA};
|
||||
|
||||
use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR};
|
||||
|
||||
// re-export APT API types
|
||||
pub use proxmox_apt_api_types::{
|
||||
APTChangeRepositoryOptions, APTGetChangelogOptions, APTRepositoriesResult, APTRepositoryFile,
|
||||
APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo, APTStandardRepository,
|
||||
APTUpdateInfo, APTUpdateOptions,
|
||||
};
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*";
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub const BACKUP_TYPE_RE: &str = r"(?:host|vm|ct)";
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub const BACKUP_TIME_RE: &str = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z";
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub const BACKUP_NS_RE: &str =
|
||||
concatcp!("(?:",
|
||||
"(?:", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR,
|
||||
")?");
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub const BACKUP_NS_PATH_RE: &str =
|
||||
concatcp!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/");
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub const SNAPSHOT_PATH_REGEX_STR: &str =
|
||||
concatcp!(
|
||||
r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")/(", BACKUP_TIME_RE, r")",
|
||||
);
|
||||
|
||||
#[rustfmt::skip]
|
||||
pub const GROUP_OR_SNAPSHOT_PATH_REGEX_STR: &str =
|
||||
concatcp!(
|
||||
r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")(?:/(", BACKUP_TIME_RE, r"))?",
|
||||
);
|
||||
|
||||
mod acl;
|
||||
pub use acl::*;
|
||||
|
||||
mod datastore;
|
||||
pub use datastore::*;
|
||||
|
||||
mod jobs;
|
||||
pub use jobs::*;
|
||||
|
||||
mod key_derivation;
|
||||
pub use key_derivation::{Kdf, KeyInfo};
|
||||
|
||||
mod maintenance;
|
||||
pub use maintenance::*;
|
||||
|
||||
mod network;
|
||||
pub use network::*;
|
||||
|
||||
mod node;
|
||||
pub use node::*;
|
||||
|
||||
pub use proxmox_auth_api::types as userid;
|
||||
pub use proxmox_auth_api::types::{Authid, Userid};
|
||||
pub use proxmox_auth_api::types::{Realm, RealmRef};
|
||||
pub use proxmox_auth_api::types::{Tokenname, TokennameRef};
|
||||
pub use proxmox_auth_api::types::{Username, UsernameRef};
|
||||
pub use proxmox_auth_api::types::{
|
||||
PROXMOX_GROUP_ID_SCHEMA, PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA,
|
||||
};
|
||||
|
||||
#[macro_use]
|
||||
mod user;
|
||||
pub use user::*;
|
||||
|
||||
pub use proxmox_schema::upid::*;
|
||||
|
||||
mod crypto;
|
||||
pub use crypto::{bytes_as_fingerprint, CryptMode, Fingerprint};
|
||||
|
||||
pub mod file_restore;
|
||||
|
||||
mod openid;
|
||||
pub use openid::*;
|
||||
|
||||
mod ldap;
|
||||
pub use ldap::*;
|
||||
|
||||
mod ad;
|
||||
pub use ad::*;
|
||||
|
||||
mod remote;
|
||||
pub use remote::*;
|
||||
|
||||
mod pathpatterns;
|
||||
pub use pathpatterns::*;
|
||||
|
||||
mod tape;
|
||||
pub use tape::*;
|
||||
|
||||
mod traffic_control;
|
||||
pub use traffic_control::*;
|
||||
|
||||
mod zfs;
|
||||
pub use zfs::*;
|
||||
|
||||
mod metrics;
|
||||
pub use metrics::*;
|
||||
|
||||
mod version;
|
||||
pub use version::*;
|
||||
|
||||
const_regex! {
|
||||
// just a rough check - dummy acceptor is used before persisting
|
||||
pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$";
|
||||
|
||||
pub BACKUP_REPO_URL_REGEX = concatcp!(
|
||||
r"^^(?:(?:(",
|
||||
USER_ID_REGEX_STR, "|", APITOKEN_ID_REGEX_STR,
|
||||
")@)?(",
|
||||
DNS_NAME_STR, "|", IPRE_BRACKET_STR,
|
||||
"):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR, r")$"
|
||||
);
|
||||
|
||||
pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
|
||||
}
|
||||
|
||||
pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
|
||||
|
||||
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
|
||||
|
||||
pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);
|
||||
|
||||
pub const DAILY_DURATION_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));
|
||||
|
||||
pub const SEARCH_DOMAIN_SCHEMA: Schema =
|
||||
StringSchema::new("Search domain for host-name lookup.").schema();
|
||||
|
||||
pub const FIRST_DNS_SERVER_SCHEMA: Schema = StringSchema::new("First name server IP address.")
|
||||
.format(&IP_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const SECOND_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Second name server IP address.")
|
||||
.format(&IP_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server IP address.")
|
||||
.format(&IP_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema =
|
||||
StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2")
|
||||
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema =
|
||||
StringSchema::new("OpenSSL ciphersuites list used by the proxy for TLS 1.3")
|
||||
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
|
||||
.format(&PASSWORD_FORMAT)
|
||||
.min_length(8)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(2)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
pub const SUBSCRIPTION_KEY_SCHEMA: Schema =
|
||||
StringSchema::new("Proxmox Backup Server subscription key.")
|
||||
.format(&SUBSCRIPTION_KEY_FORMAT)
|
||||
.min_length(15)
|
||||
.max_length(16)
|
||||
.schema();
|
||||
|
||||
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
|
||||
"Prevent changes if current configuration file has different \
|
||||
SHA256 digest. This can be used to prevent concurrent \
|
||||
modifications.",
|
||||
)
|
||||
.format(&PVE_CONFIG_DIGEST_FORMAT)
|
||||
.schema();
|
||||
|
||||
/// API schema format definition for repository URLs
|
||||
pub const BACKUP_REPO_URL: ApiStringFormat = ApiStringFormat::Pattern(&BACKUP_REPO_URL_REGEX);
|
||||
|
||||
// Complex type definitions
|
||||
|
||||
#[api()]
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
/// Storage space usage information.
|
||||
pub struct StorageStatus {
|
||||
/// Total space (bytes).
|
||||
pub total: u64,
|
||||
/// Used space (bytes).
|
||||
pub used: u64,
|
||||
/// Available space (bytes).
|
||||
pub avail: u64,
|
||||
}
|
||||
|
||||
pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
|
||||
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||
.min_length(1)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// Node Power command type.
|
||||
pub enum NodePowerCommand {
|
||||
/// Restart the server
|
||||
Reboot,
|
||||
/// Shutdown the server
|
||||
Shutdown,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Eq, PartialEq, Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// The state (result) of a finished worker task.
|
||||
pub enum TaskStateType {
|
||||
/// Ok
|
||||
OK,
|
||||
/// Warning
|
||||
Warning,
|
||||
/// Error
|
||||
Error,
|
||||
/// Unknown
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[api(
    properties: {
        upid: { schema: UPID::API_SCHEMA },
    },
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Task properties.
pub struct TaskListItem {
    // Unique Process/Task ID; validated by UPID::API_SCHEMA (see api properties above).
    pub upid: String,
    /// The node name where the task is running on.
    pub node: String,
    /// The Unix PID
    pub pid: i64,
    // NOTE(review): the next two fields carry the same description; `pstart` is
    // presumably the process start value (not an epoch timestamp) — confirm
    // before changing the user-visible description text.
    /// The task start time (Epoch)
    pub pstart: u64,
    /// The task start time (Epoch)
    pub starttime: i64,
    /// Worker type (arbitrary ASCII string)
    pub worker_type: String,
    /// Worker ID (arbitrary ASCII string)
    pub worker_id: Option<String>,
    /// The authenticated entity who started the task
    pub user: String,
    /// The task end time (Epoch)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub endtime: Option<i64>,
    /// Task end status
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
}

// Return type of the node task-list API endpoint: an array of TaskListItem.
pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
    optional: false,
    schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(),
};
|
||||
|
||||
#[api]
#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
/// type of the realm
pub enum RealmType {
    /// The PAM realm
    Pam,
    /// The PBS realm
    Pbs,
    /// An OpenID Connect realm
    OpenId,
    /// An LDAP realm
    Ldap,
    /// An Active Directory (AD) realm
    Ad,
}

// Display / FromStr delegate to the (lowercase) serde representation.
serde_plain::derive_display_from_serialize!(RealmType);
serde_plain::derive_fromstr_from_deserialize!(RealmType);
|
||||
|
||||
#[api(
    properties: {
        realm: {
            schema: REALM_ID_SCHEMA,
        },
        "type": {
            type: RealmType,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Deserialize, Serialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic Information about a realm
pub struct BasicRealmInfo {
    pub realm: String,
    // "type" is a Rust keyword, hence the `ty` field name plus a serde rename.
    #[serde(rename = "type")]
    pub ty: RealmType,
    /// True if it is the default realm
    #[serde(skip_serializing_if = "Option::is_none")]
    pub default: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
|
@ -1,110 +0,0 @@
|
||||
use anyhow::{bail, Error};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::borrow::Cow;
|
||||
|
||||
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
|
||||
|
||||
const_regex! {
    // Any string (possibly empty) without control characters.
    pub MAINTENANCE_MESSAGE_REGEX = r"^[[:^cntrl:]]*$";
}

pub const MAINTENANCE_MESSAGE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&MAINTENANCE_MESSAGE_REGEX);

// Free-form (control-character free) message of at most 64 characters.
pub const MAINTENANCE_MESSAGE_SCHEMA: Schema =
    StringSchema::new("Message describing the reason for the maintenance.")
        .format(&MAINTENANCE_MESSAGE_FORMAT)
        .max_length(64)
        .schema();
|
||||
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
/// Operation requirements, used when checking for maintenance mode.
pub enum Operation {
    /// for any read operation like backup restore or RRD metric collection
    Read,
    /// for any write/delete operation, like backup create or GC
    Write,
    /// for any purely logical operation on the in-memory state of the datastore, e.g., to check if
    /// some mutex could be locked (e.g., GC already running?)
    ///
    /// NOTE: one must *not* do any IO operations when only holding this Op state
    Lookup,
    // GarbageCollect or Delete?
}
|
||||
|
||||
#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// Maintenance type.
pub enum MaintenanceType {
    // TODO:
    //  - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
    //    operation, so that one can enable a mode where nothing new can be added but stuff can be
    //    cleaned
    /// Only read operations are allowed on the datastore.
    ReadOnly,
    /// Neither read nor write operations are allowed on the datastore.
    Offline,
    /// The datastore is being deleted.
    Delete,
    /// The (removable) datastore is being unmounted.
    Unmount,
}
// Display / FromStr delegate to the kebab-case serde representation.
serde_plain::derive_display_from_serialize!(MaintenanceType);
serde_plain::derive_fromstr_from_deserialize!(MaintenanceType);
|
||||
|
||||
#[api(
    properties: {
        type: {
            type: MaintenanceType,
        },
        message: {
            optional: true,
            schema: MAINTENANCE_MESSAGE_SCHEMA,
        }
    },
    default_key: "type",
)]
#[derive(Deserialize, Serialize)]
/// Maintenance mode
pub struct MaintenanceMode {
    /// Type of maintenance ("read-only" or "offline").
    #[serde(rename = "type")]
    pub ty: MaintenanceType,

    /// Reason for maintenance.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
}
|
||||
|
||||
impl MaintenanceMode {
|
||||
/// Used for deciding whether the datastore is cleared from the internal cache
|
||||
pub fn clear_from_cache(&self) -> bool {
|
||||
self.ty == MaintenanceType::Offline
|
||||
|| self.ty == MaintenanceType::Delete
|
||||
|| self.ty == MaintenanceType::Unmount
|
||||
}
|
||||
|
||||
pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
|
||||
if self.ty == MaintenanceType::Delete {
|
||||
bail!("datastore is being deleted");
|
||||
}
|
||||
|
||||
let message = percent_encoding::percent_decode_str(self.message.as_deref().unwrap_or(""))
|
||||
.decode_utf8()
|
||||
.unwrap_or(Cow::Borrowed(""));
|
||||
|
||||
if let Some(Operation::Lookup) = operation {
|
||||
return Ok(());
|
||||
} else if self.ty == MaintenanceType::Unmount {
|
||||
bail!("datastore is being unmounted");
|
||||
} else if self.ty == MaintenanceType::Offline {
|
||||
bail!("offline maintenance mode: {}", message);
|
||||
} else if self.ty == MaintenanceType::ReadOnly {
|
||||
if let Some(Operation::Write) = operation {
|
||||
bail!("read-only maintenance mode: {}", message);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -1,255 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::{
|
||||
HOST_PORT_SCHEMA, HTTP_URL_SCHEMA, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
|
||||
};
|
||||
use proxmox_schema::{api, Schema, StringSchema, Updater};
|
||||
|
||||
// Identifier for a configured metric server: safe-id characters, 3..=32 long.
pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .schema();

pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.")
    .min_length(1)
    .max_length(32)
    .default("proxmox")
    .schema();

pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.")
    .min_length(1)
    .max_length(32)
    .default("proxmox")
    .schema();
|
||||
|
||||
/// Serde helper: default value for the `enable` flags below
/// (used via `#[serde(default = "return_true")]`).
fn return_true() -> bool {
    true
}

/// Serde helper: skip serializing an `enable` flag that still has its
/// default value (used via `#[serde(skip_serializing_if = "is_true")]`).
fn is_true(b: &bool) -> bool {
    *b
}
|
||||
|
||||
#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        enable: {
            type: bool,
            optional: true,
            default: true,
        },
        host: {
            schema: HOST_PORT_SCHEMA,
        },
        mtu: {
            type: u16,
            optional: true,
            default: 1500,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (UDP)
pub struct InfluxDbUdp {
    // Section-config key; not updatable after creation.
    #[updater(skip)]
    pub name: String,
    // Defaults to true and is omitted from serialization while at the default.
    #[serde(default = "return_true", skip_serializing_if = "is_true")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    /// Enables or disables the metrics server
    pub enable: bool,
    /// the host + port
    pub host: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The MTU
    pub mtu: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
|
||||
|
||||
#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        enable: {
            type: bool,
            optional: true,
            default: true,
        },
        url: {
            schema: HTTP_URL_SCHEMA,
        },
        token: {
            type: String,
            optional: true,
        },
        bucket: {
            schema: INFLUXDB_BUCKET_SCHEMA,
            optional: true,
        },
        organization: {
            schema: INFLUXDB_ORGANIZATION_SCHEMA,
            optional: true,
        },
        "max-body-size": {
            type: usize,
            optional: true,
            default: 25_000_000,
        },
        "verify-tls": {
            type: bool,
            optional: true,
            default: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// InfluxDB Server (HTTP(s))
pub struct InfluxDbHttp {
    // Section-config key; not updatable after creation.
    #[updater(skip)]
    pub name: String,
    // Defaults to true and is omitted from serialization while at the default.
    #[serde(default = "return_true", skip_serializing_if = "is_true")]
    #[updater(serde(skip_serializing_if = "Option::is_none"))]
    /// Enables or disables the metrics server
    pub enable: bool,
    /// The base url of the influxdb server
    pub url: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) API token
    pub token: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Named location where time series data is stored
    pub bucket: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Workspace for a group of users
    pub organization: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) maximum body size
    pub max_body_size: Option<usize>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// If true, the certificate will be validated.
    pub verify_tls: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
|
||||
|
||||
#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
/// Type of the metric server
pub enum MetricServerType {
    /// InfluxDB HTTP
    #[serde(rename = "influxdb-http")]
    InfluxDbHttp,
    /// InfluxDB UDP
    #[serde(rename = "influxdb-udp")]
    InfluxDbUdp,
}
|
||||
|
||||
#[api(
    properties: {
        name: {
            schema: METRIC_SERVER_ID_SCHEMA,
        },
        "type": {
            type: MetricServerType,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
    },
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a metric server that's available for all types
pub struct MetricServerInfo {
    pub name: String,
    // "type" is a Rust keyword, hence the `ty` field name plus a serde rename.
    #[serde(rename = "type")]
    pub ty: MetricServerType,
    /// Enables or disables the metrics server
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    /// The target server
    pub server: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
#[api(
    properties: {
        data: {
            type: Array,
            items: {
                type: MetricDataPoint,
            }
        }
    }
)]
/// Return type for the metric API endpoint
pub struct Metrics {
    /// List of metric data points, sorted by timestamp
    pub data: Vec<MetricDataPoint>,
}
|
||||
|
||||
#[api(
    properties: {
        id: {
            type: String,
        },
        metric: {
            type: String,
        },
        timestamp: {
            type: Integer,
        },
    },
)]
/// Metric data point
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct MetricDataPoint {
    /// Unique identifier for this metric object, for instance `node/<nodename>`
    /// or `qemu/<vmid>`.
    pub id: String,

    /// Name of the metric.
    pub metric: String,

    /// Time at which this metric was observed
    pub timestamp: i64,

    // "type" is a Rust keyword, hence the `ty` field name plus a serde rename.
    #[serde(rename = "type")]
    pub ty: MetricDataType,

    /// Metric value.
    pub value: f64,
}
|
||||
|
||||
#[api]
/// Type of the metric.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum MetricDataType {
    /// gauge.
    Gauge,
    /// counter.
    Counter,
    /// derive.
    Derive,
}

// Display / FromStr delegate to the lowercase serde representation.
serde_plain::derive_display_from_serialize!(MetricDataType);
serde_plain::derive_fromstr_from_deserialize!(MetricDataType);
|
@ -1,345 +0,0 @@
|
||||
use std::fmt;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::*;
|
||||
|
||||
use crate::{
|
||||
CIDR_FORMAT, CIDR_V4_FORMAT, CIDR_V6_FORMAT, IP_FORMAT, IP_V4_FORMAT, IP_V6_FORMAT,
|
||||
PROXMOX_SAFE_ID_REGEX,
|
||||
};
|
||||
|
||||
// Interface names share the generic "safe id" pattern.
pub const NETWORK_INTERFACE_FORMAT: ApiStringFormat =
    ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);

// Max lengths below are the textual maxima of the respective address forms
// (e.g. 15 = "255.255.255.255", 39 = full uncompressed IPv6).
pub const IP_V4_SCHEMA: Schema = StringSchema::new("IPv4 address.")
    .format(&IP_V4_FORMAT)
    .max_length(15)
    .schema();

pub const IP_V6_SCHEMA: Schema = StringSchema::new("IPv6 address.")
    .format(&IP_V6_FORMAT)
    .max_length(39)
    .schema();

pub const IP_SCHEMA: Schema = StringSchema::new("IP (IPv4 or IPv6) address.")
    .format(&IP_FORMAT)
    .max_length(39)
    .schema();

pub const CIDR_V4_SCHEMA: Schema = StringSchema::new("IPv4 address with netmask (CIDR notation).")
    .format(&CIDR_V4_FORMAT)
    .max_length(18)
    .schema();

pub const CIDR_V6_SCHEMA: Schema = StringSchema::new("IPv6 address with netmask (CIDR notation).")
    .format(&CIDR_V6_FORMAT)
    .max_length(43)
    .schema();

pub const CIDR_SCHEMA: Schema =
    StringSchema::new("IP address (IPv4 or IPv6) with netmask (CIDR notation).")
        .format(&CIDR_FORMAT)
        .max_length(43)
        .schema();
|
||||
|
||||
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Interface configuration method
pub enum NetworkConfigMethod {
    /// Configuration is done manually using other tools
    Manual,
    /// Define interfaces with statically allocated addresses.
    Static,
    /// Obtain an address via DHCP
    DHCP,
    /// Define the loopback interface.
    Loopback,
}
|
||||
|
||||
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
// Discriminants match the Linux kernel bonding mode numbers (mode=0..6).
#[repr(u8)]
/// Linux Bond Mode
pub enum LinuxBondMode {
    /// Round-robin policy
    BalanceRr = 0,
    /// Active-backup policy
    ActiveBackup = 1,
    /// XOR policy
    BalanceXor = 2,
    /// Broadcast policy
    Broadcast = 3,
    /// IEEE 802.3ad Dynamic link aggregation
    #[serde(rename = "802.3ad")]
    Ieee802_3ad = 4,
    /// Adaptive transmit load balancing
    BalanceTlb = 5,
    /// Adaptive load balancing
    BalanceAlb = 6,
}
|
||||
|
||||
impl fmt::Display for LinuxBondMode {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.write_str(match self {
|
||||
LinuxBondMode::BalanceRr => "balance-rr",
|
||||
LinuxBondMode::ActiveBackup => "active-backup",
|
||||
LinuxBondMode::BalanceXor => "balance-xor",
|
||||
LinuxBondMode::Broadcast => "broadcast",
|
||||
LinuxBondMode::Ieee802_3ad => "802.3ad",
|
||||
LinuxBondMode::BalanceTlb => "balance-tlb",
|
||||
LinuxBondMode::BalanceAlb => "balance-alb",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
// Discriminants match the kernel's xmit_hash_policy values.
#[repr(u8)]
/// Bond Transmit Hash Policy for LACP (802.3ad)
pub enum BondXmitHashPolicy {
    /// Layer 2
    Layer2 = 0,
    /// Layer 2+3
    #[serde(rename = "layer2+3")]
    Layer2_3 = 1,
    /// Layer 3+4
    #[serde(rename = "layer3+4")]
    Layer3_4 = 2,
}
|
||||
|
||||
impl fmt::Display for BondXmitHashPolicy {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.write_str(match self {
|
||||
BondXmitHashPolicy::Layer2 => "layer2",
|
||||
BondXmitHashPolicy::Layer2_3 => "layer2+3",
|
||||
BondXmitHashPolicy::Layer3_4 => "layer3+4",
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// Network interface type
pub enum NetworkInterfaceType {
    /// Loopback
    Loopback,
    /// Physical Ethernet device
    Eth,
    /// Linux Bridge
    Bridge,
    /// Linux Bond
    Bond,
    /// Linux VLAN (eth.10)
    Vlan,
    /// Interface Alias (eth:1)
    Alias,
    /// Unknown interface type
    Unknown,
}

pub const NETWORK_INTERFACE_NAME_SCHEMA: Schema = StringSchema::new("Network interface name.")
    .format(&NETWORK_INTERFACE_FORMAT)
    .min_length(1)
    .max_length(15) // libc::IFNAMSIZ-1
    .schema();

pub const NETWORK_INTERFACE_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Network interface list.", &NETWORK_INTERFACE_NAME_SCHEMA).schema();

// Same list, but encoded as a single comma-separated property string.
pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
    StringSchema::new("A list of network devices, comma separated.")
        .format(&ApiStringFormat::PropertyString(
            &NETWORK_INTERFACE_ARRAY_SCHEMA,
        ))
        .schema();
|
||||
|
||||
#[api(
    properties: {
        name: {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
        },
        "type": {
            type: NetworkInterfaceType,
        },
        method: {
            type: NetworkConfigMethod,
            optional: true,
        },
        method6: {
            type: NetworkConfigMethod,
            optional: true,
        },
        cidr: {
            schema: CIDR_V4_SCHEMA,
            optional: true,
        },
        cidr6: {
            schema: CIDR_V6_SCHEMA,
            optional: true,
        },
        gateway: {
            schema: IP_V4_SCHEMA,
            optional: true,
        },
        gateway6: {
            schema: IP_V6_SCHEMA,
            optional: true,
        },
        options: {
            description: "Option list (inet)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        options6: {
            description: "Option list (inet6)",
            type: Array,
            items: {
                description: "Optional attribute line.",
                type: String,
            },
        },
        comments: {
            description: "Comments (inet, may span multiple lines)",
            type: String,
            optional: true,
        },
        comments6: {
            description: "Comments (inet6, may span multiple lines)",
            type: String,
            optional: true,
        },
        bridge_ports: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        slaves: {
            schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
            optional: true,
        },
        "vlan-id": {
            description: "VLAN ID.",
            type: u16,
            optional: true,
        },
        "vlan-raw-device": {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
            optional: true,
        },
        bond_mode: {
            type: LinuxBondMode,
            optional: true,
        },
        "bond-primary": {
            schema: NETWORK_INTERFACE_NAME_SCHEMA,
            optional: true,
        },
        bond_xmit_hash_policy: {
            type: BondXmitHashPolicy,
            optional: true,
        },
    }
)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
/// Network Interface configuration
pub struct Interface {
    /// Autostart interface
    // NOTE(review): this rename maps the field to its own name and looks
    // redundant — presumably a leftover; confirm before removing.
    #[serde(rename = "autostart")]
    pub autostart: bool,
    /// Interface is active (UP)
    pub active: bool,
    /// Interface name
    pub name: String,
    /// Interface type
    #[serde(rename = "type")]
    pub interface_type: NetworkInterfaceType,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub method: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub method6: Option<NetworkConfigMethod>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv4 address with netmask
    pub cidr: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv4 gateway
    pub gateway: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv6 address with netmask
    pub cidr6: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// IPv6 gateway
    pub gateway6: Option<String>,

    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub options: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub options6: Vec<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub comments: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comments6: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    /// Maximum Transmission Unit
    pub mtu: Option<u64>,

    // Bridge-specific settings.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_ports: Option<Vec<String>>,
    /// Enable bridge vlan support.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_vlan_aware: Option<bool>,
    // VLAN-specific settings.
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-id")]
    pub vlan_id: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-raw-device")]
    pub vlan_raw_device: Option<String>,

    // Bond-specific settings.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub slaves: Option<Vec<String>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bond_mode: Option<LinuxBondMode>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "bond-primary")]
    pub bond_primary: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}
|
||||
|
||||
impl Interface {
|
||||
pub fn new(name: String) -> Self {
|
||||
Self {
|
||||
name,
|
||||
interface_type: NetworkInterfaceType::Unknown,
|
||||
autostart: false,
|
||||
active: false,
|
||||
method: None,
|
||||
method6: None,
|
||||
cidr: None,
|
||||
gateway: None,
|
||||
cidr6: None,
|
||||
gateway6: None,
|
||||
options: Vec::new(),
|
||||
options6: Vec::new(),
|
||||
comments: None,
|
||||
comments6: None,
|
||||
mtu: None,
|
||||
bridge_ports: None,
|
||||
bridge_vlan_aware: None,
|
||||
vlan_id: None,
|
||||
vlan_raw_device: None,
|
||||
slaves: None,
|
||||
bond_mode: None,
|
||||
bond_primary: None,
|
||||
bond_xmit_hash_policy: None,
|
||||
}
|
||||
}
|
||||
}
|
@ -1,162 +0,0 @@
|
||||
use std::ffi::OsStr;
|
||||
|
||||
use proxmox_schema::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::StorageStatus;
|
||||
|
||||
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node memory usage counters
pub struct NodeMemoryCounters {
    /// Total memory
    pub total: u64,
    /// Used memory
    pub used: u64,
    /// Free memory
    pub free: u64,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node swap usage counters
pub struct NodeSwapCounters {
    /// Total swap
    pub total: u64,
    /// Used swap
    pub used: u64,
    /// Free swap
    pub free: u64,
}
|
||||
|
||||
#[api]
|
||||
#[derive(Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Contains general node information such as the fingerprint`
|
||||
pub struct NodeInformation {
|
||||
/// The SSL Fingerprint
|
||||
pub fingerprint: String,
|
||||
}
|
||||
|
||||
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
/// The current kernel version (output of `uname`)
pub struct KernelVersionInformation {
    /// The systemname/nodename
    pub sysname: String,
    /// The kernel release number
    pub release: String,
    /// The kernel version
    pub version: String,
    /// The machine architecture
    pub machine: String,
}
|
||||
|
||||
impl KernelVersionInformation {
|
||||
pub fn from_uname_parts(
|
||||
sysname: &OsStr,
|
||||
release: &OsStr,
|
||||
version: &OsStr,
|
||||
machine: &OsStr,
|
||||
) -> Self {
|
||||
KernelVersionInformation {
|
||||
sysname: sysname.to_str().map(String::from).unwrap_or_default(),
|
||||
release: release.to_str().map(String::from).unwrap_or_default(),
|
||||
version: version.to_str().map(String::from).unwrap_or_default(),
|
||||
machine: machine.to_str().map(String::from).unwrap_or_default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_legacy(&self) -> String {
|
||||
format!("{} {} {}", self.sysname, self.release, self.version)
|
||||
}
|
||||
}
|
||||
|
||||
#[api]
#[derive(Serialize, Deserialize, Copy, Clone)]
#[serde(rename_all = "kebab-case")]
/// The possible BootModes
pub enum BootMode {
    /// The BootMode is EFI/UEFI
    Efi,
    /// The BootMode is Legacy BIOS
    LegacyBios,
}

#[api]
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
/// Holds the Bootmodes
pub struct BootModeInformation {
    /// The BootMode, either Efi or Bios
    pub mode: BootMode,
    /// SecureBoot status
    pub secureboot: bool,
}

#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Information about the CPU
pub struct NodeCpuInformation {
    /// The CPU model
    pub model: String,
    /// The number of CPU sockets
    pub sockets: usize,
    /// The number of CPU cores (incl. threads)
    pub cpus: usize,
}
|
||||
|
||||
#[api(
    properties: {
        memory: {
            type: NodeMemoryCounters,
        },
        root: {
            type: StorageStatus,
        },
        swap: {
            type: NodeSwapCounters,
        },
        loadavg: {
            type: Array,
            items: {
                type: Number,
                description: "the load",
            }
        },
        cpuinfo: {
            type: NodeCpuInformation,
        },
        info: {
            type: NodeInformation,
        }
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// The Node status
pub struct NodeStatus {
    pub memory: NodeMemoryCounters,
    // Usage of the root filesystem.
    pub root: StorageStatus,
    pub swap: NodeSwapCounters,
    /// The current uptime of the server.
    pub uptime: u64,
    /// Load for 1, 5 and 15 minutes.
    pub loadavg: [f64; 3],
    /// The current kernel version (NEW struct type).
    pub current_kernel: KernelVersionInformation,
    /// The current kernel version (LEGACY string type).
    pub kversion: String,
    /// Total CPU usage since last query.
    pub cpu: f64,
    /// Total IO wait since last query.
    pub wait: f64,
    pub cpuinfo: NodeCpuInformation,
    pub info: NodeInformation,
    /// Current boot mode
    pub boot_info: BootModeInformation,
}
|
@ -1,120 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater};
|
||||
|
||||
use super::{
|
||||
GENERIC_URI_REGEX, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA,
|
||||
SINGLE_LINE_COMMENT_SCHEMA,
|
||||
};
|
||||
|
||||
pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
|
||||
|
||||
pub const OPENID_SCOPE_SCHEMA: Schema = StringSchema::new("OpenID Scope Name.")
|
||||
.format(&OPENID_SCOPE_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const OPENID_SCOPE_ARRAY_SCHEMA: Schema =
|
||||
ArraySchema::new("Array of OpenId Scopes.", &OPENID_SCOPE_SCHEMA).schema();
|
||||
|
||||
pub const OPENID_SCOPE_LIST_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::PropertyString(&OPENID_SCOPE_ARRAY_SCHEMA);
|
||||
|
||||
pub const OPENID_DEFAILT_SCOPE_LIST: &str = "email profile";
|
||||
pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope List")
|
||||
.format(&OPENID_SCOPE_LIST_FORMAT)
|
||||
.default(OPENID_DEFAILT_SCOPE_LIST)
|
||||
.schema();
|
||||
|
||||
pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GENERIC_URI_REGEX);

pub const OPENID_ACR_SCHEMA: Schema =
    StringSchema::new("OpenID Authentication Context Class Reference.")
        .format(&OPENID_ACR_FORMAT)
        .schema();

pub const OPENID_ACR_ARRAY_SCHEMA: Schema =
    ArraySchema::new("Array of OpenId ACRs.", &OPENID_ACR_SCHEMA).schema();

// ACR lists are stored as a single property string encoding the array above.
pub const OPENID_ACR_LIST_FORMAT: ApiStringFormat =
    ApiStringFormat::PropertyString(&OPENID_ACR_ARRAY_SCHEMA);

pub const OPENID_ACR_LIST_SCHEMA: Schema = StringSchema::new("OpenID ACR List")
    .format(&OPENID_ACR_LIST_FORMAT)
    .schema();

pub const OPENID_USERNAME_CLAIM_SCHEMA: Schema = StringSchema::new(
    "Use the value of this attribute/claim as unique user name. It \
    is up to the identity provider to guarantee the uniqueness. The \
    OpenID specification only guarantees that Subject ('sub') is \
    unique. Also make sure that the user is not allowed to change that \
    attribute by himself!",
)
.max_length(64)
.min_length(1)
.format(&PROXMOX_SAFE_ID_FORMAT)
.schema();
|
||||
|
||||
#[api(
    properties: {
        realm: {
            schema: REALM_ID_SCHEMA,
        },
        "client-key": {
            optional: true,
        },
        "scopes": {
            schema: OPENID_SCOPE_LIST_SCHEMA,
            optional: true,
        },
        "acr-values": {
            schema: OPENID_ACR_LIST_SCHEMA,
            optional: true,
        },
        prompt: {
            description: "OpenID Prompt",
            type: String,
            format: &PROXMOX_SAFE_ID_FORMAT,
            optional: true,
        },
        comment: {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        autocreate: {
            optional: true,
            default: false,
        },
        "username-claim": {
            schema: OPENID_USERNAME_CLAIM_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Updater)]
#[serde(rename_all = "kebab-case")]
/// OpenID configuration properties.
pub struct OpenIdRealmConfig {
    // Section-config key; not updatable after creation.
    #[updater(skip)]
    pub realm: String,
    /// OpenID Issuer Url
    pub issuer_url: String,
    /// OpenID Client ID
    pub client_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub scopes: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub acr_values: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub prompt: Option<String>,
    /// OpenID Client Key
    #[serde(skip_serializing_if = "Option::is_none")]
    pub client_key: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Automatically create users if they do not exist.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub autocreate: Option<bool>,
    // Fixed at realm creation; changing it would change user identities.
    #[updater(skip)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub username_claim: Option<String>,
}
|
@ -1,30 +0,0 @@
|
||||
use proxmox_schema::{const_regex, ApiStringFormat, ApiType, Schema, StringSchema};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
const_regex! {
|
||||
pub PATH_PATTERN_REGEX = concat!(r"^.+[^\\]$");
|
||||
}
|
||||
|
||||
pub const PATH_PATTERN_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PATH_PATTERN_REGEX);
|
||||
|
||||
pub const PATH_PATTERN_SCHEMA: Schema =
|
||||
StringSchema::new("Path or match pattern for matching filenames.")
|
||||
.format(&PATH_PATTERN_FORMAT)
|
||||
.schema();
|
||||
|
||||
#[derive(Default, Deserialize, Serialize)]
|
||||
/// Path or path pattern for filename matching
|
||||
pub struct PathPattern {
|
||||
pattern: String,
|
||||
}
|
||||
|
||||
impl ApiType for PathPattern {
|
||||
const API_SCHEMA: Schema = PATH_PATTERN_SCHEMA;
|
||||
}
|
||||
|
||||
impl AsRef<[u8]> for PathPattern {
|
||||
fn as_ref(&self) -> &[u8] {
|
||||
self.pattern.as_bytes()
|
||||
}
|
||||
}
|
@ -1,22 +0,0 @@
|
||||
use percent_encoding::{utf8_percent_encode, AsciiSet};
|
||||
|
||||
/// This used to be: `SIMPLE_ENCODE_SET` plus space, `"`, `#`, `<`, `>`, backtick, `?`, `{`, `}`
|
||||
pub const DEFAULT_ENCODE_SET: &AsciiSet = &percent_encoding::CONTROLS // 0..1f and 7e
|
||||
// The SIMPLE_ENCODE_SET adds space and anything >= 0x7e (7e itself is already included above)
|
||||
.add(0x20)
|
||||
.add(0x7f)
|
||||
// the DEFAULT_ENCODE_SET added:
|
||||
.add(b' ')
|
||||
.add(b'"')
|
||||
.add(b'#')
|
||||
.add(b'<')
|
||||
.add(b'>')
|
||||
.add(b'`')
|
||||
.add(b'?')
|
||||
.add(b'{')
|
||||
.add(b'}');
|
||||
|
||||
/// percent encode a url component
|
||||
pub fn percent_encode_component(comp: &str) -> String {
|
||||
utf8_percent_encode(comp, percent_encoding::NON_ALPHANUMERIC).to_string()
|
||||
}
|
@ -1,106 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::*;
|
||||
use proxmox_schema::*;
|
||||
|
||||
pub const REMOTE_PASSWORD_SCHEMA: Schema =
|
||||
StringSchema::new("Password or auth token for remote host.")
|
||||
.format(&PASSWORD_FORMAT)
|
||||
.min_length(1)
|
||||
.max_length(1024)
|
||||
.schema();
|
||||
|
||||
pub const REMOTE_PASSWORD_BASE64_SCHEMA: Schema =
|
||||
StringSchema::new("Password or auth token for remote host (stored as base64 string).")
|
||||
.format(&PASSWORD_FORMAT)
|
||||
.min_length(1)
|
||||
.max_length(1024)
|
||||
.schema();
|
||||
|
||||
pub const REMOTE_ID_SCHEMA: Schema = StringSchema::new("Remote ID.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
host: {
|
||||
schema: DNS_NAME_OR_IP_SCHEMA,
|
||||
},
|
||||
port: {
|
||||
optional: true,
|
||||
description: "The (optional) port",
|
||||
type: u16,
|
||||
},
|
||||
"auth-id": {
|
||||
type: Authid,
|
||||
},
|
||||
fingerprint: {
|
||||
optional: true,
|
||||
schema: CERT_FINGERPRINT_SHA256_SCHEMA,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Remote configuration properties.
|
||||
pub struct RemoteConfig {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
pub host: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub port: Option<u16>,
|
||||
pub auth_id: Authid,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub fingerprint: Option<String>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: {
|
||||
schema: REMOTE_ID_SCHEMA,
|
||||
},
|
||||
config: {
|
||||
type: RemoteConfig,
|
||||
},
|
||||
password: {
|
||||
schema: REMOTE_PASSWORD_BASE64_SCHEMA,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Remote properties.
|
||||
pub struct Remote {
|
||||
pub name: String,
|
||||
// Note: The stored password is base64 encoded
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
#[serde(with = "proxmox_serde::string_as_base64")]
|
||||
pub password: String,
|
||||
#[serde(flatten)]
|
||||
pub config: RemoteConfig,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: {
|
||||
schema: REMOTE_ID_SCHEMA,
|
||||
},
|
||||
config: {
|
||||
type: RemoteConfig,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Remote properties.
|
||||
pub struct RemoteWithoutPassword {
|
||||
pub name: String,
|
||||
#[serde(flatten)]
|
||||
pub config: RemoteConfig,
|
||||
}
|
@ -1,134 +0,0 @@
|
||||
//! Types for tape changer API
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{
|
||||
api, ApiStringFormat, ArraySchema, IntegerSchema, Schema, StringSchema, Updater,
|
||||
};
|
||||
|
||||
use crate::{OptionalDeviceIdentification, PROXMOX_SAFE_ID_FORMAT};
|
||||
|
||||
pub const CHANGER_NAME_SCHEMA: Schema = StringSchema::new("Tape Changer Identifier.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
pub const SCSI_CHANGER_PATH_SCHEMA: Schema =
|
||||
StringSchema::new("Path to Linux generic SCSI device (e.g. '/dev/sg4')").schema();
|
||||
|
||||
pub const MEDIA_LABEL_SCHEMA: Schema = StringSchema::new("Media Label/Barcode.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(2)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
pub const SLOT_ARRAY_SCHEMA: Schema = ArraySchema::new(
|
||||
"Slot list.",
|
||||
&IntegerSchema::new("Slot number").minimum(1).schema(),
|
||||
)
|
||||
.schema();
|
||||
|
||||
pub const EXPORT_SLOT_LIST_SCHEMA: Schema = StringSchema::new(
|
||||
"\
|
||||
A list of slot numbers, comma separated. Those slots are reserved for
|
||||
Import/Export, i.e. any media in those slots are considered to be
|
||||
'offline'.
|
||||
",
|
||||
)
|
||||
.format(&ApiStringFormat::PropertyString(&SLOT_ARRAY_SCHEMA))
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: {
|
||||
schema: CHANGER_NAME_SCHEMA,
|
||||
},
|
||||
path: {
|
||||
schema: SCSI_CHANGER_PATH_SCHEMA,
|
||||
},
|
||||
"export-slots": {
|
||||
schema: EXPORT_SLOT_LIST_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"eject-before-unload": {
|
||||
optional: true,
|
||||
default: false,
|
||||
}
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// SCSI tape changer
|
||||
pub struct ScsiTapeChanger {
|
||||
#[updater(skip)]
|
||||
pub name: String,
|
||||
pub path: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub export_slots: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// if set to true, tapes are ejected manually before unloading
|
||||
pub eject_before_unload: Option<bool>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
config: {
|
||||
type: ScsiTapeChanger,
|
||||
},
|
||||
info: {
|
||||
type: OptionalDeviceIdentification,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Changer config with optional device identification attributes
|
||||
pub struct ChangerListEntry {
|
||||
#[serde(flatten)]
|
||||
pub config: ScsiTapeChanger,
|
||||
#[serde(flatten)]
|
||||
pub info: OptionalDeviceIdentification,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Mtx Entry Kind
|
||||
pub enum MtxEntryKind {
|
||||
/// Drive
|
||||
Drive,
|
||||
/// Slot
|
||||
Slot,
|
||||
/// Import/Export Slot
|
||||
ImportExport,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"entry-kind": {
|
||||
type: MtxEntryKind,
|
||||
},
|
||||
"label-text": {
|
||||
schema: MEDIA_LABEL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Mtx Status Entry
|
||||
pub struct MtxStatusEntry {
|
||||
pub entry_kind: MtxEntryKind,
|
||||
/// The ID of the slot or drive
|
||||
pub entry_id: u64,
|
||||
/// The media label (volume tag) if the slot/drive is full
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub label_text: Option<String>,
|
||||
/// The slot the drive was loaded from
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub loaded_slot: Option<u64>,
|
||||
/// The current state of the drive
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub state: Option<String>,
|
||||
}
|
@ -1,55 +0,0 @@
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::api;
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Optional Device Identification Attributes
|
||||
pub struct OptionalDeviceIdentification {
|
||||
/// Vendor (autodetected)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub vendor: Option<String>,
|
||||
/// Model (autodetected)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub model: Option<String>,
|
||||
/// Serial number (autodetected)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub serial: Option<String>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Kind of device
|
||||
pub enum DeviceKind {
|
||||
/// Tape changer (Autoloader, Robot)
|
||||
Changer,
|
||||
/// Normal SCSI tape device
|
||||
Tape,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
kind: {
|
||||
type: DeviceKind,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
/// Tape device information
|
||||
pub struct TapeDeviceInfo {
|
||||
pub kind: DeviceKind,
|
||||
/// Path to the linux device node
|
||||
pub path: String,
|
||||
/// Serial number (autodetected)
|
||||
pub serial: String,
|
||||
/// Vendor (autodetected)
|
||||
pub vendor: String,
|
||||
/// Model (autodetected)
|
||||
pub model: String,
|
||||
/// Device major number
|
||||
pub major: u32,
|
||||
/// Device minor number
|
||||
pub minor: u32,
|
||||
}
|
@ -1,350 +0,0 @@
|
||||
//! Types for tape drive API
|
||||
use anyhow::{bail, Error};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};
|
||||
|
||||
use crate::{OptionalDeviceIdentification, CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
|
||||
|
||||
pub const DRIVE_NAME_SCHEMA: Schema = StringSchema::new("Drive Identifier.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
pub const LTO_DRIVE_PATH_SCHEMA: Schema =
|
||||
StringSchema::new("The path to a LTO SCSI-generic tape device (i.e. '/dev/sg0')").schema();
|
||||
|
||||
pub const CHANGER_DRIVENUM_SCHEMA: Schema =
|
||||
IntegerSchema::new("Associated changer drive number (requires option changer)")
|
||||
.minimum(0)
|
||||
.maximum(255)
|
||||
.default(0)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: {
|
||||
schema: DRIVE_NAME_SCHEMA,
|
||||
}
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
/// Simulate tape drives (only for test and debug)
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct VirtualTapeDrive {
|
||||
pub name: String,
|
||||
/// Path to directory
|
||||
pub path: String,
|
||||
/// Virtual tape size
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub max_size: Option<usize>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: {
|
||||
schema: DRIVE_NAME_SCHEMA,
|
||||
},
|
||||
path: {
|
||||
schema: LTO_DRIVE_PATH_SCHEMA,
|
||||
},
|
||||
changer: {
|
||||
schema: CHANGER_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"changer-drivenum": {
|
||||
schema: CHANGER_DRIVENUM_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater, Clone)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Lto SCSI tape driver
|
||||
pub struct LtoTapeDrive {
|
||||
#[updater(skip)]
|
||||
pub name: String,
|
||||
pub path: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub changer: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub changer_drivenum: Option<u64>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
config: {
|
||||
type: LtoTapeDrive,
|
||||
},
|
||||
info: {
|
||||
type: OptionalDeviceIdentification,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Drive list entry
|
||||
pub struct DriveListEntry {
|
||||
#[serde(flatten)]
|
||||
pub config: LtoTapeDrive,
|
||||
#[serde(flatten)]
|
||||
pub info: OptionalDeviceIdentification,
|
||||
/// the state of the drive if locked
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub state: Option<String>,
|
||||
/// Current device activity
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub activity: Option<DeviceActivity>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
/// Medium auxiliary memory attributes (MAM)
|
||||
pub struct MamAttribute {
|
||||
/// Attribute id
|
||||
pub id: u16,
|
||||
/// Attribute name
|
||||
pub name: String,
|
||||
/// Attribute value
|
||||
pub value: String,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)]
|
||||
/// The density of a tape medium, derived from the LTO version.
|
||||
pub enum TapeDensity {
|
||||
/// Unknown (no media loaded)
|
||||
Unknown,
|
||||
/// LTO1
|
||||
LTO1,
|
||||
/// LTO2
|
||||
LTO2,
|
||||
/// LTO3
|
||||
LTO3,
|
||||
/// LTO4
|
||||
LTO4,
|
||||
/// LTO5
|
||||
LTO5,
|
||||
/// LTO6
|
||||
LTO6,
|
||||
/// LTO7
|
||||
LTO7,
|
||||
/// LTO7M8
|
||||
LTO7M8,
|
||||
/// LTO8
|
||||
LTO8,
|
||||
/// LTO9
|
||||
LTO9,
|
||||
}
|
||||
|
||||
impl TryFrom<u8> for TapeDensity {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||
let density = match value {
|
||||
0x00 => TapeDensity::Unknown,
|
||||
0x40 => TapeDensity::LTO1,
|
||||
0x42 => TapeDensity::LTO2,
|
||||
0x44 => TapeDensity::LTO3,
|
||||
0x46 => TapeDensity::LTO4,
|
||||
0x58 => TapeDensity::LTO5,
|
||||
0x5a => TapeDensity::LTO6,
|
||||
0x5c => TapeDensity::LTO7,
|
||||
0x5d => TapeDensity::LTO7M8,
|
||||
0x5e => TapeDensity::LTO8,
|
||||
0x60 => TapeDensity::LTO9,
|
||||
_ => bail!("unknown tape density code 0x{:02x}", value),
|
||||
};
|
||||
Ok(density)
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
density: {
|
||||
type: TapeDensity,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Drive/Media status for Lto SCSI drives.
|
||||
///
|
||||
/// Media related data is optional - only set if there is a medium
|
||||
/// loaded.
|
||||
pub struct LtoDriveAndMediaStatus {
|
||||
/// Vendor
|
||||
pub vendor: String,
|
||||
/// Product
|
||||
pub product: String,
|
||||
/// Revision
|
||||
pub revision: String,
|
||||
/// Block size (0 is variable size)
|
||||
pub blocksize: u32,
|
||||
/// Compression enabled
|
||||
pub compression: bool,
|
||||
/// Drive buffer mode
|
||||
pub buffer_mode: u8,
|
||||
/// Tape density
|
||||
pub density: TapeDensity,
|
||||
/// Media is write protected
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub write_protect: Option<bool>,
|
||||
/// Tape Alert Flags
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub alert_flags: Option<String>,
|
||||
/// Current file number
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub file_number: Option<u64>,
|
||||
/// Current block number
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub block_number: Option<u64>,
|
||||
/// Medium Manufacture Date (epoch)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub manufactured: Option<i64>,
|
||||
/// Total Bytes Read in Medium Life
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bytes_read: Option<u64>,
|
||||
/// Total Bytes Written in Medium Life
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub bytes_written: Option<u64>,
|
||||
/// Number of mounts for the current volume (i.e., Thread Count)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub volume_mounts: Option<u64>,
|
||||
/// Count of the total number of times the medium has passed over
|
||||
/// the head.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub medium_passes: Option<u64>,
|
||||
/// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub medium_wearout: Option<f64>,
|
||||
/// Current device activity
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub drive_activity: Option<DeviceActivity>,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
/// Volume statistics from SCSI log page 17h
|
||||
#[derive(Default, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct Lp17VolumeStatistics {
|
||||
/// Volume mounts (thread count)
|
||||
pub volume_mounts: u64,
|
||||
/// Total data sets written
|
||||
pub volume_datasets_written: u64,
|
||||
/// Write retries
|
||||
pub volume_recovered_write_data_errors: u64,
|
||||
/// Total unrecovered write errors
|
||||
pub volume_unrecovered_write_data_errors: u64,
|
||||
/// Total suspended writes
|
||||
pub volume_write_servo_errors: u64,
|
||||
/// Total fatal suspended writes
|
||||
pub volume_unrecovered_write_servo_errors: u64,
|
||||
/// Total datasets read
|
||||
pub volume_datasets_read: u64,
|
||||
/// Total read retries
|
||||
pub volume_recovered_read_errors: u64,
|
||||
/// Total unrecovered read errors
|
||||
pub volume_unrecovered_read_errors: u64,
|
||||
/// Last mount unrecovered write errors
|
||||
pub last_mount_unrecovered_write_errors: u64,
|
||||
/// Last mount unrecovered read errors
|
||||
pub last_mount_unrecovered_read_errors: u64,
|
||||
/// Last mount bytes written
|
||||
pub last_mount_bytes_written: u64,
|
||||
/// Last mount bytes read
|
||||
pub last_mount_bytes_read: u64,
|
||||
/// Lifetime bytes written
|
||||
pub lifetime_bytes_written: u64,
|
||||
/// Lifetime bytes read
|
||||
pub lifetime_bytes_read: u64,
|
||||
/// Last load write compression ratio
|
||||
pub last_load_write_compression_ratio: u64,
|
||||
/// Last load read compression ratio
|
||||
pub last_load_read_compression_ratio: u64,
|
||||
/// Medium mount time
|
||||
pub medium_mount_time: u64,
|
||||
/// Medium ready time
|
||||
pub medium_ready_time: u64,
|
||||
/// Total native capacity
|
||||
pub total_native_capacity: u64,
|
||||
/// Total used native capacity
|
||||
pub total_used_native_capacity: u64,
|
||||
/// Write protect
|
||||
pub write_protect: bool,
|
||||
/// Volume is WORM
|
||||
pub worm: bool,
|
||||
/// Beginning of medium passes
|
||||
pub beginning_of_medium_passes: u64,
|
||||
/// Middle of medium passes
|
||||
pub middle_of_tape_passes: u64,
|
||||
/// Volume serial number
|
||||
pub serial: String,
|
||||
}
|
||||
|
||||
/// The DT Device Activity from DT Device Status LP page
|
||||
#[api]
|
||||
#[derive(Copy, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum DeviceActivity {
|
||||
/// No activity
|
||||
NoActivity,
|
||||
/// Cleaning
|
||||
Cleaning,
|
||||
/// Loading
|
||||
Loading,
|
||||
/// Unloading
|
||||
Unloading,
|
||||
/// Other unspecified activity
|
||||
Other,
|
||||
/// Reading
|
||||
Reading,
|
||||
/// Writing
|
||||
Writing,
|
||||
/// Locating
|
||||
Locating,
|
||||
/// Rewinding
|
||||
Rewinding,
|
||||
/// Erasing
|
||||
Erasing,
|
||||
/// Formatting
|
||||
Formatting,
|
||||
/// Calibrating
|
||||
Calibrating,
|
||||
/// Other (DT)
|
||||
OtherDT,
|
||||
/// Updating microcode
|
||||
MicrocodeUpdate,
|
||||
/// Reading encrypted data
|
||||
ReadingEncrypted,
|
||||
/// Writing encrypted data
|
||||
WritingEncrypted,
|
||||
}
|
||||
|
||||
impl TryFrom<u8> for DeviceActivity {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||
Ok(match value {
|
||||
0x00 => DeviceActivity::NoActivity,
|
||||
0x01 => DeviceActivity::Cleaning,
|
||||
0x02 => DeviceActivity::Loading,
|
||||
0x03 => DeviceActivity::Unloading,
|
||||
0x04 => DeviceActivity::Other,
|
||||
0x05 => DeviceActivity::Reading,
|
||||
0x06 => DeviceActivity::Writing,
|
||||
0x07 => DeviceActivity::Locating,
|
||||
0x08 => DeviceActivity::Rewinding,
|
||||
0x09 => DeviceActivity::Erasing,
|
||||
0x0A => DeviceActivity::Formatting,
|
||||
0x0B => DeviceActivity::Calibrating,
|
||||
0x0C => DeviceActivity::OtherDT,
|
||||
0x0D => DeviceActivity::MicrocodeUpdate,
|
||||
0x0E => DeviceActivity::ReadingEncrypted,
|
||||
0x0F => DeviceActivity::WritingEncrypted,
|
||||
other => bail!("invalid DT device activity value: {:x}", other),
|
||||
})
|
||||
}
|
||||
}
|
@ -1,179 +0,0 @@
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::*;
|
||||
use proxmox_uuid::Uuid;
|
||||
|
||||
use crate::{MediaLocation, MediaStatus, UUID_FORMAT};
|
||||
|
||||
pub const MEDIA_SET_UUID_SCHEMA: Schema = StringSchema::new(
|
||||
"MediaSet Uuid (We use the all-zero Uuid to reseve an empty media for a specific pool).",
|
||||
)
|
||||
.format(&UUID_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const MEDIA_UUID_SCHEMA: Schema = StringSchema::new("Media Uuid.")
|
||||
.format(&UUID_FORMAT)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"media-set-uuid": {
|
||||
schema: MEDIA_SET_UUID_SCHEMA,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Media Set list entry
|
||||
pub struct MediaSetListEntry {
|
||||
/// Media set name
|
||||
pub media_set_name: String,
|
||||
pub media_set_uuid: Uuid,
|
||||
/// MediaSet creation time stamp
|
||||
pub media_set_ctime: i64,
|
||||
/// Media Pool
|
||||
pub pool: String,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
location: {
|
||||
type: MediaLocation,
|
||||
},
|
||||
status: {
|
||||
type: MediaStatus,
|
||||
},
|
||||
uuid: {
|
||||
schema: MEDIA_UUID_SCHEMA,
|
||||
},
|
||||
"media-set-uuid": {
|
||||
schema: MEDIA_SET_UUID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Media list entry
|
||||
pub struct MediaListEntry {
|
||||
/// Media label text (or Barcode)
|
||||
pub label_text: String,
|
||||
pub uuid: Uuid,
|
||||
/// Creation time stamp
|
||||
pub ctime: i64,
|
||||
pub location: MediaLocation,
|
||||
pub status: MediaStatus,
|
||||
/// Expired flag
|
||||
pub expired: bool,
|
||||
/// Catalog status OK
|
||||
pub catalog: bool,
|
||||
/// Media set name
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub media_set_name: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub media_set_uuid: Option<Uuid>,
|
||||
/// Media set seq_nr
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub seq_nr: Option<u64>,
|
||||
/// MediaSet creation time stamp
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub media_set_ctime: Option<i64>,
|
||||
/// Media Pool
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub pool: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
/// Bytes currently used
|
||||
pub bytes_used: Option<u64>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
uuid: {
|
||||
schema: MEDIA_UUID_SCHEMA,
|
||||
},
|
||||
"media-set-uuid": {
|
||||
schema: MEDIA_SET_UUID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Media label info
|
||||
pub struct MediaIdFlat {
|
||||
/// Unique ID
|
||||
pub uuid: Uuid,
|
||||
/// Media label text (or Barcode)
|
||||
pub label_text: String,
|
||||
/// Creation time stamp
|
||||
pub ctime: i64,
|
||||
// All MediaSet properties are optional here
|
||||
/// MediaSet Pool
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub pool: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub media_set_uuid: Option<Uuid>,
|
||||
/// MediaSet media sequence number
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub seq_nr: Option<u64>,
|
||||
/// MediaSet Creation time stamp
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub media_set_ctime: Option<i64>,
|
||||
/// Encryption key fingerprint
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub encryption_key_fingerprint: Option<String>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
uuid: {
|
||||
schema: MEDIA_UUID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Label with optional Uuid
|
||||
pub struct LabelUuidMap {
|
||||
/// Changer label text (or Barcode)
|
||||
pub label_text: String,
|
||||
/// Associated Uuid (if any)
|
||||
pub uuid: Option<Uuid>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
uuid: {
|
||||
schema: MEDIA_UUID_SCHEMA,
|
||||
},
|
||||
"media-set-uuid": {
|
||||
schema: MEDIA_SET_UUID_SCHEMA,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Media content list entry
|
||||
pub struct MediaContentEntry {
|
||||
/// Media label text (or Barcode)
|
||||
pub label_text: String,
|
||||
/// Media Uuid
|
||||
pub uuid: Uuid,
|
||||
/// Media set name
|
||||
pub media_set_name: String,
|
||||
/// Media set uuid
|
||||
pub media_set_uuid: Uuid,
|
||||
/// MediaSet Creation time stamp
|
||||
pub media_set_ctime: i64,
|
||||
/// Media set seq_nr
|
||||
pub seq_nr: u64,
|
||||
/// Media Pool
|
||||
pub pool: String,
|
||||
/// Datastore Name
|
||||
pub store: String,
|
||||
/// Backup snapshot
|
||||
pub snapshot: String,
|
||||
/// Snapshot creation time (epoch)
|
||||
pub backup_time: i64,
|
||||
}
|
@ -1,80 +0,0 @@
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use proxmox_schema::{ApiStringFormat, Schema, StringSchema};
|
||||
|
||||
use crate::{CHANGER_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT};
|
||||
|
||||
pub const VAULT_NAME_SCHEMA: Schema = StringSchema::new("Vault name.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone)]
|
||||
/// Media location
|
||||
pub enum MediaLocation {
|
||||
/// Ready for use (inside tape library)
|
||||
Online(String),
|
||||
/// Local available, but need to be mounted (insert into tape
|
||||
/// drive)
|
||||
Offline,
|
||||
/// Media is inside a Vault
|
||||
Vault(String),
|
||||
}
|
||||
|
||||
proxmox_serde::forward_deserialize_to_from_str!(MediaLocation);
|
||||
proxmox_serde::forward_serialize_to_display!(MediaLocation);
|
||||
|
||||
impl proxmox_schema::ApiType for MediaLocation {
|
||||
const API_SCHEMA: Schema = StringSchema::new(
|
||||
"Media location (e.g. 'offline', 'online-<changer_name>', 'vault-<vault_name>')",
|
||||
)
|
||||
.format(&ApiStringFormat::VerifyFn(|text| {
|
||||
let location: MediaLocation = text.parse()?;
|
||||
match location {
|
||||
MediaLocation::Online(ref changer) => {
|
||||
CHANGER_NAME_SCHEMA.parse_simple_value(changer)?;
|
||||
}
|
||||
MediaLocation::Vault(ref vault) => {
|
||||
VAULT_NAME_SCHEMA.parse_simple_value(vault)?;
|
||||
}
|
||||
MediaLocation::Offline => { /* OK */ }
|
||||
}
|
||||
Ok(())
|
||||
}))
|
||||
.schema();
|
||||
}
|
||||
|
||||
impl std::fmt::Display for MediaLocation {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
MediaLocation::Offline => {
|
||||
write!(f, "offline")
|
||||
}
|
||||
MediaLocation::Online(changer) => {
|
||||
write!(f, "online-{}", changer)
|
||||
}
|
||||
MediaLocation::Vault(vault) => {
|
||||
write!(f, "vault-{}", vault)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::str::FromStr for MediaLocation {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
if s == "offline" {
|
||||
return Ok(MediaLocation::Offline);
|
||||
}
|
||||
if let Some(changer) = s.strip_prefix("online-") {
|
||||
return Ok(MediaLocation::Online(changer.to_string()));
|
||||
}
|
||||
if let Some(vault) = s.strip_prefix("vault-") {
|
||||
return Ok(MediaLocation::Vault(vault.to_string()));
|
||||
}
|
||||
|
||||
bail!("MediaLocation parse error");
|
||||
}
|
||||
}
|
@ -1,161 +0,0 @@
|
||||
//! Types for tape media pool API
|
||||
//!
|
||||
//! Note: Both MediaSetPolicy and RetentionPolicy are complex enums,
|
||||
//! so we cannot use them directly for the API. Instead, we represent
|
||||
//! them as String.
|
||||
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Error;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{api, ApiStringFormat, Schema, StringSchema, Updater};
|
||||
|
||||
use proxmox_time::{CalendarEvent, TimeSpan};
|
||||
|
||||
use crate::{
|
||||
PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
|
||||
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||
};
|
||||
|
||||
pub const MEDIA_POOL_NAME_SCHEMA: Schema = StringSchema::new("Media pool name.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(2)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
pub const MEDIA_SET_NAMING_TEMPLATE_SCHEMA: Schema = StringSchema::new(
|
||||
"Media set naming template (may contain strftime() time format specifications).",
|
||||
)
|
||||
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||
.min_length(2)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
pub const MEDIA_SET_ALLOCATION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
|
||||
MediaSetPolicy::from_str(s)?;
|
||||
Ok(())
|
||||
});
|
||||
|
||||
pub const MEDIA_SET_ALLOCATION_POLICY_SCHEMA: Schema =
|
||||
StringSchema::new("Media set allocation policy ('continue', 'always', or a calendar event).")
|
||||
.format(&MEDIA_SET_ALLOCATION_POLICY_FORMAT)
|
||||
.schema();
|
||||
|
||||
/// Media set allocation policy
|
||||
pub enum MediaSetPolicy {
|
||||
/// Try to use the current media set
|
||||
ContinueCurrent,
|
||||
/// Each backup job creates a new media set
|
||||
AlwaysCreate,
|
||||
/// Create a new set when the specified CalendarEvent triggers
|
||||
CreateAt(CalendarEvent),
|
||||
}
|
||||
|
||||
impl std::str::FromStr for MediaSetPolicy {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
if s == "continue" {
|
||||
return Ok(MediaSetPolicy::ContinueCurrent);
|
||||
}
|
||||
if s == "always" {
|
||||
return Ok(MediaSetPolicy::AlwaysCreate);
|
||||
}
|
||||
|
||||
let event = s.parse()?;
|
||||
|
||||
Ok(MediaSetPolicy::CreateAt(event))
|
||||
}
|
||||
}
|
||||
|
||||
pub const MEDIA_RETENTION_POLICY_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|s| {
|
||||
RetentionPolicy::from_str(s)?;
|
||||
Ok(())
|
||||
});
|
||||
|
||||
pub const MEDIA_RETENTION_POLICY_SCHEMA: Schema =
|
||||
StringSchema::new("Media retention policy ('overwrite', 'keep', or time span).")
|
||||
.format(&MEDIA_RETENTION_POLICY_FORMAT)
|
||||
.schema();
|
||||
|
||||
/// Media retention Policy
|
||||
pub enum RetentionPolicy {
|
||||
/// Always overwrite media
|
||||
OverwriteAlways,
|
||||
/// Protect data for the timespan specified
|
||||
ProtectFor(TimeSpan),
|
||||
/// Never overwrite data
|
||||
KeepForever,
|
||||
}
|
||||
|
||||
impl std::str::FromStr for RetentionPolicy {
|
||||
type Err = Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
if s == "overwrite" {
|
||||
return Ok(RetentionPolicy::OverwriteAlways);
|
||||
}
|
||||
if s == "keep" {
|
||||
return Ok(RetentionPolicy::KeepForever);
|
||||
}
|
||||
|
||||
let time_span = s.parse()?;
|
||||
|
||||
Ok(RetentionPolicy::ProtectFor(time_span))
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: {
|
||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||
},
|
||||
allocation: {
|
||||
schema: MEDIA_SET_ALLOCATION_POLICY_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
retention: {
|
||||
schema: MEDIA_RETENTION_POLICY_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
template: {
|
||||
schema: MEDIA_SET_NAMING_TEMPLATE_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
encrypt: {
|
||||
schema: TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater)]
|
||||
/// Media pool configuration
|
||||
pub struct MediaPoolConfig {
|
||||
/// The pool name
|
||||
#[updater(skip)]
|
||||
pub name: String,
|
||||
/// Media Set allocation policy
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub allocation: Option<String>,
|
||||
/// Media retention policy
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub retention: Option<String>,
|
||||
/// Media set naming template (default "%c")
|
||||
///
|
||||
/// The template is UTF8 text, and can include strftime time
|
||||
/// format specifications.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub template: Option<String>,
|
||||
/// Encryption key fingerprint
|
||||
///
|
||||
/// If set, encrypt all data using the specified key.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub encrypt: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
}
|
@ -1,21 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::api;
|
||||
|
||||
#[api()]
|
||||
/// Media status
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// Media Status
|
||||
pub enum MediaStatus {
|
||||
/// Media is ready to be written
|
||||
Writable,
|
||||
/// Media is full (contains data)
|
||||
Full,
|
||||
/// Media is marked as unknown, needs rescan
|
||||
Unknown,
|
||||
/// Media is marked as damaged
|
||||
Damaged,
|
||||
/// Media is marked as retired
|
||||
Retired,
|
||||
}
|
@ -1,92 +0,0 @@
|
||||
//! Types for tape backup API
|
||||
|
||||
mod device;
|
||||
pub use device::*;
|
||||
|
||||
mod changer;
|
||||
pub use changer::*;
|
||||
|
||||
mod drive;
|
||||
pub use drive::*;
|
||||
|
||||
mod media_pool;
|
||||
pub use media_pool::*;
|
||||
|
||||
mod media_status;
|
||||
pub use media_status::*;
|
||||
|
||||
mod media_location;
|
||||
|
||||
pub use media_location::*;
|
||||
|
||||
mod media;
|
||||
pub use media::*;
|
||||
|
||||
use const_format::concatcp;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
|
||||
use proxmox_uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
BackupType, BACKUP_ID_SCHEMA, BACKUP_NS_PATH_RE, FINGERPRINT_SHA256_FORMAT,
|
||||
PROXMOX_SAFE_ID_REGEX_STR, SNAPSHOT_PATH_REGEX_STR,
|
||||
};
|
||||
|
||||
// Matches a full tape-restore snapshot reference:
// "store:[ns/namespace/...]type/id/time" (namespace part optional).
const_regex! {
    pub TAPE_RESTORE_SNAPSHOT_REGEX = concatcp!(
        r"^", PROXMOX_SAFE_ID_REGEX_STR, r":(?:", BACKUP_NS_PATH_RE, ")?", SNAPSHOT_PATH_REGEX_STR, r"$"
    );
}
|
||||
|
||||
pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
|
||||
ApiStringFormat::Pattern(&TAPE_RESTORE_SNAPSHOT_REGEX);
|
||||
|
||||
pub const TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA: Schema =
|
||||
StringSchema::new("Tape encryption key fingerprint (sha256).")
|
||||
.format(&FINGERPRINT_SHA256_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const TAPE_RESTORE_SNAPSHOT_SCHEMA: Schema =
|
||||
StringSchema::new("A snapshot in the format: 'store:[ns/namespace/...]type/id/time")
|
||||
.format(&TAPE_RESTORE_SNAPSHOT_FORMAT)
|
||||
.type_text("store:[ns/namespace/...]type/id/time")
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
pool: {
|
||||
schema: MEDIA_POOL_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"label-text": {
|
||||
schema: MEDIA_LABEL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"media": {
|
||||
schema: MEDIA_UUID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"media-set": {
|
||||
schema: MEDIA_SET_UUID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"backup-type": {
|
||||
type: BackupType,
|
||||
optional: true,
|
||||
},
|
||||
"backup-id": {
|
||||
schema: BACKUP_ID_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Content list filter parameters
|
||||
pub struct MediaContentListFilter {
|
||||
pub pool: Option<String>,
|
||||
pub label_text: Option<String>,
|
||||
pub media: Option<Uuid>,
|
||||
pub media_set: Option<Uuid>,
|
||||
pub backup_type: Option<BackupType>,
|
||||
pub backup_id: Option<String>,
|
||||
}
|
@ -1,168 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_human_byte::HumanByte;
|
||||
use proxmox_schema::{api, ApiType, Schema, StringSchema, Updater};
|
||||
|
||||
use crate::{
|
||||
CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
|
||||
};
|
||||
|
||||
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
|
||||
StringSchema::new("Timeframe to specify when the rule is active.")
|
||||
.format(&DAILY_DURATION_FORMAT)
|
||||
.schema();
|
||||
|
||||
pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
|
||||
.format(&PROXMOX_SAFE_ID_FORMAT)
|
||||
.min_length(3)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"rate-in": {
|
||||
type: HumanByte,
|
||||
optional: true,
|
||||
},
|
||||
"burst-in": {
|
||||
type: HumanByte,
|
||||
optional: true,
|
||||
},
|
||||
"rate-out": {
|
||||
type: HumanByte,
|
||||
optional: true,
|
||||
},
|
||||
"burst-out": {
|
||||
type: HumanByte,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Default, Clone, Updater, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Rate Limit Configuration
|
||||
pub struct RateLimitConfig {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub rate_in: Option<HumanByte>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub burst_in: Option<HumanByte>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub rate_out: Option<HumanByte>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub burst_out: Option<HumanByte>,
|
||||
}
|
||||
|
||||
impl RateLimitConfig {
|
||||
pub fn with_same_inout(rate: Option<HumanByte>, burst: Option<HumanByte>) -> Self {
|
||||
Self {
|
||||
rate_in: rate,
|
||||
burst_in: burst,
|
||||
rate_out: rate,
|
||||
burst_out: burst,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a [RateLimitConfig] from a [ClientRateLimitConfig]
|
||||
pub fn from_client_config(limit: ClientRateLimitConfig) -> Self {
|
||||
Self::with_same_inout(limit.rate, limit.burst)
|
||||
}
|
||||
}
|
||||
|
||||
const CLIENT_RATE_LIMIT_SCHEMA: Schema = HumanByte::API_SCHEMA
|
||||
.unwrap_string_schema_cloned()
|
||||
.description("Rate limit (for Token bucket filter) in bytes/s with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).")
|
||||
.schema();
|
||||
|
||||
const CLIENT_BURST_SCHEMA: Schema = HumanByte::API_SCHEMA
|
||||
.unwrap_string_schema_cloned()
|
||||
.description("Size of the token bucket (for Token bucket filter) in bytes with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).")
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
rate: {
|
||||
schema: CLIENT_RATE_LIMIT_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
burst: {
|
||||
schema: CLIENT_BURST_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Default, Clone)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Client Rate Limit Configuration
|
||||
pub struct ClientRateLimitConfig {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
rate: Option<HumanByte>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
burst: Option<HumanByte>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: {
|
||||
schema: TRAFFIC_CONTROL_ID_SCHEMA,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
limit: {
|
||||
type: RateLimitConfig,
|
||||
},
|
||||
network: {
|
||||
type: Array,
|
||||
items: {
|
||||
schema: CIDR_SCHEMA,
|
||||
},
|
||||
},
|
||||
timeframe: {
|
||||
type: Array,
|
||||
items: {
|
||||
schema: TRAFFIC_CONTROL_TIMEFRAME_SCHEMA,
|
||||
},
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Clone, Serialize, Deserialize, PartialEq, Updater)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Traffic control rule
|
||||
pub struct TrafficControlRule {
|
||||
#[updater(skip)]
|
||||
pub name: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
/// Rule applies to Source IPs within this networks
|
||||
pub network: Vec<String>,
|
||||
#[serde(flatten)]
|
||||
pub limit: RateLimitConfig,
|
||||
// fixme: expose this?
|
||||
// /// Bandwidth is shared across all connections
|
||||
// #[serde(skip_serializing_if="Option::is_none")]
|
||||
// pub shared: Option<bool>,
|
||||
/// Enable the rule at specific times
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub timeframe: Option<Vec<String>>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
config: {
|
||||
type: TrafficControlRule,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Clone, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Traffic control rule config with current rates
|
||||
pub struct TrafficControlCurrentRate {
|
||||
#[serde(flatten)]
|
||||
pub config: TrafficControlRule,
|
||||
/// Current ingress rate in bytes/second
|
||||
pub cur_rate_in: u64,
|
||||
/// Current egress rate in bytes/second
|
||||
pub cur_rate_out: u64,
|
||||
}
|
@ -1,226 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::{api, BooleanSchema, IntegerSchema, Schema, StringSchema, Updater};
|
||||
|
||||
use super::userid::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
|
||||
use super::{SINGLE_LINE_COMMENT_FORMAT, SINGLE_LINE_COMMENT_SCHEMA};
|
||||
|
||||
pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
|
||||
"Enable the account (default). You can set this to '0' to disable the account.",
|
||||
)
|
||||
.default(true)
|
||||
.schema();
|
||||
|
||||
pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
|
||||
"Account expiration date (seconds since epoch). '0' means no expiration date.",
|
||||
)
|
||||
.default(0)
|
||||
.minimum(0)
|
||||
.schema();
|
||||
|
||||
pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
|
||||
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||
.min_length(2)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
|
||||
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||
.min_length(2)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
|
||||
.format(&SINGLE_LINE_COMMENT_FORMAT)
|
||||
.min_length(2)
|
||||
.max_length(64)
|
||||
.schema();
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
userid: {
|
||||
type: Userid,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
enable: {
|
||||
optional: true,
|
||||
schema: ENABLE_USER_SCHEMA,
|
||||
},
|
||||
expire: {
|
||||
optional: true,
|
||||
schema: EXPIRE_USER_SCHEMA,
|
||||
},
|
||||
firstname: {
|
||||
optional: true,
|
||||
schema: FIRST_NAME_SCHEMA,
|
||||
},
|
||||
lastname: {
|
||||
schema: LAST_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
email: {
|
||||
schema: EMAIL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
tokens: {
|
||||
type: Array,
|
||||
optional: true,
|
||||
description: "List of user's API tokens.",
|
||||
items: {
|
||||
type: ApiToken
|
||||
},
|
||||
},
|
||||
"totp-locked": {
|
||||
type: bool,
|
||||
optional: true,
|
||||
default: false,
|
||||
description: "True if the user is currently locked out of TOTP factors",
|
||||
},
|
||||
"tfa-locked-until": {
|
||||
optional: true,
|
||||
description: "Contains a timestamp until when a user is locked out of 2nd factors",
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// User properties with added list of ApiTokens
|
||||
pub struct UserWithTokens {
|
||||
pub userid: Userid,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub enable: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub expire: Option<i64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub firstname: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub lastname: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub email: Option<String>,
|
||||
#[serde(skip_serializing_if = "Vec::is_empty", default)]
|
||||
pub tokens: Vec<ApiToken>,
|
||||
#[serde(skip_serializing_if = "bool_is_false", default)]
|
||||
pub totp_locked: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub tfa_locked_until: Option<i64>,
|
||||
}
|
||||
|
||||
/// Serde helper: lets `skip_serializing_if` omit boolean fields that are `false`.
fn bool_is_false(b: &bool) -> bool {
    !*b
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
tokenid: {
|
||||
schema: PROXMOX_TOKEN_ID_SCHEMA,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
enable: {
|
||||
optional: true,
|
||||
schema: ENABLE_USER_SCHEMA,
|
||||
},
|
||||
expire: {
|
||||
optional: true,
|
||||
schema: EXPIRE_USER_SCHEMA,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Clone, PartialEq)]
|
||||
/// ApiToken properties.
|
||||
pub struct ApiToken {
|
||||
pub tokenid: Authid,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub enable: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub expire: Option<i64>,
|
||||
}
|
||||
|
||||
impl ApiToken {
|
||||
pub fn is_active(&self) -> bool {
|
||||
if !self.enable.unwrap_or(true) {
|
||||
return false;
|
||||
}
|
||||
if let Some(expire) = self.expire {
|
||||
let now = proxmox_time::epoch_i64();
|
||||
if expire > 0 && expire <= now {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
userid: {
|
||||
type: Userid,
|
||||
},
|
||||
comment: {
|
||||
optional: true,
|
||||
schema: SINGLE_LINE_COMMENT_SCHEMA,
|
||||
},
|
||||
enable: {
|
||||
optional: true,
|
||||
schema: ENABLE_USER_SCHEMA,
|
||||
},
|
||||
expire: {
|
||||
optional: true,
|
||||
schema: EXPIRE_USER_SCHEMA,
|
||||
},
|
||||
firstname: {
|
||||
optional: true,
|
||||
schema: FIRST_NAME_SCHEMA,
|
||||
},
|
||||
lastname: {
|
||||
schema: LAST_NAME_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
email: {
|
||||
schema: EMAIL_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Serialize, Deserialize, Updater, PartialEq, Eq)]
|
||||
/// User properties.
|
||||
pub struct User {
|
||||
#[updater(skip)]
|
||||
pub userid: Userid,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub comment: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub enable: Option<bool>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub expire: Option<i64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub firstname: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub lastname: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub email: Option<String>,
|
||||
}
|
||||
|
||||
impl User {
|
||||
pub fn is_active(&self) -> bool {
|
||||
if !self.enable.unwrap_or(true) {
|
||||
return false;
|
||||
}
|
||||
if let Some(expire) = self.expire {
|
||||
let now = proxmox_time::epoch_i64();
|
||||
if expire > 0 && expire <= now {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
@ -1,190 +0,0 @@
|
||||
//! Defines the types for the api version info endpoint
|
||||
use std::cmp::Ordering;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use anyhow::{format_err, Context};
|
||||
|
||||
use proxmox_schema::api;
|
||||
|
||||
#[api(
|
||||
description: "Api version information",
|
||||
properties: {
|
||||
"version": {
|
||||
description: "Version 'major.minor'",
|
||||
type: String,
|
||||
},
|
||||
"release": {
|
||||
description: "Version release",
|
||||
type: String,
|
||||
},
|
||||
"repoid": {
|
||||
description: "Version repository id",
|
||||
type: String,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(serde::Deserialize, serde::Serialize)]
|
||||
pub struct ApiVersionInfo {
|
||||
pub version: String,
|
||||
pub release: String,
|
||||
pub repoid: String,
|
||||
}
|
||||
|
||||
// Component aliases to keep signatures self-describing.
pub type ApiVersionMajor = u64;
pub type ApiVersionMinor = u64;
pub type ApiVersionRelease = u64;

/// A parsed API version triple (major, minor, release).
#[derive(PartialEq, Eq)]
pub struct ApiVersion {
    pub major: ApiVersionMajor,
    pub minor: ApiVersionMinor,
    pub release: ApiVersionRelease,
}
|
||||
|
||||
impl TryFrom<ApiVersionInfo> for ApiVersion {
|
||||
type Error = anyhow::Error;
|
||||
|
||||
fn try_from(value: ApiVersionInfo) -> Result<Self, Self::Error> {
|
||||
let (major, minor) = value
|
||||
.version
|
||||
.split_once('.')
|
||||
.ok_or_else(|| format_err!("malformed API version {}", value.version))?;
|
||||
|
||||
let major: ApiVersionMajor = major
|
||||
.parse()
|
||||
.with_context(|| "failed to parse major version")?;
|
||||
let minor: ApiVersionMinor = minor
|
||||
.parse()
|
||||
.with_context(|| "failed to parse minor version")?;
|
||||
let release: ApiVersionRelease = value
|
||||
.release
|
||||
.parse()
|
||||
.with_context(|| "failed to parse release version")?;
|
||||
|
||||
Ok(Self {
|
||||
major,
|
||||
minor,
|
||||
release,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for ApiVersion {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
let ordering = match (
|
||||
self.major.cmp(&other.major),
|
||||
self.minor.cmp(&other.minor),
|
||||
self.release.cmp(&other.release),
|
||||
) {
|
||||
(Ordering::Equal, Ordering::Equal, ordering) => ordering,
|
||||
(Ordering::Equal, ordering, _) => ordering,
|
||||
(ordering, _, _) => ordering,
|
||||
};
|
||||
|
||||
Some(ordering)
|
||||
}
|
||||
}
|
||||
|
||||
impl ApiVersion {
|
||||
pub fn new(major: ApiVersionMajor, minor: ApiVersionMinor, release: ApiVersionRelease) -> Self {
|
||||
Self {
|
||||
major,
|
||||
minor,
|
||||
release,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fix: test function name typo "comarison" -> "comparison" (test functions
// have no callers, so the rename is safe).
#[test]
fn same_level_version_comparison() {
    // Versions differing in exactly one component must compare on it.
    let major_base = ApiVersion::new(2, 0, 0);
    let major_less = ApiVersion::new(1, 0, 0);
    let major_greater = ApiVersion::new(3, 0, 0);

    let minor_base = ApiVersion::new(2, 2, 0);
    let minor_less = ApiVersion::new(2, 1, 0);
    let minor_greater = ApiVersion::new(2, 3, 0);

    let release_base = ApiVersion::new(2, 2, 2);
    let release_less = ApiVersion::new(2, 2, 1);
    let release_greater = ApiVersion::new(2, 2, 3);

    assert!(major_base == major_base);
    assert!(minor_base == minor_base);
    assert!(release_base == release_base);

    assert!(major_base > major_less);
    assert!(major_base >= major_less);
    assert!(major_base != major_less);

    assert!(major_base < major_greater);
    assert!(major_base <= major_greater);
    assert!(major_base != major_greater);

    assert!(minor_base > minor_less);
    assert!(minor_base >= minor_less);
    assert!(minor_base != minor_less);

    assert!(minor_base < minor_greater);
    assert!(minor_base <= minor_greater);
    assert!(minor_base != minor_greater);

    assert!(release_base > release_less);
    assert!(release_base >= release_less);
    assert!(release_base != release_less);

    assert!(release_base < release_greater);
    assert!(release_base <= release_greater);
    assert!(release_base != release_greater);
}
|
||||
|
||||
// Fix: test function name typo "comarison" -> "comparison" (test functions
// have no callers, so the rename is safe).
#[test]
fn mixed_level_version_comparison() {
    // A difference in a more-significant component dominates any difference
    // in less-significant ones.
    let major_base = ApiVersion::new(2, 0, 0);
    let major_less = ApiVersion::new(1, 0, 0);
    let major_greater = ApiVersion::new(3, 0, 0);

    let minor_base = ApiVersion::new(2, 2, 0);
    let minor_less = ApiVersion::new(2, 1, 0);
    let minor_greater = ApiVersion::new(2, 3, 0);

    let release_base = ApiVersion::new(2, 2, 2);
    let release_less = ApiVersion::new(2, 2, 1);
    let release_greater = ApiVersion::new(2, 2, 3);

    assert!(major_base < minor_base);
    assert!(major_base < minor_less);
    assert!(major_base < minor_greater);

    assert!(major_base < release_base);
    assert!(major_base < release_less);
    assert!(major_base < release_greater);

    assert!(major_less < minor_base);
    assert!(major_less < minor_less);
    assert!(major_less < minor_greater);

    assert!(major_less < release_base);
    assert!(major_less < release_less);
    assert!(major_less < release_greater);

    assert!(major_greater > minor_base);
    assert!(major_greater > minor_less);
    assert!(major_greater > minor_greater);

    assert!(major_greater > release_base);
    assert!(major_greater > release_less);
    assert!(major_greater > release_greater);

    assert!(minor_base < release_base);
    assert!(minor_base < release_less);
    assert!(minor_base < release_greater);

    assert!(minor_greater > release_base);
    assert!(minor_greater > release_less);
    assert!(minor_greater > release_greater);

    assert!(minor_less < release_base);
    assert!(minor_less < release_less);
    assert!(minor_less < release_greater);
}
|
@ -1,78 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_schema::*;
|
||||
|
||||
// Zpool names start with a letter, followed by alphanumerics and -_.: .
const_regex! {
    pub ZPOOL_NAME_REGEX = r"^[a-zA-Z][a-z0-9A-Z\-_.:]+$";
}
|
||||
|
||||
pub const ZFS_ASHIFT_SCHEMA: Schema = IntegerSchema::new("Pool sector size exponent.")
|
||||
.minimum(9)
|
||||
.maximum(16)
|
||||
.default(12)
|
||||
.schema();
|
||||
|
||||
pub const ZPOOL_NAME_SCHEMA: Schema = StringSchema::new("ZFS Pool Name")
|
||||
.format(&ApiStringFormat::Pattern(&ZPOOL_NAME_REGEX))
|
||||
.schema();
|
||||
|
||||
#[api(default: "On")]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// The ZFS compression algorithm to use.
|
||||
pub enum ZfsCompressionType {
|
||||
/// Gnu Zip
|
||||
Gzip,
|
||||
/// LZ4
|
||||
Lz4,
|
||||
/// LZJB
|
||||
Lzjb,
|
||||
/// ZLE
|
||||
Zle,
|
||||
/// ZStd
|
||||
ZStd,
|
||||
/// Enable compression using the default algorithm.
|
||||
On,
|
||||
/// Disable compression.
|
||||
Off,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
/// The ZFS RAID level to use.
|
||||
pub enum ZfsRaidLevel {
|
||||
/// Single Disk
|
||||
Single,
|
||||
/// Mirror
|
||||
Mirror,
|
||||
/// Raid10
|
||||
Raid10,
|
||||
/// RaidZ
|
||||
RaidZ,
|
||||
/// RaidZ2
|
||||
RaidZ2,
|
||||
/// RaidZ3
|
||||
RaidZ3,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// zpool list item
|
||||
pub struct ZpoolListItem {
|
||||
/// zpool name
|
||||
pub name: String,
|
||||
/// Health
|
||||
pub health: String,
|
||||
/// Total size
|
||||
pub size: u64,
|
||||
/// Used size
|
||||
pub alloc: u64,
|
||||
/// Free space
|
||||
pub free: u64,
|
||||
/// ZFS fragnentation level
|
||||
pub frag: u64,
|
||||
/// ZFS deduplication ratio
|
||||
pub dedup: f64,
|
||||
}
|
@ -1,76 +0,0 @@
|
||||
use pbs_api_types::{BackupGroup, BackupType, GroupFilter};
|
||||
use std::str::FromStr;
|
||||
|
||||
#[test]
fn test_no_filters() {
    // With no filters configured, every group must be selected.
    let group_filters = vec![];

    for id in [
        "vm/101", "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", "vm/109",
    ] {
        assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
    }
}
|
||||
|
||||
#[test]
fn test_include_filters() {
    // A single include filter selects exactly the matching groups.
    let group_filters = vec![GroupFilter::from_str("regex:.*10[2-8]").unwrap()];

    let selected = |id: &str| BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters);

    for id in [
        "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108",
    ] {
        assert!(selected(id));
    }

    for id in ["vm/101", "vm/109"] {
        assert!(!selected(id));
    }
}
|
||||
|
||||
#[test]
fn test_exclude_filters() {
    // With only exclude filters, everything not excluded is selected.
    let group_filters = [
        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
    ];

    let selected = |id: &str| BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters);

    for id in ["vm/104", "vm/108", "vm/109"] {
        assert!(selected(id));
    }

    for id in ["vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107"] {
        assert!(!selected(id));
    }
}
|
||||
|
||||
#[test]
fn test_include_and_exclude_filters() {
    // Excludes take precedence over includes; a group must match the include
    // list and none of the excludes to be selected.
    let group_filters = [
        GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
        GroupFilter::from_str("regex:.*10[2-8]").unwrap(),
        GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
    ];

    let selected = |id: &str| BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters);

    for id in ["vm/104", "vm/108"] {
        assert!(selected(id));
    }

    for id in [
        "vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107", "vm/109",
    ] {
        assert!(!selected(id));
    }
}
|
@ -1,43 +0,0 @@
|
||||
[package]
|
||||
name = "proxmox-access-control"
|
||||
description = "A collection of utilities to implement access control management."
|
||||
version = "0.2.4"
|
||||
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
exclude.workspace = true
|
||||
homepage.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow.workspace = true
|
||||
nix = { workspace = true, optional = true }
|
||||
openssl = { workspace = true, optional = true }
|
||||
serde.workspace = true
|
||||
serde_json = { workspace = true, optional = true }
|
||||
|
||||
proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
|
||||
proxmox-config-digest = { workspace = true, optional = true, features = [ "openssl" ] }
|
||||
proxmox-product-config = { workspace = true, optional = true }
|
||||
proxmox-router = { workspace = true, optional = true }
|
||||
proxmox-schema.workspace = true
|
||||
proxmox-section-config = { workspace = true, optional = true }
|
||||
proxmox-shared-memory = { workspace = true, optional = true }
|
||||
proxmox-sys = { workspace = true, features = [ "crypt" ], optional = true }
|
||||
proxmox-time = { workspace = true }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
impl = [
|
||||
"dep:nix",
|
||||
"dep:openssl",
|
||||
"dep:proxmox-config-digest",
|
||||
"dep:proxmox-product-config",
|
||||
"dep:proxmox-router",
|
||||
"dep:proxmox-section-config",
|
||||
"dep:proxmox-shared-memory",
|
||||
"dep:proxmox-sys",
|
||||
"dep:serde_json",
|
||||
]
|
@ -1,48 +0,0 @@
|
||||
rust-proxmox-access-control (0.2.4-1) bookworm; urgency=medium
|
||||
|
||||
* rebuild with proxmox-schema 4.0
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 15 Jan 2025 12:47:56 +0100
|
||||
|
||||
rust-proxmox-access-control (0.2.3-1) bookworm; urgency=medium
|
||||
|
||||
* upgrade to current proxmox-router
|
||||
|
||||
* doc fixup
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 05 Sep 2024 14:25:02 +0200
|
||||
|
||||
rust-proxmox-access-control (0.2.2-1) bookworm; urgency=medium
|
||||
|
||||
* add init_user_config() to AccessControlConfig with a default
|
||||
implementation so downstream can ensure default users such as 'root@pam'
|
||||
are enabled if so desired
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Jul 2024 09:04:59 +0200
|
||||
|
||||
rust-proxmox-access-control (0.2.1-1) bookworm; urgency=medium
|
||||
|
||||
* rebuild with proxmox-sys 6.0
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 11 Jul 2024 14:46:29 +0200
|
||||
|
||||
rust-proxmox-access-control (0.2.0-1) bookworm; urgency=medium
|
||||
|
||||
* change acl::config() and user::config() to return a ConfigDigest instead
|
||||
of raw digest bytes
|
||||
|
||||
* various clippy fixes
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 04 Jul 2024 14:32:54 +0200
|
||||
|
||||
rust-proxmox-access-control (0.1.1-1) bookworm; urgency=medium
|
||||
|
||||
* upgrade proxmox-time to 2.0
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 20 Jun 2024 13:47:21 +0200
|
||||
|
||||
rust-proxmox-access-control (0.1.0-1) bookworm; urgency=medium
|
||||
|
||||
* initial packaging
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 19 Jun 2024 14:44:26 +0200
|
@ -1,70 +0,0 @@
|
||||
Source: rust-proxmox-access-control
|
||||
Section: rust
|
||||
Priority: optional
|
||||
Build-Depends: debhelper-compat (= 13),
|
||||
dh-sequence-cargo,
|
||||
cargo:native <!nocheck>,
|
||||
rustc:native (>= 1.80) <!nocheck>,
|
||||
libstd-rust-dev <!nocheck>,
|
||||
librust-anyhow-1+default-dev <!nocheck>,
|
||||
librust-proxmox-auth-api-0.4+api-types-dev <!nocheck>,
|
||||
librust-proxmox-auth-api-0.4+default-dev <!nocheck>,
|
||||
librust-proxmox-schema-4+default-dev <!nocheck>,
|
||||
librust-proxmox-time-2+default-dev <!nocheck>,
|
||||
librust-serde-1+default-dev <!nocheck>
|
||||
Maintainer: Proxmox Support Team <support@proxmox.com>
|
||||
Standards-Version: 4.7.0
|
||||
Vcs-Git: git://git.proxmox.com/git/proxmox.git
|
||||
Vcs-Browser: https://git.proxmox.com/?p=proxmox.git
|
||||
Homepage: https://proxmox.com
|
||||
X-Cargo-Crate: proxmox-access-control
|
||||
Rules-Requires-Root: no
|
||||
|
||||
Package: librust-proxmox-access-control-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-anyhow-1+default-dev,
|
||||
librust-proxmox-auth-api-0.4+api-types-dev,
|
||||
librust-proxmox-auth-api-0.4+default-dev,
|
||||
librust-proxmox-schema-4+default-dev,
|
||||
librust-proxmox-time-2+default-dev,
|
||||
librust-serde-1+default-dev
|
||||
Suggests:
|
||||
librust-proxmox-access-control+impl-dev (= ${binary:Version})
|
||||
Provides:
|
||||
librust-proxmox-access-control+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-access-control-0-dev (= ${binary:Version}),
|
||||
librust-proxmox-access-control-0+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-access-control-0.2-dev (= ${binary:Version}),
|
||||
librust-proxmox-access-control-0.2+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-access-control-0.2.4-dev (= ${binary:Version}),
|
||||
librust-proxmox-access-control-0.2.4+default-dev (= ${binary:Version})
|
||||
Description: Collection of utilities to implement access control management - Rust source code
|
||||
Source code for Debianized Rust crate "proxmox-access-control"
|
||||
|
||||
Package: librust-proxmox-access-control+impl-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-proxmox-access-control-dev (= ${binary:Version}),
|
||||
librust-nix-0.26+default-dev (>= 0.26.1-~~),
|
||||
librust-openssl-0.10+default-dev,
|
||||
librust-proxmox-config-digest-0.1+default-dev,
|
||||
librust-proxmox-config-digest-0.1+openssl-dev,
|
||||
librust-proxmox-product-config-0.2+default-dev,
|
||||
librust-proxmox-router-3+default-dev,
|
||||
librust-proxmox-section-config-2+default-dev (>= 2.1.0-~~),
|
||||
librust-proxmox-shared-memory-0.3+default-dev,
|
||||
librust-proxmox-sys-0.6+crypt-dev (>= 0.6.5-~~),
|
||||
librust-proxmox-sys-0.6+default-dev (>= 0.6.5-~~),
|
||||
librust-serde-json-1+default-dev
|
||||
Provides:
|
||||
librust-proxmox-access-control-0+impl-dev (= ${binary:Version}),
|
||||
librust-proxmox-access-control-0.2+impl-dev (= ${binary:Version}),
|
||||
librust-proxmox-access-control-0.2.4+impl-dev (= ${binary:Version})
|
||||
Description: Collection of utilities to implement access control management - feature "impl"
|
||||
This metapackage enables feature "impl" for the Rust proxmox-access-control
|
||||
crate, by pulling in any additional dependencies needed by that feature.
|
@ -1,18 +0,0 @@
|
||||
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
|
||||
|
||||
Files:
|
||||
*
|
||||
Copyright: 2024 Proxmox Server Solutions GmbH <support@proxmox.com>
|
||||
License: AGPL-3.0-or-later
|
||||
This program is free software: you can redistribute it and/or modify it under
|
||||
the terms of the GNU Affero General Public License as published by the Free
|
||||
Software Foundation, either version 3 of the License, or (at your option) any
|
||||
later version.
|
||||
.
|
||||
This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
|
||||
details.
|
||||
.
|
||||
You should have received a copy of the GNU Affero General Public License along
|
||||
with this program. If not, see <https://www.gnu.org/licenses/>.
|
@ -1,7 +0,0 @@
|
||||
overlay = "."
|
||||
crate_src_path = ".."
|
||||
maintainer = "Proxmox Support Team <support@proxmox.com>"
|
||||
|
||||
[source]
|
||||
vcs_git = "git://git.proxmox.com/git/proxmox.git"
|
||||
vcs_browser = "https://git.proxmox.com/?p=proxmox.git"
|
@ -1 +0,0 @@
|
||||
3.0 (native)
|
@ -1,992 +0,0 @@
|
||||
use std::collections::{BTreeMap, BTreeSet, HashMap};
|
||||
use std::io::Write;
|
||||
use std::path::Path;
|
||||
use std::sync::{Arc, OnceLock, RwLock};
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use proxmox_auth_api::types::{Authid, Userid};
|
||||
use proxmox_config_digest::ConfigDigest;
|
||||
use proxmox_product_config::{open_api_lockfile, replace_privileged_config, ApiLockGuard};
|
||||
|
||||
use crate::init::{access_conf, acl_config, acl_config_lock};
|
||||
|
||||
pub fn split_acl_path(path: &str) -> Vec<&str> {
|
||||
let items = path.split('/');
|
||||
|
||||
let mut components = vec![];
|
||||
|
||||
for name in items {
|
||||
if name.is_empty() {
|
||||
continue;
|
||||
}
|
||||
components.push(name);
|
||||
}
|
||||
|
||||
components
|
||||
}
|
||||
|
||||
/// Tree representing a parsed acl.cfg
|
||||
#[derive(Default)]
|
||||
pub struct AclTree {
|
||||
/// Root node of the tree.
|
||||
///
|
||||
/// The rest of the tree is available via [find_node()](AclTree::find_node()) or an
|
||||
/// [`AclTreeNode`]'s [children](AclTreeNode::children) member.
|
||||
pub root: AclTreeNode,
|
||||
}
|
||||
|
||||
/// Node representing ACLs for a certain ACL path.
|
||||
#[derive(Default)]
|
||||
pub struct AclTreeNode {
|
||||
/// `User` or `Token` ACLs for this node.
|
||||
pub users: HashMap<Authid, HashMap<String, bool>>,
|
||||
/// `Group` ACLs for this node (not yet implemented)
|
||||
pub groups: HashMap<String, HashMap<String, bool>>,
|
||||
/// `AclTreeNodes` representing ACL paths directly below the current one.
|
||||
pub children: BTreeMap<String, AclTreeNode>,
|
||||
}
|
||||
|
||||
impl AclTreeNode {
|
||||
/// Creates a new, empty AclTreeNode.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
users: HashMap::new(),
|
||||
groups: HashMap::new(),
|
||||
children: BTreeMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns applicable role and their propagation status for a given [Authid].
|
||||
///
|
||||
/// If the `Authid` is a `User` that has no specific `Roles` configured on this node,
|
||||
/// applicable `Group` roles will be returned instead.
|
||||
///
|
||||
/// If `leaf` is `false`, only those roles where the propagate flag in the ACL is set to `true`
|
||||
/// are returned. Otherwise, all roles will be returned.
|
||||
pub fn extract_roles(&self, auth_id: &Authid, leaf: bool) -> HashMap<String, bool> {
|
||||
let user_roles = self.extract_user_roles(auth_id, leaf);
|
||||
if !user_roles.is_empty() || auth_id.is_token() {
|
||||
// user privs always override group privs
|
||||
return user_roles;
|
||||
};
|
||||
|
||||
self.extract_group_roles(auth_id.user(), leaf)
|
||||
}
|
||||
|
||||
fn extract_user_roles(&self, auth_id: &Authid, leaf: bool) -> HashMap<String, bool> {
|
||||
let mut map = HashMap::new();
|
||||
|
||||
let roles = match self.users.get(auth_id) {
|
||||
Some(m) => m,
|
||||
None => return map,
|
||||
};
|
||||
|
||||
for (role, propagate) in roles {
|
||||
if *propagate || leaf {
|
||||
if access_conf().role_no_access() == Some(role) {
|
||||
// return a map with a single role 'NoAccess'
|
||||
let mut map = HashMap::new();
|
||||
map.insert(role.to_string(), false);
|
||||
return map;
|
||||
}
|
||||
map.insert(role.to_string(), *propagate);
|
||||
}
|
||||
}
|
||||
|
||||
map
|
||||
}
|
||||
|
||||
fn extract_group_roles(&self, _user: &Userid, leaf: bool) -> HashMap<String, bool> {
|
||||
let mut map = HashMap::new();
|
||||
|
||||
for roles in self.groups.values() {
|
||||
let is_member = false; // fixme: check if user is member of the group
|
||||
if !is_member {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (role, propagate) in roles {
|
||||
if *propagate || leaf {
|
||||
if access_conf().role_no_access() == Some(role) {
|
||||
// return a map with a single role 'NoAccess'
|
||||
let mut map = HashMap::new();
|
||||
map.insert(role.to_string(), false);
|
||||
return map;
|
||||
}
|
||||
map.insert(role.to_string(), *propagate);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
map
|
||||
}
|
||||
|
||||
fn delete_group_role(&mut self, group: &str, role: &str) {
|
||||
let roles = match self.groups.get_mut(group) {
|
||||
Some(r) => r,
|
||||
None => return,
|
||||
};
|
||||
roles.remove(role);
|
||||
}
|
||||
|
||||
fn delete_user_role(&mut self, auth_id: &Authid, role: &str) {
|
||||
let roles = match self.users.get_mut(auth_id) {
|
||||
Some(r) => r,
|
||||
None => return,
|
||||
};
|
||||
roles.remove(role);
|
||||
}
|
||||
|
||||
fn delete_authid(&mut self, auth_id: &Authid) {
|
||||
for node in self.children.values_mut() {
|
||||
node.delete_authid(auth_id);
|
||||
}
|
||||
self.users.remove(auth_id);
|
||||
}
|
||||
|
||||
fn insert_group_role(&mut self, group: String, role: String, propagate: bool) {
|
||||
let map = self.groups.entry(group).or_default();
|
||||
if let Some(no_access) = access_conf().role_no_access() {
|
||||
if role == no_access {
|
||||
map.clear();
|
||||
} else {
|
||||
map.remove(no_access);
|
||||
}
|
||||
}
|
||||
|
||||
map.insert(role, propagate);
|
||||
}
|
||||
|
||||
fn insert_user_role(&mut self, auth_id: Authid, role: String, propagate: bool) {
|
||||
let map = self.users.entry(auth_id).or_default();
|
||||
if let Some(no_access) = access_conf().role_no_access() {
|
||||
if role == no_access {
|
||||
map.clear();
|
||||
} else {
|
||||
map.remove(no_access);
|
||||
}
|
||||
}
|
||||
|
||||
map.insert(role, propagate);
|
||||
}
|
||||
|
||||
fn get_child_paths(
|
||||
&self,
|
||||
path: String,
|
||||
auth_id: &Authid,
|
||||
paths: &mut Vec<String>,
|
||||
) -> Result<(), Error> {
|
||||
for (sub_comp, child_node) in &self.children {
|
||||
let roles = child_node.extract_roles(auth_id, true);
|
||||
let child_path = format!("{path}/{sub_comp}");
|
||||
if !roles.is_empty() {
|
||||
paths.push(child_path.clone());
|
||||
}
|
||||
child_node.get_child_paths(child_path, auth_id, paths)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl AclTree {
|
||||
/// Create a new, empty ACL tree with a single, empty root [node](AclTreeNode)
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
root: AclTreeNode::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Iterates over the tree looking for a node matching `path`.
|
||||
pub fn find_node(&mut self, path: &str) -> Option<&mut AclTreeNode> {
|
||||
let path = split_acl_path(path);
|
||||
self.get_node_mut(&path)
|
||||
}
|
||||
|
||||
fn get_node(&self, path: &[&str]) -> Option<&AclTreeNode> {
|
||||
let mut node = &self.root;
|
||||
for outer in path {
|
||||
for comp in outer.split('/') {
|
||||
node = node.children.get(comp)?;
|
||||
}
|
||||
}
|
||||
Some(node)
|
||||
}
|
||||
|
||||
fn get_node_mut(&mut self, path: &[&str]) -> Option<&mut AclTreeNode> {
|
||||
let mut node = &mut self.root;
|
||||
for outer in path {
|
||||
for comp in outer.split('/') {
|
||||
node = node.children.get_mut(comp)?;
|
||||
}
|
||||
}
|
||||
Some(node)
|
||||
}
|
||||
|
||||
fn get_or_insert_node(&mut self, path: &[&str]) -> &mut AclTreeNode {
|
||||
let mut node = &mut self.root;
|
||||
for outer in path {
|
||||
for comp in outer.split('/') {
|
||||
node = node.children.entry(String::from(comp)).or_default();
|
||||
}
|
||||
}
|
||||
node
|
||||
}
|
||||
|
||||
/// Deletes the specified `role` from the `group`'s ACL on `path`.
|
||||
///
|
||||
/// Never fails, even if the `path` has no ACLs configured, or the `group`/`role` combination
|
||||
/// does not exist on `path`.
|
||||
pub fn delete_group_role(&mut self, path: &str, group: &str, role: &str) {
|
||||
let path = split_acl_path(path);
|
||||
let node = match self.get_node_mut(&path) {
|
||||
Some(n) => n,
|
||||
None => return,
|
||||
};
|
||||
node.delete_group_role(group, role);
|
||||
}
|
||||
|
||||
/// Deletes the specified `role` from the `user`'s ACL on `path`.
|
||||
///
|
||||
/// Never fails, even if the `path` has no ACLs configured, or the `user`/`role` combination
|
||||
/// does not exist on `path`.
|
||||
pub fn delete_user_role(&mut self, path: &str, auth_id: &Authid, role: &str) {
|
||||
let path = split_acl_path(path);
|
||||
let node = match self.get_node_mut(&path) {
|
||||
Some(n) => n,
|
||||
None => return,
|
||||
};
|
||||
node.delete_user_role(auth_id, role);
|
||||
}
|
||||
|
||||
/// Deletes the [`AclTreeNode`] at the specified patth
|
||||
///
|
||||
/// Never fails, deletes a node iff the specified path exists.
|
||||
pub fn delete_node(&mut self, path: &str) {
|
||||
let mut path = split_acl_path(path);
|
||||
let last = path.pop();
|
||||
let parent = match self.get_node_mut(&path) {
|
||||
Some(n) => n,
|
||||
None => return,
|
||||
};
|
||||
if let Some(name) = last {
|
||||
parent.children.remove(name);
|
||||
}
|
||||
}
|
||||
|
||||
/// Deletes a user or token from the ACL-tree
|
||||
///
|
||||
/// Traverses the tree in-order and removes the given user/token by their Authid
|
||||
/// from every node in the tree.
|
||||
pub fn delete_authid(&mut self, auth_id: &Authid) {
|
||||
self.root.delete_authid(auth_id);
|
||||
}
|
||||
|
||||
/// Inserts the specified `role` into the `group` ACL on `path`.
|
||||
///
|
||||
/// The [`AclTreeNode`] representing `path` will be created and inserted into the tree if
|
||||
/// necessary.
|
||||
pub fn insert_group_role(&mut self, path: &str, group: &str, role: &str, propagate: bool) {
|
||||
let path = split_acl_path(path);
|
||||
let node = self.get_or_insert_node(&path);
|
||||
node.insert_group_role(group.to_string(), role.to_string(), propagate);
|
||||
}
|
||||
|
||||
/// Inserts the specified `role` into the `user` ACL on `path`.
|
||||
///
|
||||
/// The [`AclTreeNode`] representing `path` will be created and inserted into the tree if
|
||||
/// necessary.
|
||||
pub fn insert_user_role(&mut self, path: &str, auth_id: &Authid, role: &str, propagate: bool) {
|
||||
let path = split_acl_path(path);
|
||||
let node = self.get_or_insert_node(&path);
|
||||
node.insert_user_role(auth_id.to_owned(), role.to_string(), propagate);
|
||||
}
|
||||
|
||||
fn write_node_config(node: &AclTreeNode, path: &str, w: &mut dyn Write) -> Result<(), Error> {
|
||||
let mut role_ug_map0: HashMap<_, BTreeSet<_>> = HashMap::new();
|
||||
let mut role_ug_map1: HashMap<_, BTreeSet<_>> = HashMap::new();
|
||||
|
||||
for (auth_id, roles) in &node.users {
|
||||
// no need to save, because root is always 'Administrator'
|
||||
if !auth_id.is_token() && auth_id.user() == "root@pam" {
|
||||
continue;
|
||||
}
|
||||
for (role, propagate) in roles {
|
||||
let role = role.as_str();
|
||||
let auth_id = auth_id.to_string();
|
||||
if *propagate {
|
||||
role_ug_map1.entry(role).or_default().insert(auth_id);
|
||||
} else {
|
||||
role_ug_map0.entry(role).or_default().insert(auth_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (group, roles) in &node.groups {
|
||||
for (role, propagate) in roles {
|
||||
let group = format!("@{}", group);
|
||||
if *propagate {
|
||||
role_ug_map1.entry(role).or_default().insert(group);
|
||||
} else {
|
||||
role_ug_map0.entry(role).or_default().insert(group);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn group_by_property_list(
|
||||
item_property_map: &HashMap<&str, BTreeSet<String>>,
|
||||
) -> BTreeMap<String, BTreeSet<String>> {
|
||||
let mut result_map: BTreeMap<_, BTreeSet<_>> = BTreeMap::new();
|
||||
for (item, property_map) in item_property_map {
|
||||
let item_list = property_map.iter().fold(String::new(), |mut acc, v| {
|
||||
if !acc.is_empty() {
|
||||
acc.push(',');
|
||||
}
|
||||
acc.push_str(v);
|
||||
acc
|
||||
});
|
||||
result_map
|
||||
.entry(item_list)
|
||||
.or_default()
|
||||
.insert(item.to_string());
|
||||
}
|
||||
result_map
|
||||
}
|
||||
|
||||
let uglist_role_map0 = group_by_property_list(&role_ug_map0);
|
||||
let uglist_role_map1 = group_by_property_list(&role_ug_map1);
|
||||
|
||||
fn role_list(roles: &BTreeSet<String>) -> String {
|
||||
if let Some(no_access) = access_conf().role_no_access() {
|
||||
if roles.contains(no_access) {
|
||||
return String::from(no_access);
|
||||
}
|
||||
}
|
||||
|
||||
roles.iter().fold(String::new(), |mut acc, v| {
|
||||
if !acc.is_empty() {
|
||||
acc.push(',');
|
||||
}
|
||||
acc.push_str(v);
|
||||
acc
|
||||
})
|
||||
}
|
||||
|
||||
for (uglist, roles) in &uglist_role_map0 {
|
||||
let role_list = role_list(roles);
|
||||
writeln!(
|
||||
w,
|
||||
"acl:0:{}:{}:{}",
|
||||
if path.is_empty() { "/" } else { path },
|
||||
uglist,
|
||||
role_list
|
||||
)?;
|
||||
}
|
||||
|
||||
for (uglist, roles) in &uglist_role_map1 {
|
||||
let role_list = role_list(roles);
|
||||
writeln!(
|
||||
w,
|
||||
"acl:1:{}:{}:{}",
|
||||
if path.is_empty() { "/" } else { path },
|
||||
uglist,
|
||||
role_list
|
||||
)?;
|
||||
}
|
||||
|
||||
for (name, child) in node.children.iter() {
|
||||
let child_path = format!("{}/{}", path, name);
|
||||
Self::write_node_config(child, &child_path, w)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_config(&self, w: &mut dyn Write) -> Result<(), Error> {
|
||||
Self::write_node_config(&self.root, "", w)
|
||||
}
|
||||
|
||||
fn parse_acl_line(&mut self, line: &str) -> Result<(), Error> {
|
||||
let items: Vec<&str> = line.split(':').collect();
|
||||
|
||||
if items.len() != 5 {
|
||||
bail!("wrong number of items.");
|
||||
}
|
||||
|
||||
if items[0] != "acl" {
|
||||
bail!("line does not start with 'acl'.");
|
||||
}
|
||||
|
||||
let propagate = if items[1] == "0" {
|
||||
false
|
||||
} else if items[1] == "1" {
|
||||
true
|
||||
} else {
|
||||
bail!("expected '0' or '1' for propagate flag.");
|
||||
};
|
||||
|
||||
let path_str = items[2];
|
||||
let path = split_acl_path(path_str);
|
||||
let node = self.get_or_insert_node(&path);
|
||||
|
||||
let uglist: Vec<&str> = items[3].split(',').map(|v| v.trim()).collect();
|
||||
|
||||
let rolelist: Vec<&str> = items[4].split(',').map(|v| v.trim()).collect();
|
||||
|
||||
for user_or_group in &uglist {
|
||||
for role in &rolelist {
|
||||
if !access_conf().roles().contains_key(role) {
|
||||
bail!("unknown role '{}'", role);
|
||||
}
|
||||
if let Some(group) = user_or_group.strip_prefix('@') {
|
||||
node.insert_group_role(group.to_string(), role.to_string(), propagate);
|
||||
} else {
|
||||
node.insert_user_role(user_or_group.parse()?, role.to_string(), propagate);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn load(filename: &Path) -> Result<(Self, ConfigDigest), Error> {
|
||||
let mut tree = Self::new();
|
||||
|
||||
let raw = match std::fs::read_to_string(filename) {
|
||||
Ok(v) => v,
|
||||
Err(err) => {
|
||||
if err.kind() == std::io::ErrorKind::NotFound {
|
||||
String::new()
|
||||
} else {
|
||||
bail!("unable to read acl config {:?} - {}", filename, err);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let digest = ConfigDigest::from_slice(raw.as_bytes());
|
||||
|
||||
for (linenr, line) in raw.lines().enumerate() {
|
||||
let line = line.trim();
|
||||
if line.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if let Err(err) = tree.parse_acl_line(line) {
|
||||
bail!(
|
||||
"unable to parse acl config {:?}, line {} - {}",
|
||||
filename,
|
||||
linenr + 1,
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok((tree, digest))
|
||||
}
|
||||
|
||||
/// This is used for testing
|
||||
pub fn from_raw(raw: &str) -> Result<Self, Error> {
|
||||
let mut tree = Self::new();
|
||||
for (linenr, line) in raw.lines().enumerate() {
|
||||
let line = line.trim();
|
||||
if line.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if let Err(err) = tree.parse_acl_line(line) {
|
||||
bail!(
|
||||
"unable to parse acl config data, line {} - {}",
|
||||
linenr + 1,
|
||||
err
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(tree)
|
||||
}
|
||||
|
||||
/// Returns a map of role name and propagation status for a given `auth_id` and `path`.
|
||||
///
|
||||
/// This will collect role mappings according to the following algorithm:
|
||||
/// - iterate over all intermediate nodes along `path` and collect roles with `propagate` set
|
||||
/// - get all (propagating and non-propagating) roles for last component of path
|
||||
/// - more specific role maps replace less specific role maps
|
||||
/// -- user/token is more specific than group at each level
|
||||
/// -- roles lower in the tree are more specific than those higher up along the path
|
||||
pub fn roles(&self, auth_id: &Authid, path: &[&str]) -> HashMap<String, bool> {
|
||||
let mut node = &self.root;
|
||||
let mut role_map = node.extract_roles(auth_id, path.is_empty());
|
||||
|
||||
let mut comp_iter = path.iter().peekable();
|
||||
|
||||
while let Some(comp) = comp_iter.next() {
|
||||
let last_comp = comp_iter.peek().is_none();
|
||||
|
||||
let mut sub_comp_iter = comp.split('/').peekable();
|
||||
|
||||
while let Some(sub_comp) = sub_comp_iter.next() {
|
||||
let last_sub_comp = last_comp && sub_comp_iter.peek().is_none();
|
||||
|
||||
node = match node.children.get(sub_comp) {
|
||||
Some(n) => n,
|
||||
None => return role_map, // path not found
|
||||
};
|
||||
|
||||
let new_map = node.extract_roles(auth_id, last_sub_comp);
|
||||
if !new_map.is_empty() {
|
||||
// overwrite previous mappings
|
||||
role_map = new_map;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
role_map
|
||||
}
|
||||
|
||||
pub fn get_child_paths(&self, auth_id: &Authid, path: &[&str]) -> Result<Vec<String>, Error> {
|
||||
let mut res = Vec::new();
|
||||
|
||||
if let Some(node) = self.get_node(path) {
|
||||
let path = path.join("/");
|
||||
node.get_child_paths(path, auth_id, &mut res)?;
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get exclusive lock
|
||||
pub fn lock_config() -> Result<ApiLockGuard, Error> {
|
||||
open_api_lockfile(acl_config_lock(), None, true)
|
||||
}
|
||||
|
||||
/// Reads the [`AclTree`] from `acl.cfg` in the configuration directory.
|
||||
pub fn config() -> Result<(AclTree, ConfigDigest), Error> {
|
||||
let path = acl_config();
|
||||
AclTree::load(&path)
|
||||
}
|
||||
|
||||
/// Returns a cached [`AclTree`] or a fresh copy read directly from `acl.cfg` in the configuration
|
||||
/// directory.
|
||||
///
|
||||
/// Since the AclTree is used for every API request's permission check, this caching mechanism
|
||||
/// allows to skip reading and parsing the file again if it is unchanged.
|
||||
pub fn cached_config() -> Result<Arc<AclTree>, Error> {
|
||||
struct ConfigCache {
|
||||
data: Option<Arc<AclTree>>,
|
||||
last_mtime: i64,
|
||||
last_mtime_nsec: i64,
|
||||
}
|
||||
|
||||
static CACHED_CONFIG: OnceLock<RwLock<ConfigCache>> = OnceLock::new();
|
||||
let cached_conf = CACHED_CONFIG.get_or_init(|| {
|
||||
RwLock::new(ConfigCache {
|
||||
data: None,
|
||||
last_mtime: 0,
|
||||
last_mtime_nsec: 0,
|
||||
})
|
||||
});
|
||||
|
||||
let conf = acl_config();
|
||||
let stat = match nix::sys::stat::stat(&conf) {
|
||||
Ok(stat) => Some(stat),
|
||||
Err(nix::errno::Errno::ENOENT) => None,
|
||||
Err(err) => bail!("unable to stat '{}' - {err}", conf.display()),
|
||||
};
|
||||
|
||||
{
|
||||
// limit scope
|
||||
let cache = cached_conf.read().unwrap();
|
||||
if let Some(ref config) = cache.data {
|
||||
if let Some(stat) = stat {
|
||||
if stat.st_mtime == cache.last_mtime && stat.st_mtime_nsec == cache.last_mtime_nsec
|
||||
{
|
||||
return Ok(config.clone());
|
||||
}
|
||||
} else if cache.last_mtime == 0 && cache.last_mtime_nsec == 0 {
|
||||
return Ok(config.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let (config, _digest) = config()?;
|
||||
let config = Arc::new(config);
|
||||
|
||||
let mut cache = cached_conf.write().unwrap();
|
||||
if let Some(stat) = stat {
|
||||
cache.last_mtime = stat.st_mtime;
|
||||
cache.last_mtime_nsec = stat.st_mtime_nsec;
|
||||
}
|
||||
cache.data = Some(config.clone());
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
/// Saves an [`AclTree`] to `acl.cfg` in the configuration directory, ensuring proper ownership and
|
||||
/// file permissions.
|
||||
pub fn save_config(acl: &AclTree) -> Result<(), Error> {
|
||||
let mut raw: Vec<u8> = Vec::new();
|
||||
acl.write_config(&mut raw)?;
|
||||
|
||||
let conf = acl_config();
|
||||
replace_privileged_config(conf, &raw)?;
|
||||
|
||||
// increase cache generation so we reload it next time we access it
|
||||
access_conf().increment_cache_generation()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::{collections::HashMap, sync::OnceLock};
|
||||
|
||||
use crate::init::{init_access_config, AccessControlConfig};
|
||||
|
||||
use super::AclTree;
|
||||
use anyhow::Error;
|
||||
|
||||
use proxmox_auth_api::types::Authid;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TestAcmConfig<'a> {
|
||||
roles: HashMap<&'a str, u64>,
|
||||
}
|
||||
|
||||
impl AccessControlConfig for TestAcmConfig<'_> {
|
||||
fn roles(&self) -> &HashMap<&str, u64> {
|
||||
&self.roles
|
||||
}
|
||||
|
||||
fn privileges(&self) -> &HashMap<&str, u64> {
|
||||
unreachable!("acl tests don't need privileges")
|
||||
}
|
||||
|
||||
fn role_no_access(&self) -> Option<&'static str> {
|
||||
Some("NoAccess")
|
||||
}
|
||||
|
||||
fn role_admin(&self) -> Option<&'static str> {
|
||||
Some("Admin")
|
||||
}
|
||||
}
|
||||
|
||||
fn setup_acl_tree_config() {
|
||||
static ACL_CONFIG: OnceLock<TestAcmConfig> = OnceLock::new();
|
||||
let config = ACL_CONFIG.get_or_init(|| {
|
||||
let mut roles = HashMap::new();
|
||||
roles.insert("NoAccess", 0);
|
||||
roles.insert("Admin", u64::MAX);
|
||||
roles.insert("DatastoreBackup", 4);
|
||||
roles.insert("DatastoreReader", 8);
|
||||
|
||||
TestAcmConfig { roles }
|
||||
});
|
||||
|
||||
// ignore errors here, we don't care if it's initialized already
|
||||
let _ = init_access_config(config);
|
||||
}
|
||||
|
||||
fn check_roles(tree: &AclTree, auth_id: &Authid, path: &str, expected_roles: &str) {
|
||||
let path_vec = super::split_acl_path(path);
|
||||
let mut roles = tree
|
||||
.roles(auth_id, &path_vec)
|
||||
.keys()
|
||||
.cloned()
|
||||
.collect::<Vec<String>>();
|
||||
roles.sort();
|
||||
let roles = roles.join(",");
|
||||
|
||||
assert_eq!(
|
||||
roles, expected_roles,
|
||||
"\nat check_roles for '{}' on '{}'",
|
||||
auth_id, path
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_acl_line_compression() {
|
||||
setup_acl_tree_config();
|
||||
|
||||
let tree = AclTree::from_raw(
|
||||
"\
|
||||
acl:0:/store/store2:user1@pbs:Admin\n\
|
||||
acl:0:/store/store2:user2@pbs:Admin\n\
|
||||
acl:0:/store/store2:user1@pbs:DatastoreBackup\n\
|
||||
acl:0:/store/store2:user2@pbs:DatastoreBackup\n\
|
||||
",
|
||||
)
|
||||
.expect("failed to parse acl tree");
|
||||
|
||||
let mut raw: Vec<u8> = Vec::new();
|
||||
tree.write_config(&mut raw)
|
||||
.expect("failed to write acl tree");
|
||||
let raw = std::str::from_utf8(&raw).expect("acl tree is not valid utf8");
|
||||
|
||||
assert_eq!(
|
||||
raw,
|
||||
"acl:0:/store/store2:user1@pbs,user2@pbs:Admin,DatastoreBackup\n"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_roles_1() -> Result<(), Error> {
|
||||
setup_acl_tree_config();
|
||||
|
||||
let tree = AclTree::from_raw(
|
||||
"\
|
||||
acl:1:/storage:user1@pbs:Admin\n\
|
||||
acl:1:/storage/store1:user1@pbs:DatastoreBackup\n\
|
||||
acl:1:/storage/store2:user2@pbs:DatastoreBackup\n\
|
||||
",
|
||||
)?;
|
||||
let user1: Authid = "user1@pbs".parse()?;
|
||||
check_roles(&tree, &user1, "/", "");
|
||||
check_roles(&tree, &user1, "/storage", "Admin");
|
||||
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
|
||||
check_roles(&tree, &user1, "/storage/store2", "Admin");
|
||||
|
||||
let user2: Authid = "user2@pbs".parse()?;
|
||||
check_roles(&tree, &user2, "/", "");
|
||||
check_roles(&tree, &user2, "/storage", "");
|
||||
check_roles(&tree, &user2, "/storage/store1", "");
|
||||
check_roles(&tree, &user2, "/storage/store2", "DatastoreBackup");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_role_no_access() -> Result<(), Error> {
|
||||
setup_acl_tree_config();
|
||||
|
||||
let tree = AclTree::from_raw(
|
||||
"\
|
||||
acl:1:/:user1@pbs:Admin\n\
|
||||
acl:1:/storage:user1@pbs:NoAccess\n\
|
||||
acl:1:/storage/store1:user1@pbs:DatastoreBackup\n\
|
||||
",
|
||||
)?;
|
||||
let user1: Authid = "user1@pbs".parse()?;
|
||||
check_roles(&tree, &user1, "/", "Admin");
|
||||
check_roles(&tree, &user1, "/storage", "NoAccess");
|
||||
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
|
||||
check_roles(&tree, &user1, "/storage/store2", "NoAccess");
|
||||
check_roles(&tree, &user1, "/system", "Admin");
|
||||
|
||||
let tree = AclTree::from_raw(
|
||||
"\
|
||||
acl:1:/:user1@pbs:Admin\n\
|
||||
acl:0:/storage:user1@pbs:NoAccess\n\
|
||||
acl:1:/storage/store1:user1@pbs:DatastoreBackup\n\
|
||||
",
|
||||
)?;
|
||||
check_roles(&tree, &user1, "/", "Admin");
|
||||
check_roles(&tree, &user1, "/storage", "NoAccess");
|
||||
check_roles(&tree, &user1, "/storage/store1", "DatastoreBackup");
|
||||
check_roles(&tree, &user1, "/storage/store2", "Admin");
|
||||
check_roles(&tree, &user1, "/system", "Admin");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_role_add_delete() -> Result<(), Error> {
|
||||
setup_acl_tree_config();
|
||||
|
||||
let mut tree = AclTree::new();
|
||||
|
||||
let user1: Authid = "user1@pbs".parse()?;
|
||||
|
||||
tree.insert_user_role("/", &user1, "Admin", true);
|
||||
tree.insert_user_role("/", &user1, "Audit", true);
|
||||
|
||||
check_roles(&tree, &user1, "/", "Admin,Audit");
|
||||
|
||||
tree.insert_user_role("/", &user1, "NoAccess", true);
|
||||
check_roles(&tree, &user1, "/", "NoAccess");
|
||||
|
||||
let mut raw: Vec<u8> = Vec::new();
|
||||
tree.write_config(&mut raw)?;
|
||||
let raw = std::str::from_utf8(&raw)?;
|
||||
|
||||
assert_eq!(raw, "acl:1:/:user1@pbs:NoAccess\n");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_no_access_overwrite() -> Result<(), Error> {
|
||||
setup_acl_tree_config();
|
||||
|
||||
let mut tree = AclTree::new();
|
||||
|
||||
let user1: Authid = "user1@pbs".parse()?;
|
||||
|
||||
tree.insert_user_role("/storage", &user1, "NoAccess", true);
|
||||
|
||||
check_roles(&tree, &user1, "/storage", "NoAccess");
|
||||
|
||||
tree.insert_user_role("/storage", &user1, "Admin", true);
|
||||
tree.insert_user_role("/storage", &user1, "Audit", true);
|
||||
|
||||
check_roles(&tree, &user1, "/storage", "Admin,Audit");
|
||||
|
||||
tree.insert_user_role("/storage", &user1, "NoAccess", true);
|
||||
|
||||
check_roles(&tree, &user1, "/storage", "NoAccess");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
fn test_get_child_paths() -> Result<(), Error> {
    setup_acl_tree_config();

    let tree = AclTree::from_raw(
        "acl:0:/store/store2:user1@pbs:Admin\n\
         acl:1:/store/store2/store31/store4/store6:user2@pbs:DatastoreReader\n\
         acl:0:/store/store2/store3:user1@pbs:Admin\n",
    )
    .expect("failed to parse acl tree");

    let user1: Authid = "user1@pbs".parse()?;
    let user2: Authid = "user2@pbs".parse()?;

    // user1 has Admin on two paths below "store" -> both are returned
    let paths = tree.get_child_paths(&user1, &["store"])?;
    assert_eq!(paths.len(), 2);
    assert!(paths.contains(&"store/store2".to_string()));
    assert!(paths.contains(&"store/store2/store3".to_string()));

    // user2 has no privileges below "/store/store2/store3" -> empty result
    let paths = tree.get_child_paths(&user2, &["store", "store2", "store3"])?;
    assert!(paths.is_empty());

    // user2 has DatastoreReader below "/store/store2/store31" -> one path
    let paths = tree.get_child_paths(&user2, &["store/store2/store31"])?;
    assert_eq!(paths.len(), 1);
    assert!(paths.contains(&"store/store2/store31/store4/store6".to_string()));

    // user2 has no privileges below "/store/store2/foo/bar/baz"
    let paths = tree.get_child_paths(&user2, &["store", "store2", "foo/bar/baz"])?;
    assert!(paths.is_empty());

    // user2 has privileges on "/store/store2/store31/store4/store6" itself,
    // but not on any of its children -> empty result
    let paths = tree.get_child_paths(&user2, &["store/store2/store31/store4/store6"])?;
    assert!(paths.is_empty());

    Ok(())
}
|
||||
|
||||
#[test]
fn test_delete_node() -> Result<(), Error> {
    setup_acl_tree_config();

    let mut tree = AclTree::new();
    let user1: Authid = "user1@pbs".parse()?;

    for path in [
        "/storage",
        "/storage/a",
        "/storage/b",
        "/storage/b/a",
        "/storage/b/b",
        "/datastore/c",
        "/datastore/d",
    ] {
        tree.insert_user_role(path, &user1, "NoAccess", true);
    }

    // deleting a leaf only removes that node
    assert!(tree.find_node("/storage/b/a").is_some());
    tree.delete_node("/storage/b/a");
    assert!(tree.find_node("/storage/b/a").is_none());

    // deleting an inner node removes its children as well
    assert!(tree.find_node("/storage/b/b").is_some());
    assert!(tree.find_node("/storage/b").is_some());
    tree.delete_node("/storage/b");
    assert!(tree.find_node("/storage/b/b").is_none());
    assert!(tree.find_node("/storage/b").is_none());

    assert!(tree.find_node("/storage").is_some());
    assert!(tree.find_node("/storage/a").is_some());
    tree.delete_node("/storage");
    assert!(tree.find_node("/storage").is_none());
    assert!(tree.find_node("/storage/a").is_none());

    // siblings in a different subtree are unaffected
    assert!(tree.find_node("/datastore/c").is_some());
    tree.delete_node("/datastore/c");
    assert!(tree.find_node("/datastore/c").is_none());

    assert!(tree.find_node("/datastore/d").is_some());
    tree.delete_node("/datastore/d");
    assert!(tree.find_node("/datastore/d").is_none());

    // '/' should not be deletable
    assert!(tree.find_node("/").is_some());
    tree.delete_node("/");
    assert!(tree.find_node("/").is_some());

    Ok(())
}
|
||||
|
||||
#[test]
fn test_delete_authid() -> Result<(), Error> {
    setup_acl_tree_config();

    let mut tree = AclTree::new();
    let user1: Authid = "user1@pbs".parse()?;
    let user2: Authid = "user2@pbs".parse()?;

    let user1_paths = vec![
        "/",
        "/storage",
        "/storage/a",
        "/storage/a/b",
        "/storage/b",
        "/storage/b/a",
        "/storage/b/b",
        "/storage/a/a",
    ];
    let user2_paths = vec!["/", "/storage", "/storage/a/b", "/storage/a/a"];

    for path in &user1_paths {
        tree.insert_user_role(path, &user1, "NoAccess", true);
    }
    for path in &user2_paths {
        tree.insert_user_role(path, &user2, "NoAccess", true);
    }

    // asserts that every node in `paths` still exists, but no longer carries
    // an entry for `auth_id`
    fn assert_entries_removed(tree: &AclTree, paths: &[&str], auth_id: &Authid) {
        for path in paths {
            let node = tree.find_node(path).expect("node should still exist");
            assert!(!node.users.contains_key(auth_id));
        }
    }

    tree.delete_authid(&user1);
    assert_entries_removed(&tree, &user1_paths, &user1);
    // user2's entries must be untouched by deleting user1
    for path in &user2_paths {
        let node = tree.find_node(path).expect("node should still exist");
        assert!(node.users.contains_key(&user2));
    }

    tree.delete_authid(&user2);
    assert_entries_removed(&tree, &user2_paths, &user2);

    Ok(())
}
|
||||
}
|
@ -1,246 +0,0 @@
|
||||
//! Cached user info for fast ACL permission checks
|
||||
|
||||
use std::sync::{Arc, OnceLock, RwLock};
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use proxmox_auth_api::types::{Authid, Userid};
|
||||
use proxmox_router::UserInformation;
|
||||
use proxmox_section_config::SectionConfigData;
|
||||
use proxmox_time::epoch_i64;
|
||||
|
||||
use crate::acl::AclTree;
|
||||
use crate::init::access_conf;
|
||||
use crate::types::{ApiToken, User};
|
||||
|
||||
/// Cache User/Group/Token/Acl configuration data for fast permission tests
|
||||
/// Cache User/Group/Token/Acl configuration data for fast permission tests
pub struct CachedUserInfo {
    // snapshot of the parsed user.cfg sections (see `crate::user::cached_config`)
    user_cfg: Arc<SectionConfigData>,
    // snapshot of the parsed ACL tree (see `crate::acl::cached_config`)
    acl_tree: Arc<AclTree>,
}

// Process-wide cache entry used by `CachedUserInfo::new()` (stored in a
// `OnceLock<RwLock<ConfigCache>>` there).
struct ConfigCache {
    data: Option<Arc<CachedUserInfo>>,
    // epoch seconds of the last (re)load; entries older than 5s are refreshed
    last_update: i64,
    // generation counter as reported by the product's `AccessControlConfig`
    last_user_cache_generation: usize,
}
|
||||
|
||||
impl CachedUserInfo {
|
||||
/// Returns a cached instance (up to 5 seconds old).
|
||||
pub fn new() -> Result<Arc<Self>, Error> {
|
||||
let now = epoch_i64();
|
||||
|
||||
let cache_generation = access_conf().cache_generation();
|
||||
|
||||
static CACHED_CONFIG: OnceLock<RwLock<ConfigCache>> = OnceLock::new();
|
||||
let cached_config = CACHED_CONFIG.get_or_init(|| {
|
||||
RwLock::new(ConfigCache {
|
||||
data: None,
|
||||
last_update: 0,
|
||||
last_user_cache_generation: 0,
|
||||
})
|
||||
});
|
||||
|
||||
{
|
||||
// limit scope
|
||||
let cache = cached_config.read().unwrap();
|
||||
if let Some(current_generation) = cache_generation {
|
||||
if (current_generation == cache.last_user_cache_generation)
|
||||
&& ((now - cache.last_update) < 5)
|
||||
{
|
||||
if let Some(ref config) = cache.data {
|
||||
return Ok(config.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let config = Arc::new(CachedUserInfo {
|
||||
user_cfg: crate::user::cached_config()?,
|
||||
acl_tree: crate::acl::cached_config()?,
|
||||
});
|
||||
|
||||
let mut cache = cached_config.write().unwrap();
|
||||
|
||||
if let Some(current_generation) = cache_generation {
|
||||
cache.last_user_cache_generation = current_generation;
|
||||
}
|
||||
|
||||
cache.last_update = now;
|
||||
cache.data = Some(config.clone());
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn is_superuser(&self, auth_id: &Authid) -> bool {
|
||||
access_conf().is_superuser(auth_id)
|
||||
}
|
||||
|
||||
pub fn is_group_member(&self, user_id: &Userid, group: &str) -> bool {
|
||||
access_conf().is_group_member(user_id, group)
|
||||
}
|
||||
|
||||
/// Test if a user_id is enabled and not expired
|
||||
pub fn is_active_user_id(&self, userid: &Userid) -> bool {
|
||||
if let Ok(info) = self.user_cfg.lookup::<User>("user", userid.as_str()) {
|
||||
info.is_active()
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Test if a authentication id is enabled and not expired
|
||||
pub fn is_active_auth_id(&self, auth_id: &Authid) -> bool {
|
||||
let userid = auth_id.user();
|
||||
|
||||
if !self.is_active_user_id(userid) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if auth_id.is_token() {
|
||||
if let Ok(info) = self
|
||||
.user_cfg
|
||||
.lookup::<ApiToken>("token", &auth_id.to_string())
|
||||
{
|
||||
return info.is_active();
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
pub fn check_privs(
|
||||
&self,
|
||||
auth_id: &Authid,
|
||||
path: &[&str],
|
||||
required_privs: u64,
|
||||
partial: bool,
|
||||
) -> Result<(), Error> {
|
||||
let privs = self.lookup_privs(auth_id, path);
|
||||
let allowed = if partial {
|
||||
(privs & required_privs) != 0
|
||||
} else {
|
||||
(privs & required_privs) == required_privs
|
||||
};
|
||||
if !allowed {
|
||||
// printing the path doesn't leak any information as long as we
|
||||
// always check privilege before resource existence
|
||||
let priv_names = privs_to_priv_names(required_privs);
|
||||
let priv_names = if partial {
|
||||
priv_names.join("|")
|
||||
} else {
|
||||
priv_names.join("&")
|
||||
};
|
||||
bail!(
|
||||
"missing permissions '{priv_names}' on '/{}'",
|
||||
path.join("/")
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn lookup_privs(&self, auth_id: &Authid, path: &[&str]) -> u64 {
|
||||
let (privs, _) = self.lookup_privs_details(auth_id, path);
|
||||
privs
|
||||
}
|
||||
|
||||
pub fn lookup_privs_details(&self, auth_id: &Authid, path: &[&str]) -> (u64, u64) {
|
||||
if self.is_superuser(auth_id) {
|
||||
let acm_config = access_conf();
|
||||
if let Some(admin) = acm_config.role_admin() {
|
||||
if let Some(admin) = acm_config.roles().get(admin) {
|
||||
return (*admin, *admin);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let roles = self.acl_tree.roles(auth_id, path);
|
||||
let mut privs: u64 = 0;
|
||||
let mut propagated_privs: u64 = 0;
|
||||
for (role, propagate) in roles {
|
||||
if let Some(role_privs) = access_conf().roles().get(role.as_str()) {
|
||||
if propagate {
|
||||
propagated_privs |= role_privs;
|
||||
}
|
||||
privs |= role_privs;
|
||||
}
|
||||
}
|
||||
|
||||
if auth_id.is_token() {
|
||||
// limit privs to that of owning user
|
||||
let user_auth_id = Authid::from(auth_id.user().clone());
|
||||
let (owner_privs, owner_propagated_privs) =
|
||||
self.lookup_privs_details(&user_auth_id, path);
|
||||
privs &= owner_privs;
|
||||
propagated_privs &= owner_propagated_privs;
|
||||
}
|
||||
|
||||
(privs, propagated_privs)
|
||||
}
|
||||
|
||||
/// Checks whether the `auth_id` has any of the privileges `privs` on any object below `path`.
|
||||
pub fn any_privs_below(
|
||||
&self,
|
||||
auth_id: &Authid,
|
||||
path: &[&str],
|
||||
privs: u64,
|
||||
) -> Result<bool, Error> {
|
||||
// if the anchor path itself has matching propagated privs, we skip checking children
|
||||
let (_privs, propagated_privs) = self.lookup_privs_details(auth_id, path);
|
||||
if propagated_privs & privs != 0 {
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
// get all sub-paths with roles defined for `auth_id`
|
||||
let paths = self.acl_tree.get_child_paths(auth_id, path)?;
|
||||
|
||||
for path in paths.iter() {
|
||||
// early return if any sub-path has any of the privs we are looking for
|
||||
if privs & self.lookup_privs(auth_id, &[path.as_str()]) != 0 {
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
|
||||
// no paths or no matching paths
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
impl UserInformation for CachedUserInfo {
|
||||
fn is_superuser(&self, userid: &str) -> bool {
|
||||
if let Ok(authid) = userid.parse() {
|
||||
return self.is_superuser(&authid);
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
fn is_group_member(&self, userid: &str, group: &str) -> bool {
|
||||
if let Ok(userid) = userid.parse() {
|
||||
return self.is_group_member(&userid, group);
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
fn lookup_privs(&self, auth_id: &str, path: &[&str]) -> u64 {
|
||||
match auth_id.parse::<Authid>() {
|
||||
Ok(auth_id) => Self::lookup_privs(self, &auth_id, path),
|
||||
Err(_) => 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn privs_to_priv_names(privs: u64) -> Vec<&'static str> {
|
||||
access_conf()
|
||||
.privileges()
|
||||
.iter()
|
||||
.fold(Vec::new(), |mut priv_names, (name, value)| {
|
||||
if value & privs != 0 {
|
||||
priv_names.push(name);
|
||||
}
|
||||
priv_names
|
||||
})
|
||||
}
|
@ -1,131 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
|
||||
use proxmox_auth_api::types::{Authid, Userid};
|
||||
use proxmox_section_config::SectionConfigData;
|
||||
|
||||
static ACCESS_CONF: OnceLock<&'static dyn AccessControlConfig> = OnceLock::new();
|
||||
static ACCESS_CONF_DIR: OnceLock<PathBuf> = OnceLock::new();
|
||||
|
||||
/// This trait specifies the functions a product needs to implement to get ACL tree based access
/// control management from this plugin.
pub trait AccessControlConfig: Send + Sync {
    /// Returns a mapping of all recognized privileges and their corresponding `u64` value.
    fn privileges(&self) -> &HashMap<&str, u64>;

    /// Returns a mapping of all recognized roles and their corresponding `u64` value.
    fn roles(&self) -> &HashMap<&str, u64>;

    /// Checks whether an `Authid` has super user privileges or not.
    ///
    /// Default: Always returns `false`.
    fn is_superuser(&self, _auth_id: &Authid) -> bool {
        false
    }

    /// Checks whether a user is part of a group.
    ///
    /// Default: Always returns `false`.
    fn is_group_member(&self, _user_id: &Userid, _group: &str) -> bool {
        false
    }

    /// Returns the current cache generation of the user and acl configs. If the generation was
    /// incremented since the last time the cache was queried, the configs are loaded again from
    /// disk.
    ///
    /// Returning `None` will always reload the cache.
    ///
    /// Default: Always returns `None`.
    fn cache_generation(&self) -> Option<usize> {
        None
    }

    /// Increment the cache generation of user and acl configs. This indicates that they were
    /// changed on disk.
    ///
    /// Default: Does nothing.
    fn increment_cache_generation(&self) -> Result<(), Error> {
        Ok(())
    }

    /// Optionally returns a role that has no access to any resource.
    ///
    /// Default: Returns `None`.
    fn role_no_access(&self) -> Option<&str> {
        None
    }

    /// Optionally returns a role that is allowed to access all resources.
    ///
    /// Default: Returns `None`.
    fn role_admin(&self) -> Option<&str> {
        None
    }

    /// Called after the user configuration is loaded to potentially re-add fixed users, such as a
    /// `root@pam` user.
    ///
    /// Default: Leaves the configuration unchanged.
    fn init_user_config(&self, config: &mut SectionConfigData) -> Result<(), Error> {
        // default implementation deliberately ignores the config
        let _ = config;
        Ok(())
    }
}
|
||||
|
||||
pub fn init<P: AsRef<Path>>(
|
||||
acm_config: &'static dyn AccessControlConfig,
|
||||
config_dir: P,
|
||||
) -> Result<(), Error> {
|
||||
init_access_config(acm_config)?;
|
||||
init_access_config_dir(config_dir)
|
||||
}
|
||||
|
||||
pub(crate) fn init_access_config_dir<P: AsRef<Path>>(config_dir: P) -> Result<(), Error> {
|
||||
ACCESS_CONF_DIR
|
||||
.set(config_dir.as_ref().to_owned())
|
||||
.map_err(|_e| format_err!("cannot initialize acl tree config twice!"))
|
||||
}
|
||||
|
||||
pub(crate) fn init_access_config(config: &'static dyn AccessControlConfig) -> Result<(), Error> {
|
||||
ACCESS_CONF
|
||||
.set(config)
|
||||
.map_err(|_e| format_err!("cannot initialize acl tree config twice!"))
|
||||
}
|
||||
|
||||
pub(crate) fn access_conf() -> &'static dyn AccessControlConfig {
|
||||
*ACCESS_CONF
|
||||
.get()
|
||||
.expect("please initialize the acm config before using it!")
|
||||
}
|
||||
|
||||
fn conf_dir() -> &'static PathBuf {
|
||||
ACCESS_CONF_DIR
|
||||
.get()
|
||||
.expect("please initialize acm config dir before using it!")
|
||||
}
|
||||
|
||||
/// Path of the ACL config file below the config directory.
pub(crate) fn acl_config() -> PathBuf {
    conf_dir().join("acl.cfg")
}

/// Path of the ACL config lock file.
pub(crate) fn acl_config_lock() -> PathBuf {
    conf_dir().join(".acl.lck")
}

/// Path of the user config file.
pub(crate) fn user_config() -> PathBuf {
    conf_dir().join("user.cfg")
}

/// Path of the user config lock file.
pub(crate) fn user_config_lock() -> PathBuf {
    conf_dir().join(".user.lck")
}

/// Path of the token shadow file (token id -> hashed secret).
pub(crate) fn token_shadow() -> PathBuf {
    conf_dir().join("token.shadow")
}

/// Path of the token shadow lock file.
pub(crate) fn token_shadow_lock() -> PathBuf {
    conf_dir().join("token.shadow.lock")
}
|
@ -1,20 +0,0 @@
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
|
||||
pub mod types;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod acl;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod init;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod token_shadow;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod user;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod cached_user_info;
|
||||
#[cfg(feature = "impl")]
|
||||
pub use cached_user_info::CachedUserInfo;
|
@ -1,84 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{from_value, Value};
|
||||
|
||||
use proxmox_auth_api::types::Authid;
|
||||
use proxmox_product_config::{open_api_lockfile, replace_config, ApiLockGuard};
|
||||
|
||||
use crate::init::{token_shadow, token_shadow_lock};
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// ApiToken id / secret pair
|
||||
pub struct ApiTokenSecret {
|
||||
pub tokenid: Authid,
|
||||
pub secret: String,
|
||||
}
|
||||
|
||||
// Get exclusive lock
|
||||
fn lock_config() -> Result<ApiLockGuard, Error> {
|
||||
open_api_lockfile(token_shadow_lock(), None, true)
|
||||
}
|
||||
|
||||
fn read_file() -> Result<HashMap<Authid, String>, Error> {
|
||||
let json = proxmox_sys::fs::file_get_json(token_shadow(), Some(Value::Null))?;
|
||||
|
||||
if json == Value::Null {
|
||||
Ok(HashMap::new())
|
||||
} else {
|
||||
// swallow serde error which might contain sensitive data
|
||||
from_value(json)
|
||||
.map_err(|_err| format_err!("unable to parse '{}'", token_shadow().display()))
|
||||
}
|
||||
}
|
||||
|
||||
fn write_file(data: HashMap<Authid, String>) -> Result<(), Error> {
|
||||
let json = serde_json::to_vec(&data)?;
|
||||
replace_config(token_shadow(), &json)
|
||||
}
|
||||
|
||||
/// Verifies that an entry for given tokenid / API token secret exists
|
||||
pub fn verify_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
|
||||
if !tokenid.is_token() {
|
||||
bail!("not an API token ID");
|
||||
}
|
||||
|
||||
let data = read_file()?;
|
||||
match data.get(tokenid) {
|
||||
Some(hashed_secret) => proxmox_sys::crypt::verify_crypt_pw(secret, hashed_secret),
|
||||
None => bail!("invalid API token"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds a new entry for the given tokenid / API token secret. The secret is stored as salted hash.
|
||||
pub fn set_secret(tokenid: &Authid, secret: &str) -> Result<(), Error> {
|
||||
if !tokenid.is_token() {
|
||||
bail!("not an API token ID");
|
||||
}
|
||||
|
||||
let _guard = lock_config()?;
|
||||
|
||||
let mut data = read_file()?;
|
||||
let hashed_secret = proxmox_sys::crypt::encrypt_pw(secret)?;
|
||||
data.insert(tokenid.clone(), hashed_secret);
|
||||
write_file(data)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Deletes the entry for the given tokenid.
|
||||
pub fn delete_secret(tokenid: &Authid) -> Result<(), Error> {
|
||||
if !tokenid.is_token() {
|
||||
bail!("not an API token ID");
|
||||
}
|
||||
|
||||
let _guard = lock_config()?;
|
||||
|
||||
let mut data = read_file()?;
|
||||
data.remove(tokenid);
|
||||
write_file(data)?;
|
||||
|
||||
Ok(())
|
||||
}
|
@ -1,194 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_auth_api::types::{Authid, Userid, PROXMOX_TOKEN_ID_SCHEMA};
|
||||
use proxmox_schema::{
|
||||
api,
|
||||
api_types::{COMMENT_SCHEMA, SINGLE_LINE_COMMENT_FORMAT},
|
||||
BooleanSchema, IntegerSchema, Schema, StringSchema, Updater,
|
||||
};
|
||||
|
||||
/// Schema for the `enable` flag of user accounts and API tokens (default: enabled).
pub const ENABLE_USER_SCHEMA: Schema = BooleanSchema::new(
    "Enable the account (default). You can set this to '0' to disable the account.",
)
.default(true)
.schema();

/// Schema for the account expiration date (epoch seconds, `0` = no expiration).
pub const EXPIRE_USER_SCHEMA: Schema = IntegerSchema::new(
    "Account expiration date (seconds since epoch). '0' means no expiration date.",
)
.default(0)
.minimum(0)
.schema();

/// Schema for a user's first name (single line, 2..=64 characters).
pub const FIRST_NAME_SCHEMA: Schema = StringSchema::new("First name.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

/// Schema for a user's last name (single line, 2..=64 characters).
pub const LAST_NAME_SCHEMA: Schema = StringSchema::new("Last name.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();

/// Schema for a user's e-mail address (single line, 2..=64 characters).
pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
    .format(&SINGLE_LINE_COMMENT_FORMAT)
    .min_length(2)
    .max_length(64)
    .schema();
|
||||
|
||||
#[api(
    properties: {
        user: {
            type: User,
            flatten: true,
        },
        tokens: {
            type: Array,
            optional: true,
            description: "List of user's API tokens.",
            items: {
                type: ApiToken
            },
        },
        "totp-locked": {
            type: bool,
            optional: true,
            default: false,
            description: "True if the user is currently locked out of TOTP factors",
        },
        "tfa-locked-until": {
            optional: true,
            description: "Contains a timestamp until when a user is locked out of 2nd factors",
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
    // the plain user entry, flattened into this object
    #[serde(flatten)]
    pub user: User,
    // omitted from serialized output when empty
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub tokens: Vec<ApiToken>,
    // omitted from serialized output when false
    #[serde(skip_serializing_if = "bool_is_false", default)]
    pub totp_locked: bool,
    // omitted from serialized output when `None`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tfa_locked_until: Option<i64>,
}
|
||||
|
||||
/// Serde helper: used with `skip_serializing_if` to omit `false` booleans.
fn bool_is_false(b: &bool) -> bool {
    !*b
}
|
||||
|
||||
#[api(
    properties: {
        tokenid: {
            schema: PROXMOX_TOKEN_ID_SCHEMA,
        },
        comment: {
            optional: true,
            schema: COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
    }
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ApiToken properties.
pub struct ApiToken {
    // token id (an `Authid` carrying a token name)
    pub tokenid: Authid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    // `None` counts as enabled (see `is_active`)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    // expiration as epoch seconds; `None` or `0` means no expiration
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
}
|
||||
|
||||
impl ApiToken {
|
||||
pub fn is_active(&self) -> bool {
|
||||
if !self.enable.unwrap_or(true) {
|
||||
return false;
|
||||
}
|
||||
if let Some(expire) = self.expire {
|
||||
let now = proxmox_time::epoch_i64();
|
||||
if expire > 0 && expire <= now {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
    properties: {
        userid: {
            type: Userid,
        },
        comment: {
            optional: true,
            schema: COMMENT_SCHEMA,
        },
        enable: {
            optional: true,
            schema: ENABLE_USER_SCHEMA,
        },
        expire: {
            optional: true,
            schema: EXPIRE_USER_SCHEMA,
        },
        firstname: {
            optional: true,
            schema: FIRST_NAME_SCHEMA,
        },
        lastname: {
            schema: LAST_NAME_SCHEMA,
            optional: true,
        },
        email: {
            schema: EMAIL_SCHEMA,
            optional: true,
        },
    }
)]
#[derive(Serialize, Deserialize, Updater, PartialEq, Eq, Clone)]
/// User properties.
pub struct User {
    // not updatable via the generated Updater — the userid is the section key
    #[updater(skip)]
    pub userid: Userid,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    // `None` counts as enabled (see `is_active`)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub enable: Option<bool>,
    // expiration as epoch seconds; `None` or `0` means no expiration
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expire: Option<i64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub firstname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub lastname: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
}
|
||||
|
||||
impl User {
|
||||
pub fn is_active(&self) -> bool {
|
||||
if !self.enable.unwrap_or(true) {
|
||||
return false;
|
||||
}
|
||||
if let Some(expire) = self.expire {
|
||||
let now = proxmox_time::epoch_i64();
|
||||
if expire > 0 && expire <= now {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
@ -1,183 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{Arc, OnceLock, RwLock};
|
||||
|
||||
use anyhow::{bail, Error};
|
||||
|
||||
use proxmox_auth_api::types::Authid;
|
||||
use proxmox_config_digest::ConfigDigest;
|
||||
use proxmox_product_config::{open_api_lockfile, replace_privileged_config, ApiLockGuard};
|
||||
use proxmox_schema::*;
|
||||
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||
|
||||
use crate::init::{access_conf, user_config, user_config_lock};
|
||||
use crate::types::{ApiToken, User};
|
||||
|
||||
fn get_or_init_config() -> &'static SectionConfig {
|
||||
static CONFIG: OnceLock<SectionConfig> = OnceLock::new();
|
||||
CONFIG.get_or_init(|| {
|
||||
let mut config = SectionConfig::new(&Authid::API_SCHEMA);
|
||||
|
||||
let user_schema = match User::API_SCHEMA {
|
||||
Schema::Object(ref user_schema) => user_schema,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let user_plugin =
|
||||
SectionConfigPlugin::new("user".to_string(), Some("userid".to_string()), user_schema);
|
||||
config.register_plugin(user_plugin);
|
||||
|
||||
let token_schema = match ApiToken::API_SCHEMA {
|
||||
Schema::Object(ref token_schema) => token_schema,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let token_plugin = SectionConfigPlugin::new(
|
||||
"token".to_string(),
|
||||
Some("tokenid".to_string()),
|
||||
token_schema,
|
||||
);
|
||||
config.register_plugin(token_plugin);
|
||||
|
||||
config
|
||||
})
|
||||
}
|
||||
|
||||
/// Get exclusive lock
pub fn lock_config() -> Result<ApiLockGuard, Error> {
    open_api_lockfile(user_config_lock(), None, true)
}

/// Parses user.cfg from disk, returning the section data and its digest.
///
/// A missing file is treated as empty. After parsing, the product's access
/// control config gets a chance to (re)inject fixed users via
/// `init_user_config`.
pub fn config() -> Result<(SectionConfigData, ConfigDigest), Error> {
    let content = proxmox_sys::fs::file_read_optional_string(user_config())?.unwrap_or_default();

    // digest is computed over the raw file content, before `init_user_config`
    let digest = ConfigDigest::from_slice(content.as_bytes());
    let mut data = get_or_init_config().parse(user_config(), &content)?;

    access_conf().init_user_config(&mut data)?;

    Ok((data, digest))
}
|
||||
|
||||
/// Returns a shared, mtime-cached copy of the parsed user.cfg.
///
/// The file is re-read from disk only when its mtime changed since the last
/// call (a missing file counts as mtime 0); otherwise the cached `Arc` is
/// returned.
pub fn cached_config() -> Result<Arc<SectionConfigData>, Error> {
    struct ConfigCache {
        data: Option<Arc<SectionConfigData>>,
        // mtime of user.cfg at the last (re)load; 0/0 marks "file missing"
        last_mtime: i64,
        last_mtime_nsec: i64,
    }

    static CACHED_CONFIG: OnceLock<RwLock<ConfigCache>> = OnceLock::new();
    let cached_config = CACHED_CONFIG.get_or_init(|| {
        RwLock::new(ConfigCache {
            data: None,
            last_mtime: 0,
            last_mtime_nsec: 0,
        })
    });

    // stat the config file; a missing file is represented as `None`
    let stat = match nix::sys::stat::stat(&user_config()) {
        Ok(stat) => Some(stat),
        Err(nix::errno::Errno::ENOENT) => None,
        Err(err) => bail!("unable to stat '{}' - {err}", user_config().display()),
    };

    {
        // limit scope of the read lock
        let cache = cached_config.read().unwrap();
        if let Some(ref config) = cache.data {
            if let Some(stat) = stat {
                // unchanged mtime -> cache is still valid
                if stat.st_mtime == cache.last_mtime && stat.st_mtime_nsec == cache.last_mtime_nsec
                {
                    return Ok(config.clone());
                }
            } else if cache.last_mtime == 0 && cache.last_mtime_nsec == 0 {
                // file (still) missing and cache was filled from a missing
                // file -> cache is still valid
                return Ok(config.clone());
            }
        }
    }

    // cache miss or stale: (re)parse from disk
    let (config, _digest) = config()?;
    let config = Arc::new(config);

    let mut cache = cached_config.write().unwrap();
    if let Some(stat) = stat {
        cache.last_mtime = stat.st_mtime;
        cache.last_mtime_nsec = stat.st_mtime_nsec;
    }
    cache.data = Some(config.clone());

    Ok(config)
}
|
||||
|
||||
pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {
|
||||
let config_file = user_config();
|
||||
let raw = get_or_init_config().write(&config_file, config)?;
|
||||
replace_privileged_config(config_file, raw.as_bytes())?;
|
||||
|
||||
// increase cache generation so we reload it next time we access it
|
||||
access_conf().increment_cache_generation()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Only exposed for testing
|
||||
#[doc(hidden)]
|
||||
pub fn test_cfg_from_str(raw: &str) -> Result<(SectionConfigData, [u8; 32]), Error> {
|
||||
let cfg = get_or_init_config();
|
||||
let parsed = cfg.parse("test_user_cfg", raw)?;
|
||||
|
||||
Ok((parsed, [0; 32]))
|
||||
}
|
||||
|
||||
// shell completion helper
|
||||
pub fn complete_userid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
||||
match config() {
|
||||
Ok((data, _digest)) => data
|
||||
.sections
|
||||
.iter()
|
||||
.filter_map(|(id, (section_type, _))| {
|
||||
if section_type == "user" {
|
||||
Some(id.to_string())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
Err(_) => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
// shell completion helper
|
||||
pub fn complete_authid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
|
||||
match config() {
|
||||
Ok((data, _digest)) => data.sections.keys().map(|id| id.to_string()).collect(),
|
||||
Err(_) => vec![],
|
||||
}
|
||||
}
|
||||
|
||||
// shell completion helper
|
||||
pub fn complete_token_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
|
||||
let data = match config() {
|
||||
Ok((data, _digest)) => data,
|
||||
Err(_) => return Vec::new(),
|
||||
};
|
||||
|
||||
match param.get("userid") {
|
||||
Some(userid) => {
|
||||
let user = data.lookup::<User>("user", userid);
|
||||
let tokens = data.convert_to_typed_array("token");
|
||||
match (user, tokens) {
|
||||
(Ok(_), Ok(tokens)) => tokens
|
||||
.into_iter()
|
||||
.filter_map(|token: ApiToken| {
|
||||
let tokenid = token.tokenid;
|
||||
if tokenid.is_token() && tokenid.user() == userid {
|
||||
Some(tokenid.tokenname().unwrap().as_str().to_string())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
_ => vec![],
|
||||
}
|
||||
}
|
||||
None => vec![],
|
||||
}
|
||||
}
|
@ -1,68 +0,0 @@
|
||||
[package]
|
||||
name = "proxmox-acme-api"
|
||||
description = "ACME API implementation"
|
||||
version = "0.1.7"
|
||||
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
exclude.workspace = true
|
||||
homepage.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
anyhow.workspace = true
|
||||
base64 = { workspace = true, optional = true }
|
||||
futures = { workspace = true, optional = true }
|
||||
hex = { workspace = true, optional = true }
|
||||
http = { workspace = true, optional = true }
|
||||
hyper = { workspace = true, optional = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true, optional = true, features = ["fs"] }
|
||||
|
||||
foreign-types = { workspace = true, optional = true }
|
||||
libc = { workspace = true, optional = true }
|
||||
openssl = { workspace = true, optional = true }
|
||||
|
||||
proxmox-acme = { workspace = true, features = ["api-types"] }
|
||||
proxmox-config-digest = { workspace = true, optional = true }
|
||||
proxmox-log = { workspace = true, optional = true }
|
||||
proxmox-product-config = { workspace = true, optional = true }
|
||||
proxmox-rest-server = { workspace = true, optional = true }
|
||||
proxmox-router = { workspace = true, optional = true }
|
||||
proxmox-schema = { workspace = true, features = ["api-macro", "api-types"] }
|
||||
proxmox-section-config = { workspace = true, optional = true }
|
||||
proxmox-serde.workspace = true
|
||||
proxmox-sys = { workspace = true, optional = true }
|
||||
proxmox-time = { workspace = true, optional = true }
|
||||
proxmox-uuid = { workspace = true, optional = true }
|
||||
|
||||
[features]
|
||||
default = []
|
||||
impl = [
|
||||
"dep:base64",
|
||||
"dep:foreign-types",
|
||||
"dep:futures",
|
||||
"dep:hex",
|
||||
"dep:http",
|
||||
"dep:hyper",
|
||||
"dep:libc",
|
||||
"dep:openssl",
|
||||
"dep:tokio",
|
||||
|
||||
"dep:proxmox-config-digest",
|
||||
"dep:proxmox-log",
|
||||
"dep:proxmox-product-config",
|
||||
"dep:proxmox-rest-server",
|
||||
"dep:proxmox-router",
|
||||
"dep:proxmox-section-config",
|
||||
"dep:proxmox-sys",
|
||||
"dep:proxmox-time",
|
||||
"dep:proxmox-uuid",
|
||||
|
||||
"proxmox-acme/async-client",
|
||||
"proxmox-acme/impl",
|
||||
"proxmox-config-digest?/openssl",
|
||||
]
|
@ -1,59 +0,0 @@
|
||||
rust-proxmox-acme-api (0.1.7-1) bookworm; urgency=medium
|
||||
|
||||
* rebuild with proxmox-schema 4.0
|
||||
|
||||
* use inner mutability for ACME_ACME_CONFIG
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 15 Jan 2025 12:45:05 +0100
|
||||
|
||||
rust-proxmox-acme-api (0.1.6-1) bookworm; urgency=medium
|
||||
|
||||
* rebuild with new rest-server and router dependencies
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 04 Sep 2024 15:42:27 +0200
|
||||
|
||||
rust-proxmox-acme-api (0.1.5-1) bookworm; urgency=medium
|
||||
|
||||
* replace lazy_static with std's LazyLock and drop the dependency
|
||||
|
||||
* remove unused dependencies
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 14 Aug 2024 11:36:01 +0200
|
||||
|
||||
rust-proxmox-acme-api (0.1.4-1) bookworm; urgency=medium
|
||||
|
||||
* rebuild with proxmox-log 0.2 and proxmox-rest-server 0.7
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 24 Jul 2024 14:33:06 +0200
|
||||
|
||||
rust-proxmox-acme-api (0.1.3-1) bookworm; urgency=medium
|
||||
|
||||
* adapt to tracing log infrastructure
|
||||
|
||||
* various clippy fixes
|
||||
|
||||
* upgrade proxmox-sys to 6.0
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 11 Jul 2024 14:50:02 +0200
|
||||
|
||||
rust-proxmox-acme-api (0.1.2-1) bookworm; urgency=medium
|
||||
|
||||
* upgrade proxmox-time to 2.0
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 20 Jun 2024 13:57:36 +0200
|
||||
|
||||
rust-proxmox-acme-api (0.1.1-1) bookworm; urgency=medium
|
||||
|
||||
* add function to create certificate revocation
|
||||
|
||||
* add function to create self signed certificates
|
||||
|
||||
* add function to get info from PEM formatted certificates
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 19 Jun 2024 12:06:49 +0200
|
||||
|
||||
rust-proxmox-acme-api (0.1.0-1) bookworm; urgency=medium
|
||||
|
||||
* initial package
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 05 Jun 2024 12:12:42 +0200
|
@ -1,90 +0,0 @@
|
||||
Source: rust-proxmox-acme-api
|
||||
Section: rust
|
||||
Priority: optional
|
||||
Build-Depends: debhelper-compat (= 13),
|
||||
dh-sequence-cargo,
|
||||
cargo:native <!nocheck>,
|
||||
rustc:native (>= 1.80) <!nocheck>,
|
||||
libstd-rust-dev <!nocheck>,
|
||||
librust-anyhow-1+default-dev <!nocheck>,
|
||||
librust-proxmox-acme-0.5+api-types-dev (>= 0.5.3-~~) <!nocheck>,
|
||||
librust-proxmox-schema-4+api-macro-dev <!nocheck>,
|
||||
librust-proxmox-schema-4+api-types-dev <!nocheck>,
|
||||
librust-proxmox-schema-4+default-dev <!nocheck>,
|
||||
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~) <!nocheck>,
|
||||
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~) <!nocheck>,
|
||||
librust-serde-1+default-dev <!nocheck>,
|
||||
librust-serde-1+derive-dev <!nocheck>,
|
||||
librust-serde-json-1+default-dev <!nocheck>
|
||||
Maintainer: Proxmox Support Team <support@proxmox.com>
|
||||
Standards-Version: 4.7.0
|
||||
Vcs-Git:
|
||||
Vcs-Browser:
|
||||
Homepage: https://proxmox.com
|
||||
X-Cargo-Crate: proxmox-acme-api
|
||||
Rules-Requires-Root: no
|
||||
|
||||
Package: librust-proxmox-acme-api-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-anyhow-1+default-dev,
|
||||
librust-proxmox-acme-0.5+api-types-dev (>= 0.5.3-~~),
|
||||
librust-proxmox-schema-4+api-macro-dev,
|
||||
librust-proxmox-schema-4+api-types-dev,
|
||||
librust-proxmox-schema-4+default-dev,
|
||||
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
|
||||
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
|
||||
librust-serde-1+default-dev,
|
||||
librust-serde-1+derive-dev,
|
||||
librust-serde-json-1+default-dev
|
||||
Suggests:
|
||||
librust-proxmox-acme-api+impl-dev (= ${binary:Version})
|
||||
Provides:
|
||||
librust-proxmox-acme-api+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-api-0-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-api-0+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-api-0.1-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-api-0.1+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-api-0.1.7-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-api-0.1.7+default-dev (= ${binary:Version})
|
||||
Description: ACME API implementation - Rust source code
|
||||
Source code for Debianized Rust crate "proxmox-acme-api"
|
||||
|
||||
Package: librust-proxmox-acme-api+impl-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-proxmox-acme-api-dev (= ${binary:Version}),
|
||||
librust-base64-0.13+default-dev,
|
||||
librust-foreign-types-0.3+default-dev,
|
||||
librust-futures-0.3+default-dev,
|
||||
librust-hex-0.4+default-dev,
|
||||
librust-http-0.2+default-dev,
|
||||
librust-hyper-0.14+default-dev (>= 0.14.5-~~),
|
||||
librust-libc-0.2+default-dev (>= 0.2.107-~~),
|
||||
librust-openssl-0.10+default-dev,
|
||||
librust-proxmox-acme-0.5+api-types-dev (>= 0.5.3-~~),
|
||||
librust-proxmox-acme-0.5+async-client-dev (>= 0.5.3-~~),
|
||||
librust-proxmox-acme-0.5+impl-dev (>= 0.5.3-~~),
|
||||
librust-proxmox-config-digest-0.1+default-dev,
|
||||
librust-proxmox-config-digest-0.1+openssl-dev,
|
||||
librust-proxmox-log-0.2+default-dev (>= 0.2.5-~~),
|
||||
librust-proxmox-product-config-0.2+default-dev,
|
||||
librust-proxmox-rest-server-0.8+default-dev,
|
||||
librust-proxmox-router-3+default-dev,
|
||||
librust-proxmox-section-config-2+default-dev (>= 2.1.0-~~),
|
||||
librust-proxmox-sys-0.6+default-dev (>= 0.6.5-~~),
|
||||
librust-proxmox-time-2+default-dev,
|
||||
librust-proxmox-uuid-1+default-dev (>= 1.0.1-~~),
|
||||
librust-tokio-1+default-dev (>= 1.6-~~),
|
||||
librust-tokio-1+fs-dev (>= 1.6-~~)
|
||||
Provides:
|
||||
librust-proxmox-acme-api-0+impl-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-api-0.1+impl-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-api-0.1.7+impl-dev (= ${binary:Version})
|
||||
Description: ACME API implementation - feature "impl"
|
||||
This metapackage enables feature "impl" for the Rust proxmox-acme-api crate, by
|
||||
pulling in any additional dependencies needed by that feature.
|
@ -1,16 +0,0 @@
|
||||
Copyright (C) 2020-2024 Proxmox Server Solutions GmbH
|
||||
|
||||
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
@ -1,8 +0,0 @@
|
||||
overlay = "."
|
||||
crate_src_path = ".."
|
||||
maintainer = "Proxmox Support Team <support@proxmox.com>"
|
||||
|
||||
[source]
|
||||
# TODO: update once public
|
||||
vcs_git = ""
|
||||
vcs_browser = ""
|
@ -1,118 +0,0 @@
|
||||
//! ACME account configuration API implementation
|
||||
|
||||
use std::ops::ControlFlow;
|
||||
|
||||
use anyhow::Error;
|
||||
use serde_json::json;
|
||||
|
||||
use proxmox_acme::async_client::AcmeClient;
|
||||
use proxmox_acme::types::AccountData as AcmeAccountData;
|
||||
use proxmox_log::warn;
|
||||
|
||||
use crate::account_config::AccountData;
|
||||
use crate::config::DEFAULT_ACME_DIRECTORY_ENTRY;
|
||||
use crate::types::{AccountEntry, AccountInfo, AcmeAccountName};
|
||||
|
||||
fn account_contact_from_string(s: &str) -> Vec<String> {
|
||||
s.split(&[' ', ';', ',', '\0'][..])
|
||||
.map(|s| format!("mailto:{}", s))
|
||||
.collect()
|
||||
}
|
||||
|
||||
pub fn list_accounts() -> Result<Vec<AccountEntry>, Error> {
|
||||
let mut entries = Vec::new();
|
||||
super::account_config::foreach_acme_account(|name| {
|
||||
entries.push(AccountEntry { name });
|
||||
ControlFlow::Continue(())
|
||||
})?;
|
||||
Ok(entries)
|
||||
}
|
||||
|
||||
pub async fn get_account(account_name: AcmeAccountName) -> Result<AccountInfo, Error> {
|
||||
let account_data = super::account_config::load_account_config(&account_name).await?;
|
||||
Ok(AccountInfo {
|
||||
location: account_data.location.clone(),
|
||||
tos: account_data.tos.clone(),
|
||||
directory: account_data.directory_url.clone(),
|
||||
account: AcmeAccountData {
|
||||
only_return_existing: false, // don't actually write this out in case it's set
|
||||
..account_data.account.clone()
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn get_tos(directory: Option<String>) -> Result<Option<String>, Error> {
|
||||
let directory = directory.unwrap_or_else(|| DEFAULT_ACME_DIRECTORY_ENTRY.url.to_string());
|
||||
Ok(AcmeClient::new(directory)
|
||||
.terms_of_service_url()
|
||||
.await?
|
||||
.map(str::to_owned))
|
||||
}
|
||||
|
||||
pub async fn register_account(
|
||||
name: &AcmeAccountName,
|
||||
contact: String,
|
||||
tos_url: Option<String>,
|
||||
directory_url: Option<String>,
|
||||
eab_creds: Option<(String, String)>,
|
||||
) -> Result<String, Error> {
|
||||
let directory_url =
|
||||
directory_url.unwrap_or_else(|| DEFAULT_ACME_DIRECTORY_ENTRY.url.to_string());
|
||||
|
||||
let mut client = AcmeClient::new(directory_url.clone());
|
||||
|
||||
let contact = account_contact_from_string(&contact);
|
||||
let account = client
|
||||
.new_account(tos_url.is_some(), contact, None, eab_creds)
|
||||
.await?;
|
||||
|
||||
let account = AccountData::from_account_dir_tos(account, directory_url, tos_url);
|
||||
|
||||
super::account_config::create_account_config(name, &account)?;
|
||||
|
||||
Ok(account.location)
|
||||
}
|
||||
|
||||
pub async fn deactivate_account(name: &AcmeAccountName, force: bool) -> Result<(), Error> {
|
||||
let mut account_data = super::account_config::load_account_config(name).await?;
|
||||
let mut client = account_data.client();
|
||||
|
||||
match client
|
||||
.update_account(&json!({"status": "deactivated"}))
|
||||
.await
|
||||
{
|
||||
Ok(account) => {
|
||||
account_data.account = account.data.clone();
|
||||
super::account_config::save_account_config(name, &account_data)?;
|
||||
}
|
||||
Err(err) if !force => return Err(err),
|
||||
Err(err) => {
|
||||
warn!(
|
||||
"error deactivating account {}, proceedeing anyway - {}",
|
||||
name, err,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
super::account_config::mark_account_deactivated(name)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn update_account(name: &AcmeAccountName, contact: Option<String>) -> Result<(), Error> {
|
||||
let mut account_data = super::account_config::load_account_config(name).await?;
|
||||
let mut client = account_data.client();
|
||||
|
||||
let data = match contact {
|
||||
Some(contact) => json!({
|
||||
"contact": account_contact_from_string(&contact),
|
||||
}),
|
||||
None => json!({}),
|
||||
};
|
||||
|
||||
let account = client.update_account(&data).await?;
|
||||
account_data.account = account.data.clone();
|
||||
super::account_config::save_account_config(name, &account_data)?;
|
||||
|
||||
Ok(())
|
||||
}
|
@ -1,212 +0,0 @@
|
||||
//! ACME account configuration helpers (load/save config)
|
||||
|
||||
use std::fs::OpenOptions;
|
||||
use std::ops::ControlFlow;
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_product_config::replace_secret_config;
|
||||
use proxmox_sys::error::SysError;
|
||||
|
||||
use proxmox_schema::api_types::SAFE_ID_REGEX;
|
||||
|
||||
use proxmox_acme::async_client::AcmeClient;
|
||||
use proxmox_acme::types::AccountData as AcmeAccountData;
|
||||
use proxmox_acme::Account;
|
||||
|
||||
use crate::acme_account_dir;
|
||||
use crate::types::AcmeAccountName;
|
||||
|
||||
#[inline]
|
||||
fn is_false(b: &bool) -> bool {
|
||||
!*b
|
||||
}
|
||||
|
||||
// Our on-disk format inherited from PVE's proxmox-acme code.
|
||||
#[derive(Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct AccountData {
|
||||
/// The account's location URL.
|
||||
pub location: String,
|
||||
|
||||
/// The account data.
|
||||
pub account: AcmeAccountData,
|
||||
|
||||
/// The private key as PEM formatted string.
|
||||
pub key: String,
|
||||
|
||||
/// ToS URL the user agreed to.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub tos: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "is_false", default)]
|
||||
pub debug: bool,
|
||||
|
||||
/// The directory's URL.
|
||||
pub directory_url: String,
|
||||
}
|
||||
|
||||
impl AccountData {
|
||||
pub fn from_account_dir_tos(
|
||||
account: &Account,
|
||||
directory_url: String,
|
||||
tos: Option<String>,
|
||||
) -> Self {
|
||||
AccountData {
|
||||
location: account.location.clone(),
|
||||
key: account.private_key.clone(),
|
||||
account: AcmeAccountData {
|
||||
only_return_existing: false, // don't actually write this out in case it's set
|
||||
..account.data.clone()
|
||||
},
|
||||
debug: false,
|
||||
tos,
|
||||
directory_url,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn client(&self) -> AcmeClient {
|
||||
let mut client = AcmeClient::new(self.directory_url.clone());
|
||||
client.set_account(Account {
|
||||
location: self.location.clone(),
|
||||
private_key: self.key.clone(),
|
||||
data: self.account.clone(),
|
||||
});
|
||||
client
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the path to the account configuration file (`$config_dir/accounts/$name`).
|
||||
pub fn account_config_filename(name: &str) -> PathBuf {
|
||||
acme_account_dir().join(name)
|
||||
}
|
||||
|
||||
pub(crate) fn foreach_acme_account<F>(mut func: F) -> Result<(), Error>
|
||||
where
|
||||
F: FnMut(AcmeAccountName) -> ControlFlow<Result<(), Error>>,
|
||||
{
|
||||
match proxmox_sys::fs::scan_subdir(-1, acme_account_dir(), &SAFE_ID_REGEX) {
|
||||
Ok(files) => {
|
||||
for file in files {
|
||||
let file = file?;
|
||||
let file_name = unsafe { file.file_name_utf8_unchecked() };
|
||||
|
||||
if file_name.starts_with('_') {
|
||||
continue;
|
||||
}
|
||||
|
||||
let account_name = match AcmeAccountName::from_string(file_name.to_owned()) {
|
||||
Ok(account_name) => account_name,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
if let ControlFlow::Break(result) = func(account_name) {
|
||||
return result;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(err) if err.not_found() => Ok(()),
|
||||
Err(err) => Err(err.into()),
|
||||
}
|
||||
}
|
||||
|
||||
// Mark account as deactivated
|
||||
pub(crate) fn mark_account_deactivated(account_name: &str) -> Result<(), Error> {
|
||||
let from = account_config_filename(account_name);
|
||||
for i in 0..100 {
|
||||
let to = account_config_filename(&format!("_deactivated_{}_{}", account_name, i));
|
||||
if !Path::new(&to).exists() {
|
||||
return std::fs::rename(&from, &to).map_err(|err| {
|
||||
format_err!(
|
||||
"failed to move account path {:?} to {:?} - {}",
|
||||
from,
|
||||
to,
|
||||
err
|
||||
)
|
||||
});
|
||||
}
|
||||
}
|
||||
bail!(
|
||||
"No free slot to rename deactivated account {:?}, please cleanup {:?}",
|
||||
from,
|
||||
acme_account_dir()
|
||||
);
|
||||
}
|
||||
|
||||
// Load an existing ACME account by name.
|
||||
pub(crate) async fn load_account_config(account_name: &str) -> Result<AccountData, Error> {
|
||||
let account_config_filename = account_config_filename(account_name);
|
||||
let data = match tokio::fs::read(&account_config_filename).await {
|
||||
Ok(data) => data,
|
||||
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
|
||||
bail!("acme account '{}' does not exist", account_name)
|
||||
}
|
||||
Err(err) => bail!(
|
||||
"failed to load acme account from {:?} - {}",
|
||||
account_config_filename,
|
||||
err
|
||||
),
|
||||
};
|
||||
let data: AccountData = serde_json::from_slice(&data).map_err(|err| {
|
||||
format_err!(
|
||||
"failed to parse acme account from {:?} - {}",
|
||||
account_config_filename,
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(data)
|
||||
}
|
||||
|
||||
// Save an new ACME account (fails if the file already exists).
|
||||
pub(crate) fn create_account_config(
|
||||
account_name: &AcmeAccountName,
|
||||
account: &AccountData,
|
||||
) -> Result<(), Error> {
|
||||
let account_config_filename = account_config_filename(account_name.as_ref());
|
||||
let file = OpenOptions::new()
|
||||
.write(true)
|
||||
.create_new(true)
|
||||
.mode(0o600)
|
||||
.open(&account_config_filename)
|
||||
.map_err(|err| {
|
||||
format_err!(
|
||||
"failed to open {:?} for writing: {}",
|
||||
account_config_filename,
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
serde_json::to_writer_pretty(file, account).map_err(|err| {
|
||||
format_err!(
|
||||
"failed to write acme account to {:?}: {}",
|
||||
account_config_filename,
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Save ACME account data (overtwrite existing data).
|
||||
pub(crate) fn save_account_config(
|
||||
account_name: &AcmeAccountName,
|
||||
account: &AccountData,
|
||||
) -> Result<(), Error> {
|
||||
let account_config_filename = account_config_filename(account_name.as_ref());
|
||||
|
||||
let mut data = Vec::<u8>::new();
|
||||
serde_json::to_writer_pretty(&mut data, account).map_err(|err| {
|
||||
format_err!(
|
||||
"failed to serialize acme account to {:?}: {}",
|
||||
account_config_filename,
|
||||
err
|
||||
)
|
||||
})?;
|
||||
|
||||
replace_secret_config(account_config_filename, &data)
|
||||
}
|
@ -1,315 +0,0 @@
|
||||
//! Plugin type definitions.
|
||||
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::process::Stdio;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use hyper::{Body, Request, Response};
|
||||
use tokio::io::{AsyncBufReadExt, AsyncRead, AsyncWriteExt, BufReader};
|
||||
use tokio::process::Command;
|
||||
|
||||
use proxmox_acme::async_client::AcmeClient;
|
||||
use proxmox_acme::{Authorization, Challenge};
|
||||
use proxmox_rest_server::WorkerTask;
|
||||
|
||||
use crate::plugin_config::PluginData;
|
||||
use crate::types::{AcmeDomain, DnsPlugin};
|
||||
|
||||
const PROXMOX_ACME_SH_PATH: &str = "/usr/share/proxmox-acme/proxmox-acme";
|
||||
|
||||
pub(crate) fn get_acme_plugin(
|
||||
plugin_data: &PluginData,
|
||||
name: &str,
|
||||
) -> Result<Option<Box<dyn AcmePlugin + Send + Sync + 'static>>, Error> {
|
||||
let (ty, data) = match plugin_data.get(name) {
|
||||
Some(plugin) => plugin,
|
||||
None => return Ok(None),
|
||||
};
|
||||
|
||||
Ok(Some(match ty.as_str() {
|
||||
"dns" => {
|
||||
let plugin: DnsPlugin = serde::Deserialize::deserialize(data)?;
|
||||
Box::new(plugin)
|
||||
}
|
||||
"standalone" => {
|
||||
// this one has no config
|
||||
Box::<StandaloneServer>::default()
|
||||
}
|
||||
other => bail!("missing implementation for plugin type '{}'", other),
|
||||
}))
|
||||
}
|
||||
|
||||
pub(crate) trait AcmePlugin {
|
||||
/// Setup everything required to trigger the validation and return the corresponding validation
|
||||
/// URL.
|
||||
fn setup<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
client: &'b mut AcmeClient,
|
||||
authorization: &'c Authorization,
|
||||
domain: &'d AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<&'c str, Error>> + Send + 'fut>>;
|
||||
|
||||
fn teardown<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
client: &'b mut AcmeClient,
|
||||
authorization: &'c Authorization,
|
||||
domain: &'d AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'fut>>;
|
||||
}
|
||||
|
||||
fn extract_challenge<'a>(
|
||||
authorization: &'a Authorization,
|
||||
ty: &str,
|
||||
) -> Result<&'a Challenge, Error> {
|
||||
authorization
|
||||
.challenges
|
||||
.iter()
|
||||
.find(|ch| ch.ty == ty)
|
||||
.ok_or_else(|| format_err!("no supported challenge type ({}) found", ty))
|
||||
}
|
||||
|
||||
async fn pipe_to_tasklog<T: AsyncRead + Unpin>(
|
||||
pipe: T,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Result<(), std::io::Error> {
|
||||
let mut pipe = BufReader::new(pipe);
|
||||
let mut line = String::new();
|
||||
loop {
|
||||
line.clear();
|
||||
match pipe.read_line(&mut line).await {
|
||||
Ok(0) => return Ok(()),
|
||||
Ok(_) => task.log_message(line.as_str()),
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DnsPlugin {
|
||||
async fn action<'a>(
|
||||
&self,
|
||||
client: &mut AcmeClient,
|
||||
authorization: &'a Authorization,
|
||||
domain: &AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
action: &str,
|
||||
) -> Result<&'a str, Error> {
|
||||
let challenge = extract_challenge(authorization, "dns-01")?;
|
||||
let mut stdin_data = client
|
||||
.dns_01_txt_value(
|
||||
challenge
|
||||
.token()
|
||||
.ok_or_else(|| format_err!("missing token in challenge"))?,
|
||||
)?
|
||||
.into_bytes();
|
||||
stdin_data.push(b'\n');
|
||||
stdin_data.extend(self.data.as_bytes());
|
||||
if stdin_data.last() != Some(&b'\n') {
|
||||
stdin_data.push(b'\n');
|
||||
}
|
||||
|
||||
let mut command = Command::new("/usr/bin/setpriv");
|
||||
|
||||
#[rustfmt::skip]
|
||||
command.args([
|
||||
"--reuid", "nobody",
|
||||
"--regid", "nogroup",
|
||||
"--clear-groups",
|
||||
"--reset-env",
|
||||
"--",
|
||||
"/bin/bash",
|
||||
PROXMOX_ACME_SH_PATH,
|
||||
action,
|
||||
&self.core.api,
|
||||
domain.alias.as_deref().unwrap_or(&domain.domain),
|
||||
]);
|
||||
|
||||
// We could use 1 socketpair, but tokio wraps them all in `File` internally causing `close`
|
||||
// to be called separately on all of them without exception, so we need 3 pipes :-(
|
||||
|
||||
let mut child = command
|
||||
.stdin(Stdio::piped())
|
||||
.stdout(Stdio::piped())
|
||||
.stderr(Stdio::piped())
|
||||
.spawn()?;
|
||||
|
||||
let mut stdin = child.stdin.take().expect("Stdio::piped()");
|
||||
let stdout = child.stdout.take().expect("Stdio::piped() failed?");
|
||||
let stdout = pipe_to_tasklog(stdout, Arc::clone(&task));
|
||||
let stderr = child.stderr.take().expect("Stdio::piped() failed?");
|
||||
let stderr = pipe_to_tasklog(stderr, Arc::clone(&task));
|
||||
let stdin = async move {
|
||||
stdin.write_all(&stdin_data).await?;
|
||||
stdin.flush().await?;
|
||||
Ok::<_, std::io::Error>(())
|
||||
};
|
||||
match futures::try_join!(stdin, stdout, stderr) {
|
||||
Ok(((), (), ())) => (),
|
||||
Err(err) => {
|
||||
if let Err(err) = child.kill().await {
|
||||
task.log_message(format!(
|
||||
"failed to kill '{} {}' command: {}",
|
||||
PROXMOX_ACME_SH_PATH, action, err
|
||||
));
|
||||
}
|
||||
bail!("'{}' failed: {}", PROXMOX_ACME_SH_PATH, err);
|
||||
}
|
||||
}
|
||||
|
||||
let status = child.wait().await?;
|
||||
if !status.success() {
|
||||
bail!(
|
||||
"'{} {}' exited with error ({})",
|
||||
PROXMOX_ACME_SH_PATH,
|
||||
action,
|
||||
status.code().unwrap_or(-1)
|
||||
);
|
||||
}
|
||||
|
||||
Ok(&challenge.url)
|
||||
}
|
||||
}
|
||||
|
||||
impl AcmePlugin for DnsPlugin {
|
||||
fn setup<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
client: &'b mut AcmeClient,
|
||||
authorization: &'c Authorization,
|
||||
domain: &'d AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<&'c str, Error>> + Send + 'fut>> {
|
||||
Box::pin(async move {
|
||||
let result = self
|
||||
.action(client, authorization, domain, task.clone(), "setup")
|
||||
.await;
|
||||
|
||||
let validation_delay = self.core.validation_delay.unwrap_or(30) as u64;
|
||||
if validation_delay > 0 {
|
||||
task.log_message(format!(
|
||||
"Sleeping {} seconds to wait for TXT record propagation",
|
||||
validation_delay
|
||||
));
|
||||
tokio::time::sleep(Duration::from_secs(validation_delay)).await;
|
||||
}
|
||||
result
|
||||
})
|
||||
}
|
||||
|
||||
fn teardown<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
client: &'b mut AcmeClient,
|
||||
authorization: &'c Authorization,
|
||||
domain: &'d AcmeDomain,
|
||||
task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'fut>> {
|
||||
Box::pin(async move {
|
||||
self.action(client, authorization, domain, task, "teardown")
|
||||
.await
|
||||
.map(drop)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct StandaloneServer {
|
||||
abort_handle: Option<futures::future::AbortHandle>,
|
||||
}
|
||||
|
||||
// In case the "order_certificates" future gets dropped between setup & teardown, let's also cancel
|
||||
// the HTTP listener on Drop:
|
||||
impl Drop for StandaloneServer {
|
||||
fn drop(&mut self) {
|
||||
self.stop();
|
||||
}
|
||||
}
|
||||
|
||||
impl StandaloneServer {
|
||||
fn stop(&mut self) {
|
||||
if let Some(abort) = self.abort_handle.take() {
|
||||
abort.abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn standalone_respond(
|
||||
req: Request<Body>,
|
||||
path: Arc<String>,
|
||||
key_auth: Arc<String>,
|
||||
) -> Result<Response<Body>, hyper::Error> {
|
||||
if req.method() == hyper::Method::GET && req.uri().path() == path.as_str() {
|
||||
Ok(Response::builder()
|
||||
.status(http::StatusCode::OK)
|
||||
.body(key_auth.as_bytes().to_vec().into())
|
||||
.unwrap())
|
||||
} else {
|
||||
Ok(Response::builder()
|
||||
.status(http::StatusCode::NOT_FOUND)
|
||||
.body("Not found.".into())
|
||||
.unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
impl AcmePlugin for StandaloneServer {
|
||||
fn setup<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
client: &'b mut AcmeClient,
|
||||
authorization: &'c Authorization,
|
||||
_domain: &'d AcmeDomain,
|
||||
_task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<&'c str, Error>> + Send + 'fut>> {
|
||||
use hyper::server::conn::AddrIncoming;
|
||||
use hyper::service::{make_service_fn, service_fn};
|
||||
|
||||
Box::pin(async move {
|
||||
self.stop();
|
||||
|
||||
let challenge = extract_challenge(authorization, "http-01")?;
|
||||
let token = challenge
|
||||
.token()
|
||||
.ok_or_else(|| format_err!("missing token in challenge"))?;
|
||||
let key_auth = Arc::new(client.key_authorization(token)?);
|
||||
let path = Arc::new(format!("/.well-known/acme-challenge/{}", token));
|
||||
|
||||
let service = make_service_fn(move |_| {
|
||||
let path = Arc::clone(&path);
|
||||
let key_auth = Arc::clone(&key_auth);
|
||||
async move {
|
||||
Ok::<_, hyper::Error>(service_fn(move |request| {
|
||||
standalone_respond(request, Arc::clone(&path), Arc::clone(&key_auth))
|
||||
}))
|
||||
}
|
||||
});
|
||||
|
||||
// `[::]:80` first, then `*:80`
|
||||
let incoming = AddrIncoming::bind(&(([0u16; 8], 80).into()))
|
||||
.or_else(|_| AddrIncoming::bind(&(([0u8; 4], 80).into())))?;
|
||||
|
||||
let server = hyper::Server::builder(incoming).serve(service);
|
||||
|
||||
let (future, abort) = futures::future::abortable(server);
|
||||
self.abort_handle = Some(abort);
|
||||
tokio::spawn(future);
|
||||
|
||||
Ok(challenge.url.as_str())
|
||||
})
|
||||
}
|
||||
|
||||
fn teardown<'fut, 'a: 'fut, 'b: 'fut, 'c: 'fut, 'd: 'fut>(
|
||||
&'a mut self,
|
||||
_client: &'b mut AcmeClient,
|
||||
_authorization: &'c Authorization,
|
||||
_domain: &'d AcmeDomain,
|
||||
_task: Arc<WorkerTask>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'fut>> {
|
||||
Box::pin(async move {
|
||||
if let Some(abort) = self.abort_handle.take() {
|
||||
abort.abort();
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
@ -1,400 +0,0 @@
|
||||
use std::mem::MaybeUninit;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use foreign_types::ForeignTypeRef;
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
use openssl::pkey::{PKey, Private};
|
||||
use openssl::rsa::Rsa;
|
||||
use openssl::x509::{X509Builder, X509};
|
||||
|
||||
use proxmox_acme::async_client::AcmeClient;
|
||||
use proxmox_log::{info, warn};
|
||||
use proxmox_rest_server::WorkerTask;
|
||||
|
||||
use crate::types::{AcmeConfig, AcmeDomain};
|
||||
use crate::CertificateInfo;
|
||||
|
||||
pub async fn revoke_certificate(acme_config: &AcmeConfig, certificate: &[u8]) -> Result<(), Error> {
|
||||
let mut acme = super::account_config::load_account_config(&acme_config.account)
|
||||
.await?
|
||||
.client();
|
||||
|
||||
acme.revoke_certificate(certificate, None).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub struct OrderedCertificate {
|
||||
pub certificate: Vec<u8>,
|
||||
pub private_key_pem: Vec<u8>,
|
||||
}
|
||||
|
||||
pub async fn order_certificate(
|
||||
worker: Arc<WorkerTask>,
|
||||
acme_config: &AcmeConfig,
|
||||
domains: &[AcmeDomain],
|
||||
) -> Result<Option<OrderedCertificate>, Error> {
|
||||
use proxmox_acme::authorization::Status;
|
||||
use proxmox_acme::order::Identifier;
|
||||
|
||||
let get_domain_config = |domain: &str| {
|
||||
domains
|
||||
.iter()
|
||||
.find(|d| d.domain == domain)
|
||||
.ok_or_else(|| format_err!("no config for domain '{}'", domain))
|
||||
};
|
||||
|
||||
if domains.is_empty() {
|
||||
info!("No domains configured to be ordered from an ACME server.");
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let mut acme = super::account_config::load_account_config(&acme_config.account)
|
||||
.await?
|
||||
.client();
|
||||
|
||||
let (plugins, _) = super::plugin_config::plugin_config()?;
|
||||
|
||||
info!("Placing ACME order");
|
||||
|
||||
let order = acme
|
||||
.new_order(domains.iter().map(|d| d.domain.to_ascii_lowercase()))
|
||||
.await?;
|
||||
|
||||
info!("Order URL: {}", order.location);
|
||||
|
||||
let identifiers: Vec<String> = order
|
||||
.data
|
||||
.identifiers
|
||||
.iter()
|
||||
.map(|identifier| match identifier {
|
||||
Identifier::Dns(domain) => domain.clone(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
for auth_url in &order.data.authorizations {
|
||||
info!("Getting authorization details from '{}'", auth_url);
|
||||
let mut auth = acme.get_authorization(auth_url).await?;
|
||||
|
||||
let domain = match &mut auth.identifier {
|
||||
Identifier::Dns(domain) => domain.to_ascii_lowercase(),
|
||||
};
|
||||
|
||||
if auth.status == Status::Valid {
|
||||
info!("{} is already validated!", domain);
|
||||
continue;
|
||||
}
|
||||
|
||||
info!("The validation for {} is pending", domain);
|
||||
let domain_config: &AcmeDomain = get_domain_config(&domain)?;
|
||||
let plugin_id = domain_config.plugin.as_deref().unwrap_or("standalone");
|
||||
let mut plugin_cfg =
|
||||
crate::acme_plugin::get_acme_plugin(&plugins, plugin_id)?.ok_or_else(|| {
|
||||
format_err!("plugin '{}' for domain '{}' not found!", plugin_id, domain)
|
||||
})?;
|
||||
|
||||
info!("Setting up validation plugin");
|
||||
let validation_url = plugin_cfg
|
||||
.setup(&mut acme, &auth, domain_config, Arc::clone(&worker))
|
||||
.await?;
|
||||
|
||||
let result = request_validation(&mut acme, auth_url, validation_url).await;
|
||||
|
||||
if let Err(err) = plugin_cfg
|
||||
.teardown(&mut acme, &auth, domain_config, Arc::clone(&worker))
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"Failed to teardown plugin '{}' for domain '{}' - {}",
|
||||
plugin_id, domain, err
|
||||
);
|
||||
}
|
||||
|
||||
result?;
|
||||
}
|
||||
|
||||
info!("All domains validated");
|
||||
info!("Creating CSR");
|
||||
|
||||
let csr = proxmox_acme::util::Csr::generate(&identifiers, &Default::default())?;
|
||||
let mut finalize_error_cnt = 0u8;
|
||||
let order_url = &order.location;
|
||||
let mut order;
|
||||
loop {
|
||||
use proxmox_acme::order::Status;
|
||||
|
||||
order = acme.get_order(order_url).await?;
|
||||
|
||||
match order.status {
|
||||
Status::Pending => {
|
||||
info!("still pending, trying to finalize anyway");
|
||||
let finalize = order
|
||||
.finalize
|
||||
.as_deref()
|
||||
.ok_or_else(|| format_err!("missing 'finalize' URL in order"))?;
|
||||
if let Err(err) = acme.finalize(finalize, &csr.data).await {
|
||||
if finalize_error_cnt >= 5 {
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
finalize_error_cnt += 1;
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
Status::Ready => {
|
||||
info!("order is ready, finalizing");
|
||||
let finalize = order
|
||||
.finalize
|
||||
.as_deref()
|
||||
.ok_or_else(|| format_err!("missing 'finalize' URL in order"))?;
|
||||
acme.finalize(finalize, &csr.data).await?;
|
||||
tokio::time::sleep(Duration::from_secs(5)).await;
|
||||
}
|
||||
Status::Processing => {
|
||||
info!("still processing, trying again in 30 seconds");
|
||||
tokio::time::sleep(Duration::from_secs(30)).await;
|
||||
}
|
||||
Status::Valid => {
|
||||
info!("valid");
|
||||
break;
|
||||
}
|
||||
other => bail!("order status: {:?}", other),
|
||||
}
|
||||
}
|
||||
|
||||
info!("Downloading certificate");
|
||||
let certificate = acme
|
||||
.get_certificate(
|
||||
order
|
||||
.certificate
|
||||
.as_deref()
|
||||
.ok_or_else(|| format_err!("missing certificate url in finalized order"))?,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(Some(OrderedCertificate {
|
||||
certificate: certificate.to_vec(),
|
||||
private_key_pem: csr.private_key_pem,
|
||||
}))
|
||||
}
|
||||
|
||||
async fn request_validation(
|
||||
acme: &mut AcmeClient,
|
||||
auth_url: &str,
|
||||
validation_url: &str,
|
||||
) -> Result<(), Error> {
|
||||
info!("Triggering validation");
|
||||
acme.request_challenge_validation(validation_url).await?;
|
||||
|
||||
info!("Sleeping for 5 seconds");
|
||||
tokio::time::sleep(Duration::from_secs(5)).await;
|
||||
|
||||
loop {
|
||||
use proxmox_acme::authorization::Status;
|
||||
|
||||
let auth = acme.get_authorization(auth_url).await?;
|
||||
match auth.status {
|
||||
Status::Pending => {
|
||||
info!("Status is still 'pending', trying again in 10 seconds");
|
||||
tokio::time::sleep(Duration::from_secs(10)).await;
|
||||
}
|
||||
Status::Valid => return Ok(()),
|
||||
other => bail!(
|
||||
"validating challenge '{}' failed - status: {:?}",
|
||||
validation_url,
|
||||
other
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_self_signed_cert(
|
||||
product_name: &str,
|
||||
nodename: &str,
|
||||
domain: Option<&str>,
|
||||
) -> Result<(PKey<Private>, X509), Error> {
|
||||
let rsa = Rsa::generate(4096).unwrap();
|
||||
|
||||
let mut x509 = X509Builder::new()?;
|
||||
|
||||
x509.set_version(2)?;
|
||||
|
||||
let today = openssl::asn1::Asn1Time::days_from_now(0)?;
|
||||
x509.set_not_before(&today)?;
|
||||
let expire = openssl::asn1::Asn1Time::days_from_now(365 * 1000)?;
|
||||
x509.set_not_after(&expire)?;
|
||||
|
||||
let mut fqdn = nodename.to_owned();
|
||||
|
||||
if let Some(domain) = domain {
|
||||
fqdn.push('.');
|
||||
fqdn.push_str(domain);
|
||||
}
|
||||
|
||||
// we try to generate an unique 'subject' to avoid browser problems
|
||||
//(reused serial numbers, ..)
|
||||
let uuid = proxmox_uuid::Uuid::generate();
|
||||
|
||||
let mut subject_name = openssl::x509::X509NameBuilder::new()?;
|
||||
subject_name.append_entry_by_text("O", product_name)?;
|
||||
subject_name.append_entry_by_text("OU", &format!("{:X}", uuid))?;
|
||||
subject_name.append_entry_by_text("CN", &fqdn)?;
|
||||
let subject_name = subject_name.build();
|
||||
|
||||
x509.set_subject_name(&subject_name)?;
|
||||
x509.set_issuer_name(&subject_name)?;
|
||||
|
||||
let bc = openssl::x509::extension::BasicConstraints::new(); // CA = false
|
||||
let bc = bc.build()?;
|
||||
x509.append_extension(bc)?;
|
||||
|
||||
let usage = openssl::x509::extension::ExtendedKeyUsage::new()
|
||||
.server_auth()
|
||||
.build()?;
|
||||
x509.append_extension(usage)?;
|
||||
|
||||
let context = x509.x509v3_context(None, None);
|
||||
|
||||
let mut alt_names = openssl::x509::extension::SubjectAlternativeName::new();
|
||||
|
||||
alt_names.ip("127.0.0.1");
|
||||
alt_names.ip("::1");
|
||||
|
||||
alt_names.dns("localhost");
|
||||
|
||||
if nodename != "localhost" {
|
||||
alt_names.dns(nodename);
|
||||
}
|
||||
if nodename != fqdn {
|
||||
alt_names.dns(&fqdn);
|
||||
}
|
||||
|
||||
let alt_names = alt_names.build(&context)?;
|
||||
|
||||
x509.append_extension(alt_names)?;
|
||||
|
||||
let pub_pem = rsa.public_key_to_pem()?;
|
||||
let pubkey = PKey::public_key_from_pem(&pub_pem)?;
|
||||
|
||||
x509.set_pubkey(&pubkey)?;
|
||||
|
||||
let context = x509.x509v3_context(None, None);
|
||||
let ext = openssl::x509::extension::SubjectKeyIdentifier::new().build(&context)?;
|
||||
x509.append_extension(ext)?;
|
||||
|
||||
let context = x509.x509v3_context(None, None);
|
||||
let ext = openssl::x509::extension::AuthorityKeyIdentifier::new()
|
||||
.keyid(true)
|
||||
.build(&context)?;
|
||||
x509.append_extension(ext)?;
|
||||
|
||||
let privkey = PKey::from_rsa(rsa)?;
|
||||
|
||||
x509.sign(&privkey, openssl::hash::MessageDigest::sha256())?;
|
||||
|
||||
Ok((privkey, x509.build()))
|
||||
}
|
||||
|
||||
impl CertificateInfo {
    /// Extract certificate information from a PEM encoded certificate.
    ///
    /// `filename` is only used for the returned info and for error messages,
    /// the certificate itself is taken from `cert_pem`.
    pub fn from_pem(filename: &str, cert_pem: &[u8]) -> Result<Self, Error> {
        let x509 = openssl::x509::X509::from_pem(cert_pem)?;

        let cert_pem = String::from_utf8(cert_pem.to_vec())
            .map_err(|_| format_err!("certificate in {:?} is not a valid PEM file", filename))?;

        let pubkey = x509.public_key()?;

        let subject = x509name_to_string(x509.subject_name())?;
        let issuer = x509name_to_string(x509.issuer_name())?;

        // SHA-256 digest rendered as colon separated hex pairs ("ab:cd:...")
        let fingerprint = x509.digest(openssl::hash::MessageDigest::sha256())?;
        let fingerprint = hex::encode(fingerprint)
            .as_bytes()
            .chunks(2)
            .map(|v| std::str::from_utf8(v).unwrap())
            .collect::<Vec<&str>>()
            .join(":");

        let public_key_type = openssl::nid::Nid::from_raw(pubkey.id().as_raw())
            .long_name()
            .unwrap_or("<unsupported key type>")
            .to_owned();

        // collect the SubjectAlternativeName entries; a certificate without a
        // SAN extension yields an empty list
        let san = x509
            .subject_alt_names()
            .map(|san| {
                san.into_iter()
                    .filter_map(|name| {
                        // this is not actually a map and we don't want to break the pattern
                        #[allow(clippy::manual_map)]
                        if let Some(name) = name.dnsname() {
                            Some(format!("DNS: {name}"))
                        } else if let Some(ip) = name.ipaddress() {
                            Some(format!("IP: {ip:?}"))
                        } else if let Some(email) = name.email() {
                            Some(format!("EMAIL: {email}"))
                        } else if let Some(uri) = name.uri() {
                            Some(format!("URI: {uri}"))
                        } else {
                            None
                        }
                    })
                    .collect()
            })
            .unwrap_or_default();

        Ok(CertificateInfo {
            filename: filename.to_string(),
            pem: Some(cert_pem),
            subject,
            issuer,
            fingerprint: Some(fingerprint),
            public_key_bits: Some(pubkey.bits()),
            // unparsable timestamps are simply left unset
            notbefore: asn1_time_to_unix(x509.not_before()).ok(),
            notafter: asn1_time_to_unix(x509.not_after()).ok(),
            public_key_type,
            san,
        })
    }

    /// Check if the certificate is expired at or after a specific unix epoch.
    ///
    /// Returns `false` when the certificate carries no `notAfter` timestamp.
    pub fn is_expired_after_epoch(&self, epoch: i64) -> Result<bool, Error> {
        if let Some(notafter) = self.notafter {
            Ok(notafter < epoch)
        } else {
            Ok(false)
        }
    }
}
|
||||
|
||||
fn x509name_to_string(name: &openssl::x509::X509NameRef) -> Result<String, Error> {
|
||||
let mut parts = Vec::new();
|
||||
for entry in name.entries() {
|
||||
parts.push(format!(
|
||||
"{} = {}",
|
||||
entry.object().nid().short_name()?,
|
||||
entry.data().as_utf8()?
|
||||
));
|
||||
}
|
||||
Ok(parts.join(", "))
|
||||
}
|
||||
|
||||
// C type:
#[allow(non_camel_case_types)]
type ASN1_TIME = <openssl::asn1::Asn1TimeRef as ForeignTypeRef>::CType;

unsafe extern "C" {
    // OpenSSL helper converting an ASN1_TIME into a `struct tm`; not exposed
    // by the rust `openssl` crate, so we bind it manually. Returns 1 on
    // success.
    fn ASN1_TIME_to_tm(s: *const ASN1_TIME, tm: *mut libc::tm) -> libc::c_int;
}

/// Convert an openssl [`Asn1TimeRef`](openssl::asn1::Asn1TimeRef) (e.g. a
/// certificate's notBefore/notAfter) into a UNIX epoch in seconds.
fn asn1_time_to_unix(time: &openssl::asn1::Asn1TimeRef) -> Result<i64, Error> {
    let mut c_tm = MaybeUninit::<libc::tm>::uninit();
    // SAFETY: `time.as_ptr()` is valid for the lifetime of `time` and `c_tm`
    // provides writable storage for a complete `struct tm`.
    let rc = unsafe { ASN1_TIME_to_tm(time.as_ptr(), c_tm.as_mut_ptr()) };
    if rc != 1 {
        bail!("failed to parse ASN1 time");
    }
    // SAFETY: ASN1_TIME_to_tm returned 1 (success), so `c_tm` is initialized.
    let mut c_tm = unsafe { c_tm.assume_init() };
    proxmox_time::timegm(&mut c_tm)
}
|
@ -1,71 +0,0 @@
|
||||
//! Read DNS Challenge schemas.
|
||||
//!
|
||||
//! Those schemas are provided by debian package "libproxmox-acme-plugins".
|
||||
|
||||
use std::sync::{Arc, LazyLock, Mutex};
|
||||
use std::time::SystemTime;
|
||||
|
||||
use anyhow::Error;
|
||||
use serde::Serialize;
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox_sys::fs::file_read_string;
|
||||
|
||||
use crate::types::AcmeChallengeSchema;
|
||||
|
||||
const ACME_DNS_SCHEMA_FN: &str = "/usr/share/proxmox-acme/dns-challenge-schema.json";
|
||||
|
||||
/// Wrapper for efficient Arc use when returning the ACME challenge-plugin schema for serializing.
pub struct ChallengeSchemaWrapper {
    // shared, immutable schema list (also held by the module-level cache)
    inner: Arc<Vec<AcmeChallengeSchema>>,
}
|
||||
|
||||
impl Serialize for ChallengeSchemaWrapper {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
self.inner.serialize(serializer)
|
||||
}
|
||||
}
|
||||
|
||||
fn load_dns_challenge_schema() -> Result<Vec<AcmeChallengeSchema>, Error> {
|
||||
let raw = file_read_string(ACME_DNS_SCHEMA_FN)?;
|
||||
let schemas: serde_json::Map<String, Value> = serde_json::from_str(&raw)?;
|
||||
|
||||
Ok(schemas
|
||||
.iter()
|
||||
.map(|(id, schema)| AcmeChallengeSchema {
|
||||
id: id.to_owned(),
|
||||
name: schema
|
||||
.get("name")
|
||||
.and_then(Value::as_str)
|
||||
.unwrap_or(id)
|
||||
.to_owned(),
|
||||
ty: "dns".into(),
|
||||
schema: schema.to_owned(),
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
pub fn get_cached_challenge_schemas() -> Result<ChallengeSchemaWrapper, Error> {
|
||||
#[allow(clippy::type_complexity)]
|
||||
static CACHE: LazyLock<Mutex<Option<(Arc<Vec<AcmeChallengeSchema>>, SystemTime)>>> =
|
||||
LazyLock::new(|| Mutex::new(None));
|
||||
|
||||
// the actual loading code
|
||||
let mut last = CACHE.lock().unwrap();
|
||||
|
||||
let actual_mtime = std::fs::metadata(ACME_DNS_SCHEMA_FN)?.modified()?;
|
||||
|
||||
let schema = match &*last {
|
||||
Some((schema, cached_mtime)) if *cached_mtime >= actual_mtime => schema.clone(),
|
||||
_ => {
|
||||
let new_schema = Arc::new(load_dns_challenge_schema()?);
|
||||
*last = Some((Arc::clone(&new_schema), actual_mtime));
|
||||
new_schema
|
||||
}
|
||||
};
|
||||
|
||||
Ok(ChallengeSchemaWrapper { inner: schema })
|
||||
}
|
@ -1,18 +0,0 @@
|
||||
use std::borrow::Cow;
|
||||
|
||||
use crate::types::KnownAcmeDirectory;
|
||||
|
||||
/// List of known ACME directories.
pub const KNOWN_ACME_DIRECTORIES: &[KnownAcmeDirectory] = &[
    KnownAcmeDirectory {
        name: Cow::Borrowed("Let's Encrypt V2"),
        url: Cow::Borrowed("https://acme-v02.api.letsencrypt.org/directory"),
    },
    KnownAcmeDirectory {
        name: Cow::Borrowed("Let's Encrypt V2 Staging"),
        url: Cow::Borrowed("https://acme-staging-v02.api.letsencrypt.org/directory"),
    },
];

/// Default ACME directory (first entry of [`KNOWN_ACME_DIRECTORIES`]).
pub const DEFAULT_ACME_DIRECTORY_ENTRY: &KnownAcmeDirectory = &KNOWN_ACME_DIRECTORIES[0];
|
@ -1,55 +0,0 @@
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::Error;
|
||||
|
||||
use proxmox_product_config::create_secret_dir;
|
||||
|
||||
/// Directory layout used by the ACME API implementation.
#[derive(Debug)]
struct AcmeApiConfig {
    // base directory for ACME related configuration files
    acme_config_dir: PathBuf,
    // subdirectory (`<config_dir>/accounts`) holding the account files
    acme_account_dir: PathBuf,
}

// Set exactly once via `init()`, read through `acme_api_config()`.
static ACME_ACME_CONFIG: OnceLock<AcmeApiConfig> = OnceLock::new();
|
||||
|
||||
/// Initialize the global product configuration.
|
||||
pub fn init<P: AsRef<Path>>(acme_config_dir: P, create_subdirs: bool) -> Result<(), Error> {
|
||||
let acme_config_dir = acme_config_dir.as_ref().to_owned();
|
||||
|
||||
ACME_ACME_CONFIG
|
||||
.set(AcmeApiConfig {
|
||||
acme_account_dir: acme_config_dir.join("accounts"),
|
||||
acme_config_dir,
|
||||
})
|
||||
.expect("cannot set acme configuration twice");
|
||||
|
||||
if create_subdirs {
|
||||
create_secret_dir(self::acme_config_dir(), false)?;
|
||||
create_secret_dir(acme_account_dir(), false)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn acme_api_config() -> &'static AcmeApiConfig {
|
||||
ACME_ACME_CONFIG
|
||||
.get()
|
||||
.expect("ProxmoxProductConfig is not initialized!")
|
||||
}
|
||||
|
||||
fn acme_config_dir() -> &'static Path {
|
||||
acme_api_config().acme_config_dir.as_path()
|
||||
}
|
||||
|
||||
pub(crate) fn acme_account_dir() -> &'static Path {
|
||||
acme_api_config().acme_account_dir.as_path()
|
||||
}
|
||||
|
||||
pub(crate) fn plugin_cfg_filename() -> PathBuf {
|
||||
acme_config_dir().join("plugins.cfg")
|
||||
}
|
||||
|
||||
pub(crate) fn plugin_cfg_lockfile() -> PathBuf {
|
||||
acme_config_dir().join("plugins.lck")
|
||||
}
|
@ -1,48 +0,0 @@
|
||||
//! ACME API crate (API types and API implementation)
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
|
||||
mod types;
|
||||
pub use types::*;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod init;
|
||||
#[cfg(feature = "impl")]
|
||||
pub use init::*;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod config;
|
||||
#[cfg(feature = "impl")]
|
||||
pub use config::{DEFAULT_ACME_DIRECTORY_ENTRY, KNOWN_ACME_DIRECTORIES};
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod challenge_schemas;
|
||||
#[cfg(feature = "impl")]
|
||||
pub use challenge_schemas::{get_cached_challenge_schemas, ChallengeSchemaWrapper};
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod account_config;
|
||||
#[cfg(feature = "impl")]
|
||||
pub use account_config::account_config_filename;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod plugin_config;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod account_api_impl;
|
||||
#[cfg(feature = "impl")]
|
||||
pub use account_api_impl::{
|
||||
deactivate_account, get_account, get_tos, list_accounts, register_account, update_account,
|
||||
};
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod plugin_api_impl;
|
||||
#[cfg(feature = "impl")]
|
||||
pub use plugin_api_impl::{add_plugin, delete_plugin, get_plugin, list_plugins, update_plugin};
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
pub(crate) mod acme_plugin;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod certificate_helpers;
|
||||
#[cfg(feature = "impl")]
|
||||
pub use certificate_helpers::{create_self_signed_cert, order_certificate, revoke_certificate};
|
@ -1,169 +0,0 @@
|
||||
//! ACME plugin configuration API implementation
|
||||
|
||||
use anyhow::{bail, format_err, Error};
|
||||
|
||||
use serde::Deserialize;
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox_config_digest::ConfigDigest;
|
||||
use proxmox_schema::param_bail;
|
||||
|
||||
use crate::types::{
|
||||
DeletablePluginProperty, DnsPlugin, DnsPluginCore, DnsPluginCoreUpdater, PluginConfig,
|
||||
};
|
||||
|
||||
use proxmox_router::{http_bail, RpcEnvironment};
|
||||
|
||||
pub fn list_plugins(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<PluginConfig>, Error> {
|
||||
let (plugins, digest) = super::plugin_config::plugin_config()?;
|
||||
|
||||
rpcenv["digest"] = digest.to_hex().into();
|
||||
Ok(plugins
|
||||
.iter()
|
||||
.map(|(id, (ty, data))| modify_cfg_for_api(id, ty, data))
|
||||
.collect())
|
||||
}
|
||||
|
||||
pub fn get_plugin(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<PluginConfig, Error> {
|
||||
let (plugins, digest) = super::plugin_config::plugin_config()?;
|
||||
rpcenv["digest"] = digest.to_hex().into();
|
||||
|
||||
match plugins.get(&id) {
|
||||
Some((ty, data)) => Ok(modify_cfg_for_api(&id, ty, data)),
|
||||
None => http_bail!(NOT_FOUND, "no such plugin"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_plugin(r#type: String, core: DnsPluginCore, data: String) -> Result<(), Error> {
|
||||
// Currently we only support DNS plugins and the standalone plugin is "fixed":
|
||||
if r#type != "dns" {
|
||||
param_bail!("type", "invalid ACME plugin type: {:?}", r#type);
|
||||
}
|
||||
|
||||
let data = String::from_utf8(base64::decode(data)?)
|
||||
.map_err(|_| format_err!("data must be valid UTF-8"))?;
|
||||
|
||||
let id = core.id.clone();
|
||||
|
||||
let _lock = super::plugin_config::lock_plugin_config()?;
|
||||
|
||||
let (mut plugins, _digest) = super::plugin_config::plugin_config()?;
|
||||
if plugins.contains_key(&id) {
|
||||
param_bail!("id", "ACME plugin ID {:?} already exists", id);
|
||||
}
|
||||
|
||||
let plugin = serde_json::to_value(DnsPlugin { core, data })?;
|
||||
|
||||
plugins.insert(id, r#type, plugin);
|
||||
|
||||
super::plugin_config::save_plugin_config(&plugins)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Update an existing DNS challenge plugin entry.
///
/// `update` carries the changed core properties, `data` an optional new
/// base64 encoded configuration blob, `delete` the list of properties to
/// reset, and `digest` the expected config digest (optimistic locking).
pub fn update_plugin(
    id: String,
    update: DnsPluginCoreUpdater,
    data: Option<String>,
    delete: Option<Vec<DeletablePluginProperty>>,
    digest: Option<ConfigDigest>,
) -> Result<(), Error> {
    // decode the new configuration blob (if any) before touching the config
    let data = data
        .as_deref()
        .map(base64::decode)
        .transpose()?
        .map(String::from_utf8)
        .transpose()
        .map_err(|_| format_err!("data must be valid UTF-8"))?;

    let _lock = super::plugin_config::lock_plugin_config()?;

    let (mut plugins, expected_digest) = super::plugin_config::plugin_config()?;

    // reject the update when the config changed since the client fetched it
    expected_digest.detect_modification(digest.as_ref())?;

    match plugins.get_mut(&id) {
        Some((ty, ref mut entry)) => {
            if ty != "dns" {
                bail!("cannot update plugin of type {:?}", ty);
            }

            let mut plugin = DnsPlugin::deserialize(&*entry)?;

            // first reset the deleted properties ...
            if let Some(delete) = delete {
                for delete_prop in delete {
                    match delete_prop {
                        DeletablePluginProperty::ValidationDelay => {
                            plugin.core.validation_delay = None;
                        }
                        DeletablePluginProperty::Disable => {
                            plugin.core.disable = None;
                        }
                    }
                }
            }
            // ... then apply the requested updates
            if let Some(data) = data {
                plugin.data = data;
            }
            if let Some(api) = update.api {
                plugin.core.api = api;
            }
            if update.validation_delay.is_some() {
                plugin.core.validation_delay = update.validation_delay;
            }
            if update.disable.is_some() {
                plugin.core.disable = update.disable;
            }

            *entry = serde_json::to_value(plugin)?;
        }
        None => http_bail!(NOT_FOUND, "no such plugin"),
    }

    super::plugin_config::save_plugin_config(&plugins)?;

    Ok(())
}
|
||||
|
||||
pub fn delete_plugin(id: String) -> Result<(), Error> {
|
||||
let _lock = super::plugin_config::lock_plugin_config()?;
|
||||
|
||||
let (mut plugins, _digest) = super::plugin_config::plugin_config()?;
|
||||
if plugins.remove(&id).is_none() {
|
||||
http_bail!(NOT_FOUND, "no such plugin");
|
||||
}
|
||||
super::plugin_config::save_plugin_config(&plugins)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// See PMG/PVE's $modify_cfg_for_api sub
/// Convert a raw `(type, config)` section entry into the API's
/// [`PluginConfig`] representation (PVE/PMG compatible field names).
fn modify_cfg_for_api(id: &str, ty: &str, data: &Value) -> PluginConfig {
    let mut entry = data.clone();

    // section config entries are JSON objects - TODO confirm for all plugin types
    let obj = entry.as_object_mut().unwrap();
    obj.remove("id");
    obj.insert("plugin".to_string(), Value::String(id.to_owned()));
    obj.insert("type".to_string(), Value::String(ty.to_owned()));

    // FIXME: This needs to go once the `Updater` is fixed.
    // None of these should be able to fail unless the user changed the files by hand, in which
    // case we leave the unmodified string in the Value for now. This will be handled with an error
    // later.
    if let Some(Value::String(ref mut data)) = obj.get_mut("data") {
        if let Ok(new) = base64::decode_config(&data, base64::URL_SAFE_NO_PAD) {
            if let Ok(utf8) = String::from_utf8(new) {
                *data = utf8;
            }
        }
    }

    // PVE/PMG do this explicitly for ACME plugins...
    // obj.insert("digest".to_string(), Value::String(digest.clone()));

    serde_json::from_value(entry).unwrap_or_else(|_| PluginConfig {
        plugin: "*Error*".to_string(),
        ty: "*Error*".to_string(),
        ..Default::default()
    })
}
|
@ -1,114 +0,0 @@
|
||||
//! ACME plugin configuration helpers (SectionConfig implementation)
|
||||
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use anyhow::Error;
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox_config_digest::ConfigDigest;
|
||||
use proxmox_product_config::{open_api_lockfile, replace_secret_config, ApiLockGuard};
|
||||
use proxmox_schema::{ApiType, Schema};
|
||||
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
|
||||
|
||||
use crate::types::{DnsPlugin, StandalonePlugin, PLUGIN_ID_SCHEMA};
|
||||
|
||||
static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
|
||||
|
||||
impl DnsPlugin {
|
||||
pub fn decode_data(&self, output: &mut Vec<u8>) -> Result<(), Error> {
|
||||
Ok(base64::decode_config_buf(
|
||||
&self.data,
|
||||
base64::URL_SAFE_NO_PAD,
|
||||
output,
|
||||
)?)
|
||||
}
|
||||
}
|
||||
|
||||
fn init() -> SectionConfig {
|
||||
let mut config = SectionConfig::new(&PLUGIN_ID_SCHEMA);
|
||||
|
||||
let standalone_schema = match &StandalonePlugin::API_SCHEMA {
|
||||
Schema::Object(schema) => schema,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let standalone_plugin = SectionConfigPlugin::new(
|
||||
"standalone".to_string(),
|
||||
Some("id".to_string()),
|
||||
standalone_schema,
|
||||
);
|
||||
config.register_plugin(standalone_plugin);
|
||||
|
||||
let dns_challenge_schema = match DnsPlugin::API_SCHEMA {
|
||||
Schema::AllOf(ref schema) => schema,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
let dns_challenge_plugin = SectionConfigPlugin::new(
|
||||
"dns".to_string(),
|
||||
Some("id".to_string()),
|
||||
dns_challenge_schema,
|
||||
);
|
||||
config.register_plugin(dns_challenge_plugin);
|
||||
|
||||
config
|
||||
}
|
||||
|
||||
pub(crate) fn lock_plugin_config() -> Result<ApiLockGuard, Error> {
|
||||
let plugin_cfg_lockfile = crate::plugin_cfg_lockfile();
|
||||
open_api_lockfile(plugin_cfg_lockfile, None, true)
|
||||
}
|
||||
|
||||
pub(crate) fn plugin_config() -> Result<(PluginData, ConfigDigest), Error> {
|
||||
let plugin_cfg_filename = crate::plugin_cfg_filename();
|
||||
|
||||
let content =
|
||||
proxmox_sys::fs::file_read_optional_string(&plugin_cfg_filename)?.unwrap_or_default();
|
||||
|
||||
let digest = ConfigDigest::from_slice(content.as_bytes());
|
||||
let mut data = CONFIG.parse(plugin_cfg_filename, &content)?;
|
||||
|
||||
if !data.sections.contains_key("standalone") {
|
||||
let standalone = StandalonePlugin::default();
|
||||
data.set_data("standalone", "standalone", &standalone)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
Ok((PluginData { data }, digest))
|
||||
}
|
||||
|
||||
pub(crate) fn save_plugin_config(config: &PluginData) -> Result<(), Error> {
|
||||
let plugin_cfg_filename = crate::plugin_cfg_filename();
|
||||
let raw = CONFIG.write(&plugin_cfg_filename, &config.data)?;
|
||||
|
||||
replace_secret_config(plugin_cfg_filename, raw.as_bytes())
|
||||
}
|
||||
|
||||
/// Parsed plugin configuration - a thin wrapper around [`SectionConfigData`].
pub(crate) struct PluginData {
    // sections keyed by plugin id; values are (type, config) pairs
    data: SectionConfigData,
}
|
||||
|
||||
// And some convenience helpers.
impl PluginData {
    /// Remove a plugin entry, returning its `(type, config)` pair if present.
    pub fn remove(&mut self, name: &str) -> Option<(String, Value)> {
        self.data.sections.remove(name)
    }

    /// Check whether a plugin with the given id exists.
    pub fn contains_key(&mut self, name: &str) -> bool {
        self.data.sections.contains_key(name)
    }

    /// Borrow a plugin entry as a `(type, config)` pair.
    pub fn get(&self, name: &str) -> Option<&(String, Value)> {
        self.data.sections.get(name)
    }

    /// Mutably borrow a plugin entry as a `(type, config)` pair.
    pub fn get_mut(&mut self, name: &str) -> Option<&mut (String, Value)> {
        self.data.sections.get_mut(name)
    }

    /// Insert (or replace) a plugin entry.
    pub fn insert(&mut self, id: String, ty: String, plugin: Value) {
        self.data.sections.insert(id, (ty, plugin));
    }

    /// Iterate over all `(id, (type, config))` entries.
    pub fn iter(&self) -> impl Iterator<Item = (&String, &(String, Value))> + Send {
        self.data.sections.iter()
    }
}
|
@ -1,355 +0,0 @@
|
||||
//! ACME API type definitions.
|
||||
|
||||
use std::borrow::Cow;
|
||||
|
||||
use anyhow::Error;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, SAFE_ID_FORMAT};
|
||||
use proxmox_schema::{api, ApiStringFormat, ApiType, Schema, StringSchema, Updater};
|
||||
|
||||
use proxmox_acme::types::AccountData as AcmeAccountData;
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
san: {
|
||||
type: Array,
|
||||
items: {
|
||||
description: "A SubjectAlternateName entry.",
|
||||
type: String,
|
||||
},
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// Certificate information.
|
||||
#[derive(PartialEq, Clone, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct CertificateInfo {
|
||||
/// Certificate file name.
|
||||
pub filename: String,
|
||||
|
||||
/// Certificate subject name.
|
||||
pub subject: String,
|
||||
|
||||
/// List of certificate's SubjectAlternativeName entries.
|
||||
pub san: Vec<String>,
|
||||
|
||||
/// Certificate issuer name.
|
||||
pub issuer: String,
|
||||
|
||||
/// Certificate's notBefore timestamp (UNIX epoch).
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub notbefore: Option<i64>,
|
||||
|
||||
/// Certificate's notAfter timestamp (UNIX epoch).
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub notafter: Option<i64>,
|
||||
|
||||
/// Certificate in PEM format.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub pem: Option<String>,
|
||||
|
||||
/// Certificate's public key algorithm.
|
||||
pub public_key_type: String,
|
||||
|
||||
/// Certificate's public key size if available.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub public_key_bits: Option<u32>,
|
||||
|
||||
/// The SSL Fingerprint.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub fingerprint: Option<String>,
|
||||
}
|
||||
|
||||
proxmox_schema::api_string_type! {
|
||||
#[api(format: &SAFE_ID_FORMAT)]
|
||||
/// ACME account name.
|
||||
#[derive(Clone, Eq, PartialEq, Hash, Deserialize, Serialize)]
|
||||
#[serde(transparent)]
|
||||
pub struct AcmeAccountName(String);
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: { type: String },
|
||||
url: { type: String },
|
||||
},
|
||||
)]
|
||||
/// An ACME directory endpoint with a name and URL.
|
||||
#[derive(Clone, Deserialize, Serialize, PartialEq)]
|
||||
pub struct KnownAcmeDirectory {
|
||||
/// The ACME directory's name.
|
||||
pub name: Cow<'static, str>,
|
||||
/// The ACME directory's endpoint URL.
|
||||
pub url: Cow<'static, str>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
schema: {
|
||||
type: Object,
|
||||
additional_properties: true,
|
||||
properties: {},
|
||||
},
|
||||
type: {
|
||||
type: String,
|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Clone, Deserialize, Serialize, PartialEq)]
|
||||
/// Schema for an ACME challenge plugin.
|
||||
pub struct AcmeChallengeSchema {
|
||||
/// Plugin ID.
|
||||
pub id: String,
|
||||
|
||||
/// Human readable name, falls back to id.
|
||||
pub name: String,
|
||||
|
||||
/// Plugin Type.
|
||||
#[serde(rename = "type")]
|
||||
pub ty: String,
|
||||
|
||||
/// The plugin's parameter schema.
|
||||
pub schema: Value,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"domain": { format: &DNS_NAME_FORMAT },
|
||||
"alias": {
|
||||
optional: true,
|
||||
format: &DNS_ALIAS_FORMAT,
|
||||
},
|
||||
"plugin": {
|
||||
optional: true,
|
||||
format: &SAFE_ID_FORMAT,
|
||||
},
|
||||
},
|
||||
default_key: "domain",
|
||||
)]
|
||||
#[derive(Clone, PartialEq, Deserialize, Serialize)]
|
||||
/// A domain entry for an ACME certificate.
|
||||
pub struct AcmeDomain {
|
||||
/// The domain to certify for.
|
||||
pub domain: String,
|
||||
|
||||
/// The domain to use for challenges instead of the default acme challenge domain.
|
||||
///
|
||||
/// This is useful if you use CNAME entries to redirect `_acme-challenge.*` domains to a
|
||||
/// different DNS server.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub alias: Option<String>,
|
||||
|
||||
/// The plugin to use to validate this domain.
|
||||
///
|
||||
/// Empty means standalone HTTP validation is used.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub plugin: Option<String>,
|
||||
}
|
||||
|
||||
/// ACME domain configuration string [Schema].
|
||||
pub const ACME_DOMAIN_PROPERTY_SCHEMA: Schema =
|
||||
StringSchema::new("ACME domain configuration string")
|
||||
.format(&ApiStringFormat::PropertyString(&AcmeDomain::API_SCHEMA))
|
||||
.schema();
|
||||
|
||||
/// Parse [AcmeDomain] from property string.
|
||||
pub fn parse_acme_domain_string(value_str: &str) -> Result<AcmeDomain, Error> {
|
||||
let value = AcmeDomain::API_SCHEMA.parse_property_string(value_str)?;
|
||||
let value: AcmeDomain = serde_json::from_value(value)?;
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
/// Format [AcmeDomain] as property string.
///
/// Printing a schema-valid [AcmeDomain] is not expected to fail, hence the
/// `unwrap()`.
pub fn create_acme_domain_string(config: &AcmeDomain) -> String {
    proxmox_schema::property_string::print::<AcmeDomain>(config).unwrap()
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Clone, PartialEq, Deserialize, Serialize)]
|
||||
/// ACME Account information.
|
||||
///
|
||||
/// This is what we return via the API.
|
||||
pub struct AccountInfo {
|
||||
/// Raw account data.
|
||||
pub account: AcmeAccountData,
|
||||
|
||||
/// The ACME directory URL the account was created at.
|
||||
pub directory: String,
|
||||
|
||||
/// The account's own URL within the ACME directory.
|
||||
pub location: String,
|
||||
|
||||
/// The ToS URL, if the user agreed to one.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub tos: Option<String>,
|
||||
}
|
||||
|
||||
/// An ACME Account entry.
///
/// Currently only contains a 'name' property.
#[api()]
#[derive(Clone, PartialEq, Deserialize, Serialize)]
pub struct AcmeAccountEntry {
    /// The ACME account's name.
    pub name: AcmeAccountName,
}
|
||||
|
||||
#[api()]
#[derive(Clone, PartialEq, Deserialize, Serialize)]
/// The ACME configuration.
///
/// Currently only contains the name of the account to use.
pub struct AcmeConfig {
    /// Account to use to acquire ACME certificates.
    pub account: String,
}
|
||||
|
||||
/// Parse [AcmeConfig] from property string.
|
||||
pub fn parse_acme_config_string(value_str: &str) -> Result<AcmeConfig, Error> {
|
||||
let value = AcmeConfig::API_SCHEMA.parse_property_string(value_str)?;
|
||||
let value: AcmeConfig = serde_json::from_value(value)?;
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
/// Format [AcmeConfig] as property string.
|
||||
pub fn create_acme_config_string(config: &AcmeConfig) -> String {
|
||||
proxmox_schema::property_string::print::<AcmeConfig>(config).unwrap()
|
||||
}
|
||||
|
||||
/// [Schema] for ACME Challenge Plugin ID.
|
||||
pub const PLUGIN_ID_SCHEMA: Schema = StringSchema::new("ACME Challenge Plugin ID.")
|
||||
.format(&SAFE_ID_FORMAT)
|
||||
.min_length(1)
|
||||
.max_length(32)
|
||||
.schema();
|
||||
|
||||
#[api]
|
||||
#[derive(Clone, Default, PartialEq, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// ACME plugin config. The API's format is inherited from PVE/PMG:
|
||||
pub struct PluginConfig {
|
||||
/// Plugin ID.
|
||||
pub plugin: String,
|
||||
|
||||
/// Plugin type.
|
||||
#[serde(rename = "type")]
|
||||
pub ty: String,
|
||||
|
||||
/// DNS Api name.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub api: Option<String>,
|
||||
|
||||
/// Plugin configuration data.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub data: Option<String>,
|
||||
|
||||
/// Extra delay in seconds to wait before requesting validation.
|
||||
///
|
||||
/// Allows to cope with long TTL of DNS records.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub validation_delay: Option<u32>,
|
||||
|
||||
/// Flag to disable the config.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub disable: Option<bool>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
id: { schema: PLUGIN_ID_SCHEMA },
|
||||
},
|
||||
)]
|
||||
#[derive(Deserialize, Serialize)]
|
||||
/// Standalone ACME Plugin for the http-1 challenge.
|
||||
pub struct StandalonePlugin {
|
||||
/// Plugin ID.
|
||||
id: String,
|
||||
}
|
||||
|
||||
impl Default for StandalonePlugin {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
id: "standalone".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
id: { schema: PLUGIN_ID_SCHEMA },
|
||||
disable: {
|
||||
optional: true,
|
||||
default: false,
|
||||
},
|
||||
"validation-delay": {
|
||||
default: 30,
|
||||
optional: true,
|
||||
minimum: 0,
|
||||
maximum: 2 * 24 * 60 * 60,
|
||||
},
|
||||
},
|
||||
)]
|
||||
/// DNS ACME Challenge Plugin core data.
|
||||
#[derive(Deserialize, Serialize, Updater)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct DnsPluginCore {
|
||||
/// Plugin ID.
|
||||
#[updater(skip)]
|
||||
pub id: String,
|
||||
|
||||
/// DNS API Plugin Id.
|
||||
pub api: String,
|
||||
|
||||
/// Extra delay in seconds to wait before requesting validation.
|
||||
///
|
||||
/// Allows to cope with long TTL of DNS records.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub validation_delay: Option<u32>,
|
||||
|
||||
/// Flag to disable the config.
|
||||
#[serde(skip_serializing_if = "Option::is_none", default)]
|
||||
pub disable: Option<bool>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
core: { type: DnsPluginCore },
|
||||
},
|
||||
)]
|
||||
/// DNS ACME Challenge Plugin.
|
||||
#[derive(Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct DnsPlugin {
|
||||
#[serde(flatten)]
|
||||
pub core: DnsPluginCore,
|
||||
|
||||
// We handle this property separately in the API calls.
|
||||
/// DNS plugin data (base64url encoded without padding).
|
||||
#[serde(with = "proxmox_serde::string_as_base64url_nopad")]
|
||||
pub data: String,
|
||||
}
|
||||
|
||||
#[api()]
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Deletable plugin property names.
|
||||
pub enum DeletablePluginProperty {
|
||||
/// Delete the disable property
|
||||
Disable,
|
||||
/// Delete the validation-delay property
|
||||
ValidationDelay,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
name: { type: AcmeAccountName },
|
||||
},
|
||||
)]
|
||||
/// An ACME Account entry.
|
||||
///
|
||||
/// Currently only contains a 'name' property.
|
||||
#[derive(Clone, PartialEq, Deserialize, Serialize)]
|
||||
pub struct AccountEntry {
|
||||
pub name: AcmeAccountName,
|
||||
}
|
@ -1,45 +0,0 @@
|
||||
[package]
|
||||
name = "proxmox-acme"
|
||||
description = "ACME client library"
|
||||
version = "0.5.4"
|
||||
|
||||
exclude = [ "debian" ]
|
||||
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
homepage.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[dependencies]
|
||||
base64.workspace = true
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json.workspace = true
|
||||
|
||||
# For the ACME implementation
|
||||
openssl = { workspace = true, optional = true }
|
||||
|
||||
# For the client
|
||||
native-tls = { workspace = true, optional = true }
|
||||
|
||||
proxmox-schema = { workspace = true, optional = true, features = [ "api-macro" ] }
|
||||
proxmox-http = { workspace = true, optional = true, features = [ "client" ] }
|
||||
anyhow = { workspace = true, optional = true }
|
||||
bytes = { workspace = true, optional = true }
|
||||
hyper = { workspace = true, optional = true }
|
||||
|
||||
[dependencies.ureq]
|
||||
optional = true
|
||||
version = "2.4"
|
||||
default-features = false
|
||||
features = [ "native-tls", "gzip" ]
|
||||
|
||||
[features]
|
||||
default = [ "impl" ]
|
||||
api-types = [ "dep:proxmox-schema" ]
|
||||
impl = [ "api-types", "dep:openssl" ]
|
||||
client = [ "impl", "dep:ureq", "dep:native-tls"]
|
||||
async-client = [ "impl", "dep:hyper", "dep:proxmox-http", "dep:anyhow", "dep:bytes" ]
|
||||
|
||||
[dev-dependencies]
|
||||
anyhow.workspace = true
|
@ -1,127 +0,0 @@
|
||||
rust-proxmox-acme (0.5.4-1) bookworm; urgency=medium
|
||||
|
||||
* rebuild with proxmox-schema 4.0
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 15 Jan 2025 12:27:29 +0100
|
||||
|
||||
rust-proxmox-acme (0.5.3) bookworm; urgency=medium
|
||||
|
||||
* detect base64 vs base64url encoded eab hmac key
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 03 Oct 2024 09:52:20 +0200
|
||||
|
||||
rust-proxmox-acme (0.5.2) bookworm; urgency=medium
|
||||
|
||||
* allow to compile/use api types separately.
|
||||
|
||||
* add async-client feature
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 16 May 2024 11:31:43 +0200
|
||||
|
||||
rust-proxmox-acme (0.5.1) bookworm; urgency=medium
|
||||
|
||||
* add api-types feature to provide schemas for api types
|
||||
|
||||
* derive PartialEq for api types for integration in rust based ui code
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 07 Mar 2024 13:27:08 +0100
|
||||
|
||||
rust-proxmox-acme (0.5.0) bookworm; urgency=medium
|
||||
|
||||
* add external account binding support
|
||||
|
||||
* add a few more standard fields to Meta
|
||||
|
||||
* update deprecated openssl calls
|
||||
|
||||
* documentation fixups
|
||||
|
||||
* general code improvements and cleanups
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Mon, 04 Dec 2023 11:46:26 +0100
|
||||
|
||||
rust-proxmox-acme-rs (0.4.0) pve; urgency=medium
|
||||
|
||||
* switch from curl to ureq with native-tls
|
||||
|
||||
* bump edition to 2021
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Tue, 01 Feb 2022 10:19:29 +0100
|
||||
|
||||
rust-proxmox-acme-rs (0.3.2) pve; urgency=medium
|
||||
|
||||
* rebuild with base64 0.13
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 18 Nov 2021 12:49:25 +0100
|
||||
|
||||
rust-proxmox-acme-rs (0.3.1) pve; urgency=medium
|
||||
|
||||
* add proxy support
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 18 Nov 2021 09:46:34 +0100
|
||||
|
||||
rust-proxmox-acme-rs (0.3.0) pve; urgency=medium
|
||||
|
||||
* directory: make metadata optional
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 21 Oct 2021 13:10:27 +0200
|
||||
|
||||
rust-proxmox-acme-rs (0.2.2-1) pve; urgency=medium
|
||||
|
||||
* improve crate documentation
|
||||
|
||||
* mark `Error` as 'must_use'
|
||||
|
||||
* make status types `Copy`
|
||||
|
||||
* add Client::directory_url() to get the URL without querying the whole
|
||||
directory
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Fri, 07 May 2021 13:53:08 +0200
|
||||
|
||||
rust-proxmox-acme-rs (0.2.1-1) pve; urgency=medium
|
||||
|
||||
* make revocation workflow accessible without client
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 14 Apr 2021 14:56:49 +0200
|
||||
|
||||
rust-proxmox-acme-rs (0.2.0-1) pve; urgency=medium
|
||||
|
||||
* add 'status' and 'url' as fixed members to `Challenge`
|
||||
|
||||
* expose some workflow helpers in a more consistentw ay
|
||||
|
||||
* add `util::Csr` for CSR generation
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Mon, 12 Apr 2021 13:06:19 +0200
|
||||
|
||||
rust-proxmox-acme-rs (0.1.4-1) pve; urgency=medium
|
||||
|
||||
* collect extra account fields (such as 'created' from let's encrypt)
|
||||
in the AccountData struct
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 17 Mar 2021 15:28:09 +0100
|
||||
|
||||
rust-proxmox-acme-rs (0.1.3-1) pve; urgency=medium
|
||||
|
||||
* fix padding in ecdsa signatures
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 17 Mar 2021 13:34:10 +0100
|
||||
|
||||
rust-proxmox-acme-rs (0.1.2-1) pve; urgency=medium
|
||||
|
||||
* include Content-length header in requests
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Fri, 12 Mar 2021 15:43:01 +0100
|
||||
|
||||
rust-proxmox-acme-rs (0.1.1-1) pve; urgency=medium
|
||||
|
||||
* make AccountData fields public
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Tue, 09 Mar 2021 13:22:55 +0100
|
||||
|
||||
rust-proxmox-acme-rs (0.1.0-1) pve; urgency=medium
|
||||
|
||||
* initial release
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Tue, 09 Mar 2021 13:01:56 +0100
|
@ -1,120 +0,0 @@
|
||||
Source: rust-proxmox-acme
|
||||
Section: rust
|
||||
Priority: optional
|
||||
Build-Depends: debhelper-compat (= 13),
|
||||
dh-sequence-cargo,
|
||||
cargo:native <!nocheck>,
|
||||
rustc:native <!nocheck>,
|
||||
libstd-rust-dev <!nocheck>,
|
||||
librust-base64-0.13+default-dev <!nocheck>,
|
||||
librust-openssl-0.10+default-dev <!nocheck>,
|
||||
librust-proxmox-schema-4+api-macro-dev <!nocheck>,
|
||||
librust-proxmox-schema-4+default-dev <!nocheck>,
|
||||
librust-serde-1+default-dev <!nocheck>,
|
||||
librust-serde-1+derive-dev <!nocheck>,
|
||||
librust-serde-json-1+default-dev <!nocheck>
|
||||
Maintainer: Proxmox Support Team <support@proxmox.com>
|
||||
Standards-Version: 4.7.0
|
||||
Vcs-Git:
|
||||
Vcs-Browser:
|
||||
Homepage: https://proxmox.com
|
||||
X-Cargo-Crate: proxmox-acme
|
||||
Rules-Requires-Root: no
|
||||
|
||||
Package: librust-proxmox-acme-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-base64-0.13+default-dev,
|
||||
librust-serde-1+default-dev,
|
||||
librust-serde-1+derive-dev,
|
||||
librust-serde-json-1+default-dev
|
||||
Recommends:
|
||||
librust-proxmox-acme+impl-dev (= ${binary:Version})
|
||||
Suggests:
|
||||
librust-proxmox-acme+api-types-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme+async-client-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme+client-dev (= ${binary:Version})
|
||||
Provides:
|
||||
librust-proxmox-acme-0-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5.4-dev (= ${binary:Version})
|
||||
Description: ACME client library - Rust source code
|
||||
Source code for Debianized Rust crate "proxmox-acme"
|
||||
|
||||
Package: librust-proxmox-acme+api-types-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-proxmox-acme-dev (= ${binary:Version}),
|
||||
librust-proxmox-schema-4+api-macro-dev,
|
||||
librust-proxmox-schema-4+default-dev
|
||||
Provides:
|
||||
librust-proxmox-acme-0+api-types-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5+api-types-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5.4+api-types-dev (= ${binary:Version})
|
||||
Description: ACME client library - feature "api-types"
|
||||
This metapackage enables feature "api-types" for the Rust proxmox-acme crate,
|
||||
by pulling in any additional dependencies needed by that feature.
|
||||
|
||||
Package: librust-proxmox-acme+async-client-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-proxmox-acme-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme+impl-dev (= ${binary:Version}),
|
||||
librust-anyhow-1+default-dev,
|
||||
librust-bytes-1+default-dev,
|
||||
librust-hyper-0.14+default-dev (>= 0.14.5-~~),
|
||||
librust-proxmox-http-0.9+client-dev (>= 0.9.4-~~),
|
||||
librust-proxmox-http-0.9+default-dev (>= 0.9.4-~~)
|
||||
Provides:
|
||||
librust-proxmox-acme-0+async-client-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5+async-client-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5.4+async-client-dev (= ${binary:Version})
|
||||
Description: ACME client library - feature "async-client"
|
||||
This metapackage enables feature "async-client" for the Rust proxmox-acme
|
||||
crate, by pulling in any additional dependencies needed by that feature.
|
||||
|
||||
Package: librust-proxmox-acme+client-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-proxmox-acme-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme+impl-dev (= ${binary:Version}),
|
||||
librust-native-tls-0.2+default-dev,
|
||||
librust-ureq-2+gzip-dev (>= 2.4-~~),
|
||||
librust-ureq-2+native-tls-dev (>= 2.4-~~)
|
||||
Provides:
|
||||
librust-proxmox-acme-0+client-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5+client-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5.4+client-dev (= ${binary:Version})
|
||||
Description: ACME client library - feature "client"
|
||||
This metapackage enables feature "client" for the Rust proxmox-acme crate, by
|
||||
pulling in any additional dependencies needed by that feature.
|
||||
|
||||
Package: librust-proxmox-acme+impl-dev
|
||||
Architecture: any
|
||||
Multi-Arch: same
|
||||
Depends:
|
||||
${misc:Depends},
|
||||
librust-proxmox-acme-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme+api-types-dev (= ${binary:Version}),
|
||||
librust-openssl-0.10+default-dev
|
||||
Provides:
|
||||
librust-proxmox-acme+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0+impl-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5+impl-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5.4+impl-dev (= ${binary:Version}),
|
||||
librust-proxmox-acme-0.5.4+default-dev (= ${binary:Version})
|
||||
Description: ACME client library - feature "impl" and 1 more
|
||||
This metapackage enables feature "impl" for the Rust proxmox-acme crate, by
|
||||
pulling in any additional dependencies needed by that feature.
|
||||
.
|
||||
Additionally, this package also provides the "default" feature.
|
@ -1,16 +0,0 @@
|
||||
Copyright (C) 2020-2021 Proxmox Server Solutions GmbH
|
||||
|
||||
This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
@ -1,8 +0,0 @@
|
||||
overlay = "."
|
||||
crate_src_path = ".."
|
||||
maintainer = "Proxmox Support Team <support@proxmox.com>"
|
||||
|
||||
[source]
|
||||
# TODO: update once public
|
||||
vcs_git = ""
|
||||
vcs_browser = ""
|
@ -1 +0,0 @@
|
||||
3.0 (native)
|
@ -1,425 +0,0 @@
|
||||
//! ACME Account management and creation. The [`Account`] type also contains most of the ACME API
|
||||
//! entry point helpers.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use openssl::pkey::{PKey, Private};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::authorization::{Authorization, GetAuthorization};
|
||||
use crate::b64u;
|
||||
use crate::directory::Directory;
|
||||
use crate::jws::Jws;
|
||||
use crate::key::{Jwk, PublicKey};
|
||||
use crate::order::{NewOrder, Order, OrderData};
|
||||
use crate::request::Request;
|
||||
use crate::types::{AccountData, AccountStatus, ExternalAccountBinding};
|
||||
use crate::Error;
|
||||
|
||||
/// An ACME Account.
|
||||
///
|
||||
/// This contains the location URL, the account data and the private key for an account.
|
||||
/// This can directly be serialized via serde to persist the account.
|
||||
///
|
||||
/// In order to register a new account with an ACME provider, see the [`Account::creator`] method.
|
||||
#[derive(Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Account {
|
||||
/// Account location URL.
|
||||
pub location: String,
|
||||
|
||||
/// Acme account data.
|
||||
pub data: AccountData,
|
||||
|
||||
/// base64url encoded PEM formatted private key.
|
||||
pub private_key: String,
|
||||
}
|
||||
|
||||
impl Account {
|
||||
/// Rebuild an account from its components.
|
||||
pub fn from_parts(location: String, private_key: String, data: AccountData) -> Self {
|
||||
Self {
|
||||
location,
|
||||
data,
|
||||
private_key,
|
||||
}
|
||||
}
|
||||
|
||||
/// Builds an [`AccountCreator`]. This handles creation of the private key and account data as
|
||||
/// well as handling the response sent by the server for the registration request.
|
||||
pub fn creator() -> AccountCreator {
|
||||
AccountCreator::default()
|
||||
}
|
||||
|
||||
/// Place a new order. This will build a [`NewOrder`] representing an in flight order creation
|
||||
/// request.
|
||||
///
|
||||
/// The returned `NewOrder`'s `request` option is *guaranteed* to be `Some(Request)`.
|
||||
pub fn new_order(
|
||||
&self,
|
||||
order: &OrderData,
|
||||
directory: &Directory,
|
||||
nonce: &str,
|
||||
) -> Result<NewOrder, Error> {
|
||||
let key = PKey::private_key_from_pem(self.private_key.as_bytes())?;
|
||||
|
||||
if order.identifiers.is_empty() {
|
||||
return Err(Error::EmptyOrder);
|
||||
}
|
||||
|
||||
let url = directory.new_order_url();
|
||||
let body = serde_json::to_string(&Jws::new(
|
||||
&key,
|
||||
Some(self.location.clone()),
|
||||
url.to_owned(),
|
||||
nonce.to_owned(),
|
||||
order,
|
||||
)?)?;
|
||||
|
||||
let request = Request {
|
||||
url: url.to_owned(),
|
||||
method: "POST",
|
||||
content_type: crate::request::JSON_CONTENT_TYPE,
|
||||
body,
|
||||
expected: crate::request::CREATED,
|
||||
};
|
||||
|
||||
Ok(NewOrder::new(request))
|
||||
}
|
||||
|
||||
/// Prepare a "POST-as-GET" request to fetch data. Low level helper.
|
||||
pub fn get_request(&self, url: &str, nonce: &str) -> Result<Request, Error> {
|
||||
let key = PKey::private_key_from_pem(self.private_key.as_bytes())?;
|
||||
let body = serde_json::to_string(&Jws::new_full(
|
||||
&key,
|
||||
Some(self.location.clone()),
|
||||
url.to_owned(),
|
||||
nonce.to_owned(),
|
||||
String::new(),
|
||||
)?)?;
|
||||
|
||||
Ok(Request {
|
||||
url: url.to_owned(),
|
||||
method: "POST",
|
||||
content_type: crate::request::JSON_CONTENT_TYPE,
|
||||
body,
|
||||
expected: 200,
|
||||
})
|
||||
}
|
||||
|
||||
/// Prepare a JSON POST request. Low level helper.
|
||||
pub fn post_request<T: Serialize>(
|
||||
&self,
|
||||
url: &str,
|
||||
nonce: &str,
|
||||
data: &T,
|
||||
) -> Result<Request, Error> {
|
||||
let key = PKey::private_key_from_pem(self.private_key.as_bytes())?;
|
||||
let body = serde_json::to_string(&Jws::new(
|
||||
&key,
|
||||
Some(self.location.clone()),
|
||||
url.to_owned(),
|
||||
nonce.to_owned(),
|
||||
data,
|
||||
)?)?;
|
||||
|
||||
Ok(Request {
|
||||
url: url.to_owned(),
|
||||
method: "POST",
|
||||
content_type: crate::request::JSON_CONTENT_TYPE,
|
||||
body,
|
||||
expected: 200,
|
||||
})
|
||||
}
|
||||
|
||||
/// Prepare a JSON POST request.
|
||||
fn post_request_raw_payload(
|
||||
&self,
|
||||
url: &str,
|
||||
nonce: &str,
|
||||
payload: String,
|
||||
) -> Result<Request, Error> {
|
||||
let key = PKey::private_key_from_pem(self.private_key.as_bytes())?;
|
||||
let body = serde_json::to_string(&Jws::new_full(
|
||||
&key,
|
||||
Some(self.location.clone()),
|
||||
url.to_owned(),
|
||||
nonce.to_owned(),
|
||||
payload,
|
||||
)?)?;
|
||||
|
||||
Ok(Request {
|
||||
url: url.to_owned(),
|
||||
method: "POST",
|
||||
content_type: crate::request::JSON_CONTENT_TYPE,
|
||||
body,
|
||||
expected: 200,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the "key authorization" for a token.
|
||||
pub fn key_authorization(&self, token: &str) -> Result<String, Error> {
|
||||
let key = PKey::private_key_from_pem(self.private_key.as_bytes())?;
|
||||
let thumbprint = PublicKey::try_from(&*key)?.thumbprint()?;
|
||||
Ok(format!("{}.{}", token, thumbprint))
|
||||
}
|
||||
|
||||
/// Get the TXT field value for a dns-01 token. This is the base64url encoded sha256 digest of
|
||||
/// the key authorization value.
|
||||
pub fn dns_01_txt_value(&self, token: &str) -> Result<String, Error> {
|
||||
let key_authorization = self.key_authorization(token)?;
|
||||
let digest = openssl::sha::sha256(key_authorization.as_bytes());
|
||||
Ok(b64u::encode(&digest))
|
||||
}
|
||||
|
||||
/// Prepare a request to update account data.
|
||||
///
|
||||
/// This is a rather low level interface. You should know what you're doing.
|
||||
pub fn update_account_request<T: Serialize>(
|
||||
&self,
|
||||
nonce: &str,
|
||||
data: &T,
|
||||
) -> Result<Request, Error> {
|
||||
self.post_request(&self.location, nonce, data)
|
||||
}
|
||||
|
||||
/// Prepare a request to deactivate this account.
|
||||
pub fn deactivate_account_request<T: Serialize>(&self, nonce: &str) -> Result<Request, Error> {
|
||||
self.post_request_raw_payload(
|
||||
&self.location,
|
||||
nonce,
|
||||
r#"{"status":"deactivated"}"#.to_string(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Prepare a request to query an Authorization for an Order.
|
||||
///
|
||||
/// Returns `Ok(None)` if `auth_index` is out of out of range. You can query the number of
|
||||
/// authorizations from via [`Order::authorization_len`] or by manually inspecting its
|
||||
/// `.data.authorization` vector.
|
||||
pub fn get_authorization(
|
||||
&self,
|
||||
order: &Order,
|
||||
auth_index: usize,
|
||||
nonce: &str,
|
||||
) -> Result<Option<GetAuthorization>, Error> {
|
||||
match order.authorization(auth_index) {
|
||||
None => Ok(None),
|
||||
Some(url) => Ok(Some(GetAuthorization::new(self.get_request(url, nonce)?))),
|
||||
}
|
||||
}
|
||||
|
||||
/// Prepare a request to validate a Challenge from an Authorization.
|
||||
///
|
||||
/// Returns `Ok(None)` if `challenge_index` is out of out of range. The challenge count is
|
||||
/// available by inspecting the [`Authorization::challenges`] vector.
|
||||
///
|
||||
/// This returns a raw `Request` since validation takes some time and the `Authorization`
|
||||
/// object has to be re-queried and its `status` inspected.
|
||||
pub fn validate_challenge(
|
||||
&self,
|
||||
authorization: &Authorization,
|
||||
challenge_index: usize,
|
||||
nonce: &str,
|
||||
) -> Result<Option<Request>, Error> {
|
||||
match authorization.challenges.get(challenge_index) {
|
||||
None => Ok(None),
|
||||
Some(challenge) => self
|
||||
.post_request_raw_payload(&challenge.url, nonce, "{}".to_string())
|
||||
.map(Some),
|
||||
}
|
||||
}
|
||||
|
||||
/// Prepare a request to revoke a certificate.
|
||||
///
|
||||
/// The certificate can be either PEM or DER formatted.
|
||||
///
|
||||
/// Note that this uses the account's key for authorization.
|
||||
///
|
||||
/// Revocation using a certificate's private key is not yet implemented.
|
||||
pub fn revoke_certificate(
|
||||
&self,
|
||||
certificate: &[u8],
|
||||
reason: Option<u32>,
|
||||
) -> Result<CertificateRevocation, Error> {
|
||||
let cert = if certificate.starts_with(b"-----BEGIN CERTIFICATE-----") {
|
||||
b64u::encode(&openssl::x509::X509::from_pem(certificate)?.to_der()?)
|
||||
} else {
|
||||
b64u::encode(certificate)
|
||||
};
|
||||
|
||||
let data = match reason {
|
||||
Some(reason) => serde_json::json!({ "certificate": cert, "reason": reason }),
|
||||
None => serde_json::json!({ "certificate": cert }),
|
||||
};
|
||||
|
||||
Ok(CertificateRevocation {
|
||||
account: self,
|
||||
data,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Certificate revocation involves converting the certificate to base64url encoded DER and then
|
||||
/// embedding it in a json structure. Since we also need a nonce and possibly retry the request if
|
||||
/// a `BadNonce` error happens, this caches the converted data for efficiency.
|
||||
pub struct CertificateRevocation<'a> {
|
||||
account: &'a Account,
|
||||
data: Value,
|
||||
}
|
||||
|
||||
impl CertificateRevocation<'_> {
|
||||
/// Create the revocation request using the specified nonce for the given directory.
|
||||
pub fn request(&self, directory: &Directory, nonce: &str) -> Result<Request, Error> {
|
||||
self.account
|
||||
.post_request(&directory.data.revoke_cert, nonce, &self.data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper to create an account.
|
||||
///
|
||||
/// This is used to generate a private key and set the contact info for the account. Afterwards the
|
||||
/// creation request can be created via the [`request`](AccountCreator::request()) method, giving
|
||||
/// it a nonce and a directory. This can be repeated, if necessary, like when the nonce fails.
|
||||
///
|
||||
/// When the server sends a successful response, it should be passed to the
|
||||
/// [`response`](AccountCreator::response()) method to finish the creation of an [`Account`] which
|
||||
/// can then be persisted.
|
||||
#[derive(Default)]
|
||||
#[must_use = "when creating an account you must pass the response to AccountCreator::response()!"]
|
||||
pub struct AccountCreator {
|
||||
contact: Vec<String>,
|
||||
terms_of_service_agreed: bool,
|
||||
key: Option<PKey<Private>>,
|
||||
eab_credentials: Option<(String, PKey<Private>)>,
|
||||
}
|
||||
|
||||
impl AccountCreator {
|
||||
/// Replace the contact info with the provided ACME compatible data.
|
||||
pub fn set_contacts(mut self, contact: Vec<String>) -> Self {
|
||||
self.contact = contact;
|
||||
self
|
||||
}
|
||||
|
||||
/// Append a contact string.
|
||||
pub fn contact(mut self, contact: String) -> Self {
|
||||
self.contact.push(contact);
|
||||
self
|
||||
}
|
||||
|
||||
/// Append an email address to the contact list.
|
||||
pub fn email(self, email: String) -> Self {
|
||||
self.contact(format!("mailto:{}", email))
|
||||
}
|
||||
|
||||
/// Change whether the account agrees to the terms of service. Use the directory's or client's
|
||||
/// `terms_of_service_url()` method to present the user with the Terms of Service.
|
||||
pub fn agree_to_tos(mut self, agree: bool) -> Self {
|
||||
self.terms_of_service_agreed = agree;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the EAB credentials for the account registration
|
||||
pub fn set_eab_credentials(mut self, kid: String, hmac_key: String) -> Result<Self, Error> {
|
||||
let hmac_key = if hmac_key.contains('+') || hmac_key.contains('/') {
|
||||
base64::decode(hmac_key)?
|
||||
} else {
|
||||
b64u::decode(&hmac_key)?
|
||||
};
|
||||
let hmac_key = PKey::hmac(&hmac_key)?;
|
||||
self.eab_credentials = Some((kid, hmac_key));
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
/// Generate a new RSA key of the specified key size.
|
||||
pub fn generate_rsa_key(self, bits: u32) -> Result<Self, Error> {
|
||||
let key = openssl::rsa::Rsa::generate(bits)?;
|
||||
Ok(self.with_key(PKey::from_rsa(key)?))
|
||||
}
|
||||
|
||||
/// Generate a new P-256 EC key.
|
||||
pub fn generate_ec_key(self) -> Result<Self, Error> {
|
||||
let key = openssl::ec::EcKey::generate(
|
||||
openssl::ec::EcGroup::from_curve_name(openssl::nid::Nid::X9_62_PRIME256V1)?.as_ref(),
|
||||
)?;
|
||||
Ok(self.with_key(PKey::from_ec_key(key)?))
|
||||
}
|
||||
|
||||
/// Use an existing key. Note that only RSA and EC keys using the `P-256` curve are currently
|
||||
/// supported, however, this will not be checked at this point.
|
||||
pub fn with_key(mut self, key: PKey<Private>) -> Self {
|
||||
self.key = Some(key);
|
||||
self
|
||||
}
|
||||
|
||||
/// Prepare a HTTP request to create this account.
|
||||
///
|
||||
/// Changes to the user data made after this will have no effect on the account generated with
|
||||
/// the resulting request.
|
||||
/// Changing the private key between using the request and passing the response to
|
||||
/// [`response`](AccountCreator::response()) will render the account unusable!
|
||||
pub fn request(&self, directory: &Directory, nonce: &str) -> Result<Request, Error> {
|
||||
let key = self.key.as_deref().ok_or(Error::MissingKey)?;
|
||||
let url = directory.new_account_url();
|
||||
|
||||
let external_account_binding = self
|
||||
.eab_credentials
|
||||
.as_ref()
|
||||
.map(|cred| {
|
||||
ExternalAccountBinding::new(&cred.0, &cred.1, Jwk::try_from(key)?, url.to_string())
|
||||
})
|
||||
.transpose()?;
|
||||
|
||||
let data = AccountData {
|
||||
orders: None,
|
||||
status: AccountStatus::New,
|
||||
contact: self.contact.clone(),
|
||||
terms_of_service_agreed: if self.terms_of_service_agreed {
|
||||
Some(true)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
external_account_binding,
|
||||
only_return_existing: false,
|
||||
extra: HashMap::new(),
|
||||
};
|
||||
|
||||
let body = serde_json::to_string(&Jws::new(
|
||||
key,
|
||||
None,
|
||||
url.to_owned(),
|
||||
nonce.to_owned(),
|
||||
&data,
|
||||
)?)?;
|
||||
|
||||
Ok(Request {
|
||||
url: url.to_owned(),
|
||||
method: "POST",
|
||||
content_type: crate::request::JSON_CONTENT_TYPE,
|
||||
body,
|
||||
expected: crate::request::CREATED,
|
||||
})
|
||||
}
|
||||
|
||||
/// After issuing the request from [`request()`](AccountCreator::request()), the response's
|
||||
/// `Location` header and body must be passed to this for verification and to create an account
|
||||
/// which is to be persisted!
|
||||
pub fn response(self, location_header: String, response_body: &[u8]) -> Result<Account, Error> {
|
||||
let private_key = self
|
||||
.key
|
||||
.ok_or(Error::MissingKey)?
|
||||
.private_key_to_pem_pkcs8()?;
|
||||
let private_key = String::from_utf8(private_key).map_err(|_| {
|
||||
Error::Custom("PEM key contained illegal non-utf-8 characters".to_string())
|
||||
})?;
|
||||
|
||||
Ok(Account {
|
||||
location: location_header,
|
||||
data: serde_json::from_slice(response_body)
|
||||
.map_err(|err| Error::BadAccountData(err.to_string()))?,
|
||||
private_key,
|
||||
})
|
||||
}
|
||||
}
|
@ -1,587 +0,0 @@
|
||||
//! Async HTTP Client implementation for the ACME protocol.
|
||||
|
||||
use anyhow::format_err;
|
||||
use bytes::Bytes;
|
||||
use hyper::{Body, Request};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use proxmox_http::client::Client;
|
||||
|
||||
use crate::account::AccountCreator;
|
||||
use crate::order::{Order, OrderData};
|
||||
use crate::Request as AcmeRequest;
|
||||
use crate::{Account, Authorization, Challenge, Directory, Error, ErrorResponse};
|
||||
|
||||
/// A non-blocking Acme client using tokio/hyper.
pub struct AcmeClient {
    /// URL of the ACME directory resource this client talks to.
    directory_url: String,
    /// The account used to sign requests, if one was created or loaded.
    account: Option<Account>,
    /// Lazily fetched directory data (endpoint URLs); `None` until first used.
    directory: Option<Directory>,
    /// Most recently received replay nonce, consumed by the next signed request.
    nonce: Option<String>,
    /// Underlying async HTTP client.
    http_client: Client,
}
|
||||
|
||||
impl AcmeClient {
|
||||
/// Create a new ACME client for a given ACME directory URL.
|
||||
pub fn new(directory_url: String) -> Self {
|
||||
const USER_AGENT_STRING: &str = "proxmox-acme-client/1.0";
|
||||
const TCP_KEEPALIVE_TIME: u32 = 120;
|
||||
|
||||
let options = proxmox_http::HttpOptions {
|
||||
proxy_config: None, // fixme???
|
||||
user_agent: Some(USER_AGENT_STRING.to_string()),
|
||||
tcp_keepalive: Some(TCP_KEEPALIVE_TIME),
|
||||
};
|
||||
|
||||
let http_client = Client::with_options(options);
|
||||
|
||||
Self {
|
||||
directory_url,
|
||||
account: None,
|
||||
directory: None,
|
||||
nonce: None,
|
||||
http_client,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the current account, if there is one.
|
||||
pub fn account(&self) -> Option<&Account> {
|
||||
self.account.as_ref()
|
||||
}
|
||||
|
||||
/// Set the account this client should use.
|
||||
pub fn set_account(&mut self, account: Account) {
|
||||
self.account = Some(account);
|
||||
}
|
||||
|
||||
/// Convenience method to create a new account with a list of ACME compatible contact strings
/// (eg. `mailto:someone@example.com`).
///
/// Please remember to persist the returned `Account` structure somewhere to not lose access to
/// the account!
///
/// If an RSA key size is provided, an RSA key will be generated. Otherwise an EC key using the
/// P-256 curve will be generated.
pub async fn new_account(
    &mut self,
    tos_agreed: bool,
    contact: Vec<String>,
    rsa_bits: Option<u32>,
    eab_creds: Option<(String, String)>,
) -> Result<&Account, anyhow::Error> {
    let mut account = Account::creator()
        .set_contacts(contact)
        .agree_to_tos(tos_agreed);

    // External Account Binding credentials (key id + HMAC key) required by some CAs.
    if let Some((eab_kid, eab_hmac_key)) = eab_creds {
        account = account.set_eab_credentials(eab_kid, eab_hmac_key)?;
    }

    let account = if let Some(bits) = rsa_bits {
        account.generate_rsa_key(bits)?
    } else {
        account.generate_ec_key()?
    };

    let _ = self.register_account(account).await?;

    // unwrap: Setting `self.account` is literally this function's job, we just can't keep
    // the borrow from `self.register_account()` active due to clashes.
    Ok(self.account.as_ref().unwrap())
}
|
||||
|
||||
/// Shortcut to `account().ok_or_else(...).key_authorization()`.
pub fn key_authorization(&self, token: &str) -> Result<String, anyhow::Error> {
    Ok(Self::need_account(&self.account)?.key_authorization(token)?)
}

/// Shortcut to `account().ok_or_else(...).dns_01_txt_value()`, producing the TXT record
/// value for a dns-01 challenge token.
pub fn dns_01_txt_value(&self, token: &str) -> Result<String, anyhow::Error> {
    Ok(Self::need_account(&self.account)?.dns_01_txt_value(token)?)
}
|
||||
|
||||
/// Register the account described by the `AccountCreator` with the ACME provider, storing
/// the resulting account in `self.account` on success.
async fn register_account(
    &mut self,
    account: AccountCreator,
) -> Result<&Account, anyhow::Error> {
    // Retry a bounded number of times on `badNonce` errors, fetching a fresh nonce each time.
    let mut retry = retry();
    let mut response = loop {
        retry.tick()?;

        let (directory, nonce) = Self::get_dir_nonce(
            &mut self.http_client,
            &self.directory_url,
            &mut self.directory,
            &mut self.nonce,
        )
        .await?;
        let request = account.request(directory, nonce)?;
        match self.run_request(request).await {
            Ok(response) => break response,
            Err(err) if err.is_bad_nonce() => continue,
            Err(err) => return Err(err.into()),
        }
    };

    // The Location header is the account URL and is required for a valid account.
    let account = account.response(response.location_required()?, &response.body)?;

    self.account = Some(account);
    Ok(self.account.as_ref().unwrap())
}
|
||||
|
||||
/// Update account data.
///
/// Low-level version: we allow arbitrary data to be passed to the remote here, it's up to the
/// user to know what to do for now.
pub async fn update_account<T: Serialize>(
    &mut self,
    data: &T,
) -> Result<&Account, anyhow::Error> {
    let account = Self::need_account(&self.account)?;

    // Retry a bounded number of times on `badNonce` errors.
    let mut retry = retry();
    let response = loop {
        retry.tick()?;

        let (_directory, nonce) = Self::get_dir_nonce(
            &mut self.http_client,
            &self.directory_url,
            &mut self.directory,
            &mut self.nonce,
        )
        .await?;

        // Account updates are POSTed to the account's own location URL.
        let request = account.post_request(&account.location, nonce, data)?;
        match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
            Ok(response) => break response,
            Err(err) if err.is_bad_nonce() => continue,
            Err(err) => return Err(err.into()),
        }
    };

    // unwrap: we've been keeping an immutable reference to it from the top of the method
    let _ = account;
    self.account.as_mut().unwrap().data = response.json()?;
    // fixme: self.save()?;
    Ok(self.account.as_ref().unwrap())
}
|
||||
|
||||
/// Method to create a new order for a set of domains.
///
/// Please remember to persist the order somewhere (ideally along with the account data) in
/// order to finish & query it later on.
pub async fn new_order<I>(&mut self, domains: I) -> Result<Order, anyhow::Error>
where
    I: IntoIterator<Item = String>,
{
    let account = Self::need_account(&self.account)?;

    // Collect the domains into the order's identifier list.
    let order = domains
        .into_iter()
        .fold(OrderData::new(), |order, domain| order.domain(domain));

    // Retry a bounded number of times on `badNonce` errors.
    let mut retry = retry();
    loop {
        retry.tick()?;

        let (directory, nonce) = Self::get_dir_nonce(
            &mut self.http_client,
            &self.directory_url,
            &mut self.directory,
            &mut self.nonce,
        )
        .await?;

        let mut new_order = account.new_order(&order, directory, nonce)?;
        let mut response = match Self::execute(
            &mut self.http_client,
            // unwrap: a freshly created order is guaranteed to contain its request
            new_order.request.take().unwrap(),
            &mut self.nonce,
        )
        .await
        {
            Ok(response) => response,
            Err(err) if err.is_bad_nonce() => continue,
            Err(err) => return Err(err.into()),
        };

        return Ok(
            new_order.response(response.location_required()?, response.bytes().as_ref())?
        );
    }
}
|
||||
|
||||
/// Low level "POST-as-GET" request.
///
/// In ACME, authenticated reads are performed as empty-body POSTs signed with the account
/// key. Retries a bounded number of times on `badNonce` errors.
async fn post_as_get(&mut self, url: &str) -> Result<AcmeResponse, anyhow::Error> {
    let account = Self::need_account(&self.account)?;

    let mut retry = retry();
    loop {
        retry.tick()?;

        let (_directory, nonce) = Self::get_dir_nonce(
            &mut self.http_client,
            &self.directory_url,
            &mut self.directory,
            &mut self.nonce,
        )
        .await?;

        let request = account.get_request(url, nonce)?;
        match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
            Ok(response) => return Ok(response),
            Err(err) if err.is_bad_nonce() => continue,
            Err(err) => return Err(err.into()),
        }
    }
}
|
||||
|
||||
/// Low level POST request.
///
/// Signs `data` with the account key and POSTs it to `url`, retrying a bounded number of
/// times on `badNonce` errors.
async fn post<T: Serialize>(
    &mut self,
    url: &str,
    data: &T,
) -> Result<AcmeResponse, anyhow::Error> {
    let account = Self::need_account(&self.account)?;

    let mut retry = retry();
    loop {
        retry.tick()?;

        let (_directory, nonce) = Self::get_dir_nonce(
            &mut self.http_client,
            &self.directory_url,
            &mut self.directory,
            &mut self.nonce,
        )
        .await?;

        let request = account.post_request(url, nonce, data)?;
        match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
            Ok(response) => return Ok(response),
            Err(err) if err.is_bad_nonce() => continue,
            Err(err) => return Err(err.into()),
        }
    }
}
|
||||
|
||||
/// Request challenge validation. Afterwards, the challenge should be polled.
|
||||
pub async fn request_challenge_validation(
|
||||
&mut self,
|
||||
url: &str,
|
||||
) -> Result<Challenge, anyhow::Error> {
|
||||
Ok(self
|
||||
.post(url, &serde_json::Value::Object(Default::default()))
|
||||
.await?
|
||||
.json()?)
|
||||
}
|
||||
|
||||
/// Assuming the provided URL is an 'Authorization' URL, get and deserialize it.
|
||||
pub async fn get_authorization(&mut self, url: &str) -> Result<Authorization, anyhow::Error> {
|
||||
Ok(self.post_as_get(url).await?.json()?)
|
||||
}
|
||||
|
||||
/// Assuming the provided URL is an 'Order' URL, get and deserialize it.
|
||||
pub async fn get_order(&mut self, url: &str) -> Result<OrderData, anyhow::Error> {
|
||||
Ok(self.post_as_get(url).await?.json()?)
|
||||
}
|
||||
|
||||
/// Finalize an Order via its `finalize` URL property and the DER encoded CSR.
pub async fn finalize(&mut self, url: &str, csr: &[u8]) -> Result<(), anyhow::Error> {
    // RFC 8555 requires the CSR to be unpadded base64url encoded.
    let csr = base64::encode_config(csr, base64::URL_SAFE_NO_PAD);
    let data = serde_json::json!({ "csr": csr });
    self.post(url, &data).await?;
    Ok(())
}

/// Download a certificate via its 'certificate' URL property.
///
/// The certificate will be a PEM certificate chain.
pub async fn get_certificate(&mut self, url: &str) -> Result<Bytes, anyhow::Error> {
    Ok(self.post_as_get(url).await?.body)
}
|
||||
|
||||
/// Revoke an existing certificate (PEM or DER formatted).
pub async fn revoke_certificate(
    &mut self,
    certificate: &[u8],
    reason: Option<u32>,
) -> Result<(), anyhow::Error> {
    // TODO: This can also work without an account.
    let account = Self::need_account(&self.account)?;

    let revocation = account.revoke_certificate(certificate, reason)?;

    // Retry a bounded number of times on `badNonce` errors.
    let mut retry = retry();
    loop {
        retry.tick()?;

        let (directory, nonce) = Self::get_dir_nonce(
            &mut self.http_client,
            &self.directory_url,
            &mut self.directory,
            &mut self.nonce,
        )
        .await?;

        let request = revocation.request(directory, nonce)?;
        match Self::execute(&mut self.http_client, request, &mut self.nonce).await {
            Ok(_response) => return Ok(()),
            Err(err) if err.is_bad_nonce() => continue,
            Err(err) => return Err(err.into()),
        }
    }
}
|
||||
|
||||
fn need_account(account: &Option<Account>) -> Result<&Account, anyhow::Error> {
|
||||
account
|
||||
.as_ref()
|
||||
.ok_or_else(|| format_err!("cannot use client without an account"))
|
||||
}
|
||||
|
||||
/// Get the directory URL without querying the `Directory` structure.
|
||||
///
|
||||
/// The difference to [`directory`](AcmeClient::directory()) is that this does not
|
||||
/// attempt to fetch the directory data from the ACME server.
|
||||
pub fn directory_url(&self) -> &str {
|
||||
&self.directory_url
|
||||
}
|
||||
}
|
||||
|
||||
/// Internal representation of a response from the ACME server.
struct AcmeResponse {
    /// Raw response body.
    body: Bytes,
    /// Value of the `Location` header, if present (account/order URLs).
    location: Option<String>,
    /// Whether the response carried a `Replay-Nonce` header.
    got_nonce: bool,
}
|
||||
|
||||
impl AcmeResponse {
|
||||
/// Convenience helper to assert that a location header was part of the response.
|
||||
fn location_required(&mut self) -> Result<String, anyhow::Error> {
|
||||
self.location
|
||||
.take()
|
||||
.ok_or_else(|| format_err!("missing Location header"))
|
||||
}
|
||||
|
||||
/// Convenience shortcut to perform json deserialization of the returned body.
|
||||
fn json<T: for<'a> Deserialize<'a>>(&self) -> Result<T, Error> {
|
||||
Ok(serde_json::from_slice(&self.body)?)
|
||||
}
|
||||
|
||||
/// Convenience shortcut to get the body as bytes.
|
||||
fn bytes(&self) -> &[u8] {
|
||||
&self.body
|
||||
}
|
||||
}
|
||||
|
||||
impl AcmeClient {
|
||||
/// Non-self-borrowing run_request version for borrow workarounds.
///
/// Performs the HTTP request, stores a new replay nonce if one was returned, verifies the
/// expected status code, and maps ACME error responses to `Error` values (including the
/// special `badNonce` case which callers use to retry).
async fn execute(
    http_client: &mut Client,
    request: AcmeRequest,
    nonce: &mut Option<String>,
) -> Result<AcmeResponse, Error> {
    let req_builder = Request::builder().method(request.method).uri(&request.url);

    // Only requests with a content type carry a body; GET/HEAD use an empty one.
    let http_request = if !request.content_type.is_empty() {
        req_builder
            .header("Content-Type", request.content_type)
            .header("Content-Length", request.body.len())
            .body(request.body.into())
    } else {
        req_builder.body(Body::empty())
    }
    .map_err(|err| Error::Custom(format!("failed to create http request: {}", err)))?;

    let response = http_client
        .request(http_request)
        .await
        .map_err(|err| Error::Custom(err.to_string()))?;
    let (parts, body) = response.into_parts();

    let status = parts.status.as_u16();
    let body = hyper::body::to_bytes(body)
        .await
        .map_err(|err| Error::Custom(format!("failed to retrieve response body: {}", err)))?;

    // Remember any Replay-Nonce header for the next signed request.
    let got_nonce = if let Some(new_nonce) = parts.headers.get(crate::REPLAY_NONCE) {
        let new_nonce = new_nonce.to_str().map_err(|err| {
            Error::Client(format!(
                "received invalid replay-nonce header from ACME server: {}",
                err
            ))
        })?;
        *nonce = Some(new_nonce.to_owned());
        true
    } else {
        false
    };

    if parts.status.is_success() {
        // Success, but not the status code the caller expected, is still an API error.
        if status != request.expected {
            return Err(Error::InvalidApi(format!(
                "ACME server responded with unexpected status code: {:?}",
                parts.status
            )));
        }

        let location = parts
            .headers
            .get("Location")
            .map(|header| {
                header.to_str().map(str::to_owned).map_err(|err| {
                    Error::Client(format!(
                        "received invalid location header from ACME server: {}",
                        err
                    ))
                })
            })
            .transpose()?;

        return Ok(AcmeResponse {
            body,
            location,
            got_nonce,
        });
    }

    // Non-success: the body should contain a structured ACME error response.
    let error: ErrorResponse = serde_json::from_slice(&body).map_err(|err| {
        Error::Client(format!(
            "error status with improper error ACME response: {}",
            err
        ))
    })?;

    if error.ty == crate::error::BAD_NONCE {
        // A badNonce error must come with a fresh nonce, otherwise retrying is pointless.
        if !got_nonce {
            return Err(Error::InvalidApi(
                "badNonce without a new Replay-Nonce header".to_string(),
            ));
        }
        return Err(Error::BadNonce);
    }

    Err(Error::Api(error))
}
|
||||
|
||||
/// Low-level API to run an n API request. This automatically updates the current nonce!
|
||||
async fn run_request(&mut self, request: AcmeRequest) -> Result<AcmeResponse, Error> {
|
||||
Self::execute(&mut self.http_client, request, &mut self.nonce).await
|
||||
}
|
||||
|
||||
/// Get the Directory information.
|
||||
pub async fn directory(&mut self) -> Result<&Directory, Error> {
|
||||
Ok(Self::get_directory(
|
||||
&mut self.http_client,
|
||||
&self.directory_url,
|
||||
&mut self.directory,
|
||||
&mut self.nonce,
|
||||
)
|
||||
.await?
|
||||
.0)
|
||||
}
|
||||
|
||||
/// Fetch (and cache) the directory data, returning it together with any known nonce.
///
/// The returned references borrow from the passed-in option slots rather than `self`, which
/// lets callers keep borrowing other parts of the client concurrently.
async fn get_directory<'a, 'b>(
    http_client: &mut Client,
    directory_url: &str,
    directory: &'a mut Option<Directory>,
    nonce: &'b mut Option<String>,
) -> Result<(&'a Directory, Option<&'b str>), Error> {
    // Already fetched: return the cached data.
    if let Some(d) = directory {
        return Ok((d, nonce.as_deref()));
    }

    let response = Self::execute(
        http_client,
        AcmeRequest {
            url: directory_url.to_string(),
            method: "GET",
            content_type: "",
            body: String::new(),
            expected: 200,
        },
        nonce,
    )
    .await?;

    *directory = Some(Directory::from_parts(
        directory_url.to_string(),
        response.json()?,
    ));

    // unwrap: just assigned above
    Ok((directory.as_ref().unwrap(), nonce.as_deref()))
}
|
||||
|
||||
/// Like `get_directory`, but if the directory provides no nonce, also performs a `HEAD`
/// request on the new nonce URL.
async fn get_dir_nonce<'a, 'b>(
    http_client: &mut Client,
    directory_url: &str,
    directory: &'a mut Option<Directory>,
    nonce: &'b mut Option<String>,
) -> Result<(&'a Directory, &'b str), Error> {
    // this let construct is a lifetime workaround:
    let _ = Self::get_directory(http_client, directory_url, directory, nonce).await?;
    let dir = directory.as_ref().unwrap(); // the above fails if it couldn't fill this option
    if nonce.is_none() {
        // this is also a lifetime issue...
        let _ = Self::get_nonce(http_client, nonce, dir.new_nonce_url()).await?;
    };
    // unwrap: `get_nonce` errors out if it couldn't store a nonce
    Ok((dir, nonce.as_deref().unwrap()))
}
|
||||
|
||||
/// Convenience method to get the ToS URL from the contained `Directory`.
|
||||
///
|
||||
/// This requires mutable self as the directory information may be lazily loaded, which can
|
||||
/// fail.
|
||||
pub async fn terms_of_service_url(&mut self) -> Result<Option<&str>, Error> {
|
||||
Ok(self.directory().await?.terms_of_service_url())
|
||||
}
|
||||
|
||||
/// Fetch a fresh replay nonce by issuing a `HEAD` request against the `newNonce` URL,
/// storing it in (and borrowing it from) the provided slot.
async fn get_nonce<'a>(
    http_client: &mut Client,
    nonce: &'a mut Option<String>,
    new_nonce_url: &str,
) -> Result<&'a str, Error> {
    let response = Self::execute(
        http_client,
        AcmeRequest {
            url: new_nonce_url.to_owned(),
            method: "HEAD",
            content_type: "",
            body: String::new(),
            expected: 200,
        },
        nonce,
    )
    .await?;

    // `execute` stores the nonce as a side effect; a newNonce URL must provide one.
    if !response.got_nonce {
        return Err(Error::InvalidApi(
            "no new nonce received from new nonce URL".to_string(),
        ));
    }

    nonce
        .as_deref()
        .ok_or_else(|| Error::Client("failed to update nonce".to_string()))
}
|
||||
}
|
||||
|
||||
/// bad nonce retry count helper
|
||||
struct Retry(usize);
|
||||
|
||||
const fn retry() -> Retry {
|
||||
Retry(0)
|
||||
}
|
||||
|
||||
impl Retry {
|
||||
fn tick(&mut self) -> Result<(), Error> {
|
||||
if self.0 >= 3 {
|
||||
Err(Error::Client("kept getting a badNonce error!".to_string()))
|
||||
} else {
|
||||
self.0 += 1;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
@ -1,162 +0,0 @@
|
||||
//! Authorization and Challenge data.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::order::Identifier;
|
||||
use crate::request::Request;
|
||||
use crate::Error;
|
||||
|
||||
/// Status of an [`Authorization`].
///
/// Serialized in lowercase to match the RFC 8555 wire format.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum Status {
    /// The authorization was deactivated by the client.
    Deactivated,

    /// The authorization expired.
    Expired,

    /// The authorization failed and is now invalid.
    Invalid,

    /// Validation is pending.
    Pending,

    /// The authorization was revoked by the server.
    Revoked,

    /// The identifier is authorized.
    Valid,
}
|
||||
|
||||
impl Status {
|
||||
/// Convenience method to check if the status is 'pending'.
|
||||
#[inline]
|
||||
pub fn is_pending(self) -> bool {
|
||||
self == Status::Pending
|
||||
}
|
||||
|
||||
/// Convenience method to check if the status is 'valid'.
|
||||
#[inline]
|
||||
pub fn is_valid(self) -> bool {
|
||||
self == Status::Valid
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents an authorization state for an order. The user is expected to pick a challenge,
/// execute it, and the request validation for it.
///
/// Fields use camelCase names on the wire, per RFC 8555.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Authorization {
    /// The identifier (usually domain name) this authorization is for.
    pub identifier: Identifier,

    /// The current status of this authorization entry.
    pub status: Status,

    /// Expiration date for the authorization.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub expires: Option<String>,

    /// List of challenges which can be used to complete this authorization.
    pub challenges: Vec<Challenge>,

    /// The authorization is for a wildcard domain.
    #[serde(default, skip_serializing_if = "is_false")]
    pub wildcard: bool,
}
|
||||
|
||||
/// The state of a challenge.
///
/// Serialized in lowercase to match the RFC 8555 wire format.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum ChallengeStatus {
    /// The challenge is pending and has not been validated yet.
    Pending,

    /// The validation is in progress.
    Processing,

    /// The challenge was successfully validated.
    Valid,

    /// Validation of this challenge failed.
    Invalid,
}
|
||||
|
||||
impl ChallengeStatus {
|
||||
/// Convenience method to check if the status is 'pending'.
|
||||
#[inline]
|
||||
pub fn is_pending(self) -> bool {
|
||||
self == ChallengeStatus::Pending
|
||||
}
|
||||
|
||||
/// Convenience method to check if the status is 'valid'.
|
||||
#[inline]
|
||||
pub fn is_valid(self) -> bool {
|
||||
self == ChallengeStatus::Valid
|
||||
}
|
||||
}
|
||||
|
||||
/// A challenge object contains information on how to complete an authorization for an order.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Challenge {
    /// The challenge type (such as `"dns-01"`).
    #[serde(rename = "type")]
    pub ty: String,

    /// The current challenge status.
    pub status: ChallengeStatus,

    /// The URL used to post to in order to begin the validation for this challenge.
    pub url: String,

    /// Contains the remaining fields of the Challenge object, such as the `token`.
    #[serde(flatten)]
    pub data: HashMap<String, Value>,
}
|
||||
|
||||
impl Challenge {
|
||||
/// Most challenges have a `token` used for key authorizations. This is a convenience helper to
|
||||
/// access it.
|
||||
pub fn token(&self) -> Option<&str> {
|
||||
self.data.get("token").and_then(Value::as_str)
|
||||
}
|
||||
}
|
||||
|
||||
/// Serde helper: true when the flag is `false`, used to skip serializing default flags.
#[inline]
fn is_false(b: &bool) -> bool {
    match *b {
        true => false,
        false => true,
    }
}
|
||||
|
||||
/// Represents an in-flight query for an authorization.
///
/// This is created via [`Account::get_authorization`](crate::Account::get_authorization()).
pub struct GetAuthorization {
    //order: OrderData,
    /// The request to send to the ACME provider. This is wrapped in an option in order to allow
    /// moving it out instead of copying the contents.
    ///
    /// When generated via [`Account::get_authorization`](crate::Account::get_authorization()),
    /// this is guaranteed to be `Some`.
    ///
    /// The response should be passed to the [`response`](GetAuthorization::response()) method.
    pub request: Option<Request>,
}
|
||||
|
||||
impl GetAuthorization {
|
||||
pub(crate) fn new(request: Request) -> Self {
|
||||
Self {
|
||||
request: Some(request),
|
||||
}
|
||||
}
|
||||
|
||||
/// Deal with the response we got from the server.
|
||||
pub fn response(self, response_body: &[u8]) -> Result<Authorization, Error> {
|
||||
Ok(serde_json::from_slice(response_body)?)
|
||||
}
|
||||
}
|
@ -1,43 +0,0 @@
|
||||
/// The base64 configuration used throughout: URL-safe alphabet, no padding, as required for
/// ACME's base64url encoding (RFC 8555).
fn config() -> base64::Config {
    base64::Config::new(base64::CharacterSet::UrlSafe, false)
}

/// Encode bytes as base64url into a `String`.
pub fn encode(data: &[u8]) -> String {
    base64::encode_config(data, config())
}

/// Decode a base64url encoded string.
pub fn decode<T: AsRef<[u8]>>(data: &T) -> Result<Vec<u8>, crate::Error> {
    Ok(base64::decode_config(data.as_ref(), config())?)
}
|
||||
|
||||
// curiously currently unused as we don't deserialize any of that
|
||||
// /// Decode bytes from a base64url string.
|
||||
// pub fn decode(data: &str) -> Result<Vec<u8>, base64::DecodeError> {
|
||||
// base64::decode_config(data, config())
|
||||
// }
|
||||
|
||||
/// Our serde module for encoding bytes as base64url encoded strings.
pub mod bytes {
    use serde::{Serialize, Serializer};
    //use serde::{Deserialize, Deserializer};

    /// Serialize raw bytes as an unpadded base64url string.
    pub fn serialize<S>(data: &[u8], serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        super::encode(data).serialize(serializer)
    }

    // curiously currently unused as we don't deserialize any of that
    // pub fn deserialize<'de, D>(deserializer: D) -> Result<Vec<u8>, D::Error>
    // where
    //     D: Deserializer<'de>,
    // {
    //     use serde::de::Error;

    //     Ok(super::decode(&String::deserialize(deserializer)?)
    //         .map_err(|e| D::Error::custom(e.to_string()))?)
    // }
}
|
@ -1,614 +0,0 @@
|
||||
//! A blocking higher-level ACME client implementation using 'curl'.
|
||||
|
||||
use std::io::Read;
|
||||
use std::sync::Arc;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::b64u;
|
||||
use crate::error;
|
||||
use crate::order::OrderData;
|
||||
use crate::request::ErrorResponse;
|
||||
use crate::{Account, Authorization, Challenge, Directory, Error, Order, Request};
|
||||
|
||||
// Local error helpers mirroring anyhow's macros, but producing this crate's `Error::Client`.
macro_rules! format_err {
    ($($fmt:tt)*) => { Error::Client(format!($($fmt)*)) };
}

// Early-return with a formatted `Error::Client`.
macro_rules! bail {
    ($($fmt:tt)*) => {{ return Err(format_err!($($fmt)*)); }}
}
|
||||
|
||||
/// Low level HTTP response structure.
pub struct HttpResponse {
    /// The raw HTTP response body as a byte vector.
    pub body: Vec<u8>,

    /// The http status code.
    pub status: u16,

    /// The headers relevant to the ACME protocol.
    pub headers: Headers,
}
|
||||
|
||||
impl HttpResponse {
|
||||
/// Check the HTTP status code for a success code (200..299).
|
||||
pub fn is_success(&self) -> bool {
|
||||
self.status >= 200 && self.status < 300
|
||||
}
|
||||
|
||||
/// Convenience shortcut to perform json deserialization of the returned body.
|
||||
pub fn json<T: for<'a> Deserialize<'a>>(&self) -> Result<T, Error> {
|
||||
Ok(serde_json::from_slice(&self.body)?)
|
||||
}
|
||||
|
||||
/// Access the raw body as bytes.
|
||||
pub fn bytes(&self) -> &[u8] {
|
||||
&self.body
|
||||
}
|
||||
|
||||
/// Get the returned location header. Borrowing shortcut to `self.headers.location`.
|
||||
pub fn location(&self) -> Option<&str> {
|
||||
self.headers.location.as_deref()
|
||||
}
|
||||
|
||||
/// Convenience helper to assert that a location header was part of the response.
|
||||
pub fn location_required(&mut self) -> Result<String, Error> {
|
||||
self.headers
|
||||
.location
|
||||
.take()
|
||||
.ok_or_else(|| format_err!("missing Location header"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Contains headers from the HTTP response which are relevant parts of the Acme API.
///
/// Note that access to the `nonce` header is internal to this crate only, since a nonce will
/// always be moved out of the response into the `Client` whenever a new nonce is received.
#[derive(Default)]
pub struct Headers {
    /// The 'Location' header usually encodes the URL where an account or order can be queried from
    /// after they were created.
    pub location: Option<String>,
    // Replay nonce; crate-internal, moved into the client on receipt.
    nonce: Option<String>,
}
|
||||
|
||||
/// Shared low-level HTTP state of the blocking client.
struct Inner {
    /// Cached ureq agent; built lazily and dropped when settings (e.g. proxy) change.
    agent: Option<ureq::Agent>,
    /// Most recently received replay nonce from the ACME server.
    nonce: Option<String>,
    /// Optional HTTP proxy specification.
    proxy: Option<String>,
}
|
||||
|
||||
impl Inner {
|
||||
/// Lazily build (and cache) the ureq agent, applying TLS and the optional proxy setting.
fn agent(&mut self) -> Result<&mut ureq::Agent, Error> {
    if self.agent.is_none() {
        let connector = Arc::new(
            native_tls::TlsConnector::new()
                .map_err(|err| format_err!("failed to create tls connector: {}", err))?,
        );

        let mut builder = ureq::AgentBuilder::new().tls_connector(connector);

        if let Some(proxy) = self.proxy.as_deref() {
            builder = builder.proxy(
                ureq::Proxy::new(proxy)
                    .map_err(|err| format_err!("failed to set proxy: {}", err))?,
            );
        }

        self.agent = Some(builder.build());
    }

    // unwrap: just assigned above if it was `None`
    Ok(self.agent.as_mut().unwrap())
}
|
||||
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
agent: None,
|
||||
nonce: None,
|
||||
proxy: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform a raw HTTP request, collecting the status, the ACME-relevant headers and the
/// (size-limited) body into an `HttpResponse`.
fn execute(
    &mut self,
    method: &[u8],
    url: &str,
    request_body: Option<(&str, &[u8])>, // content-type and body
) -> Result<HttpResponse, Error> {
    let agent = self.agent()?;
    let req = match method {
        b"POST" => agent.post(url),
        b"GET" => agent.get(url),
        b"HEAD" => agent.head(url),
        other => bail!("invalid http method: {:?}", other),
    };

    let response = if let Some((content_type, body)) = request_body {
        req.set("Content-Type", content_type)
            .set("Content-Length", &body.len().to_string())
            .send_bytes(body)
    } else {
        req.call()
    }
    .map_err(|err| format_err!("http request failed: {}", err))?;

    // Extract the headers the ACME protocol cares about.
    let mut headers = Headers::default();
    if let Some(value) = response.header(crate::LOCATION) {
        headers.location = Some(value.to_owned());
    }

    if let Some(value) = response.header(crate::REPLAY_NONCE) {
        headers.nonce = Some(value.to_owned());
    }

    let status = response.status();

    let mut body = Vec::new();
    response
        .into_reader()
        .take(16 * 1024 * 1024) // arbitrary limit
        .read_to_end(&mut body)
        .map_err(|err| format_err!("failed to read response body: {}", err))?;

    Ok(HttpResponse {
        status,
        headers,
        body,
    })
}
|
||||
|
||||
/// Set an HTTP proxy; drops the cached agent so the next request picks up the new setting.
pub fn set_proxy(&mut self, proxy: String) {
    self.proxy = Some(proxy);
    self.agent = None;
}
|
||||
|
||||
/// Low-level API to run an API request. This automatically updates the current nonce!
fn run_request(&mut self, request: Request) -> Result<HttpResponse, Error> {
    // An empty request body means a GET/HEAD style request without content.
    let body = if request.body.is_empty() {
        None
    } else {
        Some((request.content_type, request.body.as_bytes()))
    };

    let mut response = self
        .execute(request.method.as_bytes(), &request.url, body)
        .map_err({
            // borrow fixup:
            let method = &request.method;
            let url = &request.url;
            move |err| format_err!("failed to execute {} request to {}: {}", method, url, err)
        })?;

    let got_nonce = self.update_nonce(&mut response)?;

    if response.is_success() {
        // Success, but not the status the caller expected, still counts as an API error.
        if response.status != request.expected {
            return Err(Error::InvalidApi(format!(
                "API server responded with unexpected status code: {:?}",
                response.status
            )));
        }
        return Ok(response);
    }

    // Non-success: the body should contain a structured ACME error response.
    let error: ErrorResponse = response.json().map_err(|err| {
        format_err!("error status with improper error ACME response: {}", err)
    })?;

    if error.ty == error::BAD_NONCE {
        // A badNonce error must come with a fresh nonce, otherwise retrying is pointless.
        if !got_nonce {
            return Err(Error::InvalidApi(
                "badNonce without a new Replay-Nonce header".to_string(),
            ));
        }
        return Err(Error::BadNonce);
    }

    Err(Error::Api(error))
}
|
||||
|
||||
/// If the response contained a nonce, update our nonce and return `true`, otherwise return
|
||||
/// `false`.
|
||||
fn update_nonce(&mut self, response: &mut HttpResponse) -> Result<bool, Error> {
|
||||
match response.headers.nonce.take() {
|
||||
Some(nonce) => {
|
||||
self.nonce = Some(nonce);
|
||||
Ok(true)
|
||||
}
|
||||
None => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// Update the nonce, if there isn't one it is an error.
|
||||
fn must_update_nonce(&mut self, response: &mut HttpResponse) -> Result<(), Error> {
|
||||
if !self.update_nonce(response)? {
|
||||
bail!("newNonce URL did not return a nonce");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Update the Nonce.
|
||||
fn new_nonce(&mut self, new_nonce_url: &str) -> Result<(), Error> {
|
||||
let mut response = self.execute(b"HEAD", new_nonce_url, None).map_err(|err| {
|
||||
Error::InvalidApi(format!("failed to get HEAD of newNonce URL: {}", err))
|
||||
})?;
|
||||
|
||||
if !response.is_success() {
|
||||
bail!("HEAD on newNonce URL returned error");
|
||||
}
|
||||
|
||||
self.must_update_nonce(&mut response)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Make sure a nonce is available without forcing renewal.
|
||||
fn nonce(&mut self, new_nonce_url: &str) -> Result<&str, Error> {
|
||||
if self.nonce.is_none() {
|
||||
self.new_nonce(new_nonce_url)?;
|
||||
}
|
||||
self.nonce
|
||||
.as_deref()
|
||||
.ok_or_else(|| format_err!("failed to get nonce"))
|
||||
}
|
||||
}
|
||||
|
||||
/// A blocking ACME client.
// NOTE(review): the previous doc claimed "curl's `Easy` interface", but the error type in
// this crate documents ureq as the HTTP client — confirm the actual backend.
pub struct Client {
    /// HTTP agent, proxy and nonce state shared by all requests.
    inner: Inner,
    /// Lazily fetched directory information; `None` until first queried.
    directory: Option<Directory>,
    /// The account used for signed requests, if one has been attached or registered.
    account: Option<Account>,
    /// The ACME directory's entry point URL.
    directory_url: String,
}
|
||||
|
||||
impl Client {
|
||||
/// Create a new Client. This has no account associated with it yet, so the next step is to
|
||||
/// either attach an existing `Account` or create a new one.
|
||||
pub fn new(directory_url: String) -> Self {
|
||||
Self {
|
||||
inner: Inner::new(),
|
||||
directory: None,
|
||||
account: None,
|
||||
directory_url,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the directory URL without querying the `Directory` structure.
|
||||
///
|
||||
/// The difference to [`directory`](Client::directory()) is that this does not
|
||||
/// attempt to fetch the directory data from the ACME server.
|
||||
pub fn directory_url(&self) -> &str {
|
||||
&self.directory_url
|
||||
}
|
||||
|
||||
/// Set the account this client should use.
|
||||
pub fn set_account(&mut self, account: Account) {
|
||||
self.account = Some(account);
|
||||
}
|
||||
|
||||
/// Get the Directory information.
|
||||
pub fn directory(&mut self) -> Result<&Directory, Error> {
|
||||
Self::get_directory(&mut self.inner, &mut self.directory, &self.directory_url)
|
||||
}
|
||||
|
||||
/// Get the Directory information.
|
||||
fn get_directory<'a>(
|
||||
inner: &'_ mut Inner,
|
||||
directory: &'a mut Option<Directory>,
|
||||
directory_url: &str,
|
||||
) -> Result<&'a Directory, Error> {
|
||||
if let Some(d) = directory {
|
||||
return Ok(d);
|
||||
}
|
||||
|
||||
let response = inner
|
||||
.execute(b"GET", directory_url, None)
|
||||
.map_err(|err| Error::InvalidApi(format!("failed to get directory info: {}", err)))?;
|
||||
|
||||
if !response.is_success() {
|
||||
bail!(
|
||||
"GET on the directory URL returned error status ({})",
|
||||
response.status
|
||||
);
|
||||
}
|
||||
|
||||
*directory = Some(Directory::from_parts(
|
||||
directory_url.to_string(),
|
||||
response.json()?,
|
||||
));
|
||||
Ok(directory.as_ref().unwrap())
|
||||
}
|
||||
|
||||
/// Get the current account, if there is one.
|
||||
pub fn account(&self) -> Option<&Account> {
|
||||
self.account.as_ref()
|
||||
}
|
||||
|
||||
/// Convenience method to get the ToS URL from the contained `Directory`.
|
||||
///
|
||||
/// This requires mutable self as the directory information may be lazily loaded, which can
|
||||
/// fail.
|
||||
pub fn terms_of_service_url(&mut self) -> Result<Option<&str>, Error> {
|
||||
Ok(self.directory()?.terms_of_service_url())
|
||||
}
|
||||
|
||||
/// Get a fresh nonce (this should normally not be required as nonces are updated
|
||||
/// automatically, even when a `badNonce` error occurs, which according to the ACME API
|
||||
/// specification should include a new valid nonce in its headers anyway).
|
||||
pub fn new_nonce(&mut self) -> Result<(), Error> {
|
||||
let was_none = self.inner.nonce.is_none();
|
||||
let directory =
|
||||
Self::get_directory(&mut self.inner, &mut self.directory, &self.directory_url)?;
|
||||
if was_none && self.inner.nonce.is_some() {
|
||||
// this was the first call and we already got a nonce from querying the directory
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// otherwise actually call up to get a new nonce
|
||||
self.inner.new_nonce(directory.new_nonce_url())
|
||||
}
|
||||
|
||||
/// borrow helper
|
||||
fn nonce<'a>(inner: &'a mut Inner, directory: &'_ Directory) -> Result<&'a str, Error> {
|
||||
inner.nonce(directory.new_nonce_url())
|
||||
}
|
||||
|
||||
/// Convenience method to create a new account with a list of ACME compatible contact strings
|
||||
/// (eg. `mailto:someone@example.com`).
|
||||
///
|
||||
/// Please remember to persist the returned `Account` structure somewhere to not lose access to
|
||||
/// the account!
|
||||
///
|
||||
/// If an RSA key size is provided, an RSA key will be generated. Otherwise an EC key using the
|
||||
/// P-256 curve will be generated.
|
||||
pub fn new_account(
|
||||
&mut self,
|
||||
contact: Vec<String>,
|
||||
tos_agreed: bool,
|
||||
rsa_bits: Option<u32>,
|
||||
eab_creds: Option<(String, String)>,
|
||||
) -> Result<&Account, Error> {
|
||||
let mut account = Account::creator()
|
||||
.set_contacts(contact)
|
||||
.agree_to_tos(tos_agreed);
|
||||
if let Some((eab_kid, eab_hmac_key)) = eab_creds {
|
||||
account = account.set_eab_credentials(eab_kid, eab_hmac_key)?;
|
||||
}
|
||||
let account = if let Some(bits) = rsa_bits {
|
||||
account.generate_rsa_key(bits)?
|
||||
} else {
|
||||
account.generate_ec_key()?
|
||||
};
|
||||
|
||||
self.register_account(account)
|
||||
}
|
||||
|
||||
/// Register an ACME account.
|
||||
///
|
||||
/// This uses an [`AccountCreator`](crate::account::AccountCreator) since it may need to build
|
||||
/// the request multiple times in case the we get a `BadNonce` error.
|
||||
pub fn register_account(
|
||||
&mut self,
|
||||
account: crate::account::AccountCreator,
|
||||
) -> Result<&Account, Error> {
|
||||
let mut retry = retry();
|
||||
let mut response = loop {
|
||||
retry.tick()?;
|
||||
|
||||
let directory =
|
||||
Self::get_directory(&mut self.inner, &mut self.directory, &self.directory_url)?;
|
||||
let nonce = Self::nonce(&mut self.inner, directory)?;
|
||||
let request = account.request(directory, nonce)?;
|
||||
match self.run_request(request) {
|
||||
Ok(response) => break response,
|
||||
Err(err) if err.is_bad_nonce() => continue,
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
};
|
||||
|
||||
let account = account.response(response.location_required()?, response.bytes().as_ref())?;
|
||||
|
||||
self.account = Some(account);
|
||||
Ok(self.account.as_ref().unwrap())
|
||||
}
|
||||
|
||||
fn need_account(account: &Option<Account>) -> Result<&Account, Error> {
|
||||
account
|
||||
.as_ref()
|
||||
.ok_or_else(|| format_err!("cannot use client without an account"))
|
||||
}
|
||||
|
||||
/// Update account data.
|
||||
///
|
||||
/// Low-level version: we allow arbitrary data to be passed to the remote here, it's up to the
|
||||
/// user to know what to do for now.
|
||||
pub fn update_account<T: Serialize>(&mut self, data: &T) -> Result<&Account, Error> {
|
||||
let account = Self::need_account(&self.account)?;
|
||||
|
||||
let mut retry = retry();
|
||||
let response = loop {
|
||||
retry.tick()?;
|
||||
let directory =
|
||||
Self::get_directory(&mut self.inner, &mut self.directory, &self.directory_url)?;
|
||||
let nonce = Self::nonce(&mut self.inner, directory)?;
|
||||
let request = account.post_request(&account.location, nonce, data)?;
|
||||
let response = match self.inner.run_request(request) {
|
||||
Ok(response) => response,
|
||||
Err(err) if err.is_bad_nonce() => continue,
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
|
||||
break response;
|
||||
};
|
||||
|
||||
// unwrap: we asserted we have an account at the top of the method!
|
||||
let account = self.account.as_mut().unwrap();
|
||||
account.data = response.json()?;
|
||||
Ok(account)
|
||||
}
|
||||
|
||||
/// Method to create a new order for a set of domains.
|
||||
///
|
||||
/// Please remember to persist the order somewhere (ideally along with the account data) in
|
||||
/// order to finish & query it later on.
|
||||
pub fn new_order(&mut self, domains: Vec<String>) -> Result<Order, Error> {
|
||||
let account = Self::need_account(&self.account)?;
|
||||
|
||||
let order = domains
|
||||
.into_iter()
|
||||
.fold(OrderData::new(), |order, domain| order.domain(domain));
|
||||
|
||||
let mut retry = retry();
|
||||
loop {
|
||||
retry.tick()?;
|
||||
|
||||
let directory =
|
||||
Self::get_directory(&mut self.inner, &mut self.directory, &self.directory_url)?;
|
||||
let nonce = Self::nonce(&mut self.inner, directory)?;
|
||||
let mut new_order = account.new_order(&order, directory, nonce)?;
|
||||
let mut response = match self.inner.run_request(new_order.request.take().unwrap()) {
|
||||
Ok(response) => response,
|
||||
Err(err) if err.is_bad_nonce() => continue,
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
|
||||
return new_order.response(response.location_required()?, response.bytes().as_ref());
|
||||
}
|
||||
}
|
||||
|
||||
/// Assuming the provided URL is an 'Authorization' URL, get and deserialize it.
|
||||
pub fn get_authorization(&mut self, url: &str) -> Result<Authorization, Error> {
|
||||
self.post_as_get(url)?.json()
|
||||
}
|
||||
|
||||
/// Assuming the provided URL is an 'Order' URL, get and deserialize it.
|
||||
pub fn get_order(&mut self, url: &str) -> Result<OrderData, Error> {
|
||||
self.post_as_get(url)?.json()
|
||||
}
|
||||
|
||||
/// Low level "POST-as-GET" request.
|
||||
pub fn post_as_get(&mut self, url: &str) -> Result<HttpResponse, Error> {
|
||||
let account = Self::need_account(&self.account)?;
|
||||
|
||||
let mut retry = retry();
|
||||
loop {
|
||||
retry.tick()?;
|
||||
|
||||
let directory =
|
||||
Self::get_directory(&mut self.inner, &mut self.directory, &self.directory_url)?;
|
||||
let nonce = Self::nonce(&mut self.inner, directory)?;
|
||||
let request = account.get_request(url, nonce)?;
|
||||
match self.inner.run_request(request) {
|
||||
Ok(response) => return Ok(response),
|
||||
Err(err) if err.is_bad_nonce() => continue,
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Low level POST request.
|
||||
pub fn post<T: Serialize>(&mut self, url: &str, data: &T) -> Result<HttpResponse, Error> {
|
||||
let account = Self::need_account(&self.account)?;
|
||||
|
||||
let mut retry = retry();
|
||||
loop {
|
||||
retry.tick()?;
|
||||
|
||||
let directory =
|
||||
Self::get_directory(&mut self.inner, &mut self.directory, &self.directory_url)?;
|
||||
let nonce = Self::nonce(&mut self.inner, directory)?;
|
||||
let request = account.post_request(url, nonce, data)?;
|
||||
match self.inner.run_request(request) {
|
||||
Ok(response) => return Ok(response),
|
||||
Err(err) if err.is_bad_nonce() => continue,
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Request challenge validation. Afterwards, the challenge should be polled.
|
||||
pub fn request_challenge_validation(&mut self, url: &str) -> Result<Challenge, Error> {
|
||||
self.post(url, &serde_json::json!({}))?.json()
|
||||
}
|
||||
|
||||
/// Shortcut to `account().ok_or_else(...).key_authorization()`.
|
||||
pub fn key_authorization(&self, token: &str) -> Result<String, Error> {
|
||||
Self::need_account(&self.account)?.key_authorization(token)
|
||||
}
|
||||
|
||||
/// Shortcut to `account().ok_or_else(...).dns_01_txt_value()`.
|
||||
/// the key authorization value.
|
||||
pub fn dns_01_txt_value(&self, token: &str) -> Result<String, Error> {
|
||||
Self::need_account(&self.account)?.dns_01_txt_value(token)
|
||||
}
|
||||
|
||||
/// Low-level API to run an n API request. This automatically updates the current nonce!
|
||||
pub fn run_request(&mut self, request: Request) -> Result<HttpResponse, Error> {
|
||||
self.inner.run_request(request)
|
||||
}
|
||||
|
||||
/// Finalize an Order via its `finalize` URL property and the DER encoded CSR.
|
||||
pub fn finalize(&mut self, url: &str, csr: &[u8]) -> Result<(), Error> {
|
||||
let csr = b64u::encode(csr);
|
||||
let data = serde_json::json!({ "csr": csr });
|
||||
self.post(url, &data)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Download a certificate via its 'certificate' URL property.
|
||||
///
|
||||
/// The certificate will be a PEM certificate chain.
|
||||
pub fn get_certificate(&mut self, url: &str) -> Result<Vec<u8>, Error> {
|
||||
Ok(self.post_as_get(url)?.body)
|
||||
}
|
||||
|
||||
/// Revoke an existing certificate (PEM or DER formatted).
|
||||
pub fn revoke_certificate(
|
||||
&mut self,
|
||||
certificate: &[u8],
|
||||
reason: Option<u32>,
|
||||
) -> Result<(), Error> {
|
||||
// TODO: This can also work without an account.
|
||||
let account = Self::need_account(&self.account)?;
|
||||
|
||||
let revocation = account.revoke_certificate(certificate, reason)?;
|
||||
|
||||
let mut retry = retry();
|
||||
loop {
|
||||
retry.tick()?;
|
||||
|
||||
let directory =
|
||||
Self::get_directory(&mut self.inner, &mut self.directory, &self.directory_url)?;
|
||||
let nonce = Self::nonce(&mut self.inner, directory)?;
|
||||
let request = revocation.request(directory, nonce)?;
|
||||
match self.inner.run_request(request) {
|
||||
Ok(_response) => return Ok(()),
|
||||
Err(err) if err.is_bad_nonce() => continue,
|
||||
Err(err) => return Err(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Set a proxy
|
||||
pub fn set_proxy(&mut self, proxy: String) {
|
||||
self.inner.set_proxy(proxy)
|
||||
}
|
||||
}
|
||||
|
||||
/// bad nonce retry count helper
|
||||
struct Retry(usize);
|
||||
|
||||
const fn retry() -> Retry {
|
||||
Retry(0)
|
||||
}
|
||||
|
||||
impl Retry {
|
||||
fn tick(&mut self) -> Result<(), Error> {
|
||||
if self.0 >= 3 {
|
||||
bail!("kept getting a badNonce error!");
|
||||
}
|
||||
self.0 += 1;
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -1,107 +0,0 @@
|
||||
//! ACME Directory information.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// An ACME Directory. This contains the base URL and the directory data as received via a `GET`
/// request to the URL.
///
/// Constructed via [`Directory::from_parts`].
pub struct Directory {
    /// The main entry point URL to the ACME directory.
    pub url: String,

    /// The json structure received via a `GET` request to the directory URL. This contains the
    /// URLs for various API entry points.
    pub data: DirectoryData,
}
|
||||
|
||||
/// The ACME Directory object structure.
///
/// The data in here is typically not relevant to the user of this crate.
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct DirectoryData {
    /// The entry point to create a new account.
    pub new_account: String,

    /// The entry point to retrieve a new nonce, should be used with a `HEAD` request.
    pub new_nonce: String,

    /// URL to post new orders to.
    pub new_order: String,

    /// URL to use for certificate revocation.
    pub revoke_cert: String,

    /// Account key rollover URL.
    pub key_change: String,

    /// Metadata object, for additional information which isn't directly part of the API
    /// itself, such as the terms of service.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub meta: Option<Meta>,
}
|
||||
|
||||
/// The directory's "meta" object.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Meta {
    /// The terms of service. This is typically in the form of an URL.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub terms_of_service: Option<String>,

    /// Flag indicating if EAB (external account binding) is required; `None` is equivalent to
    /// `false`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub external_account_required: Option<bool>,

    /// Website with information about the ACME Server.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub website: Option<String>,

    /// List of hostnames used by the CA, intended for use with CAA DNS records.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub caa_identities: Vec<String>,
}
|
||||
|
||||
impl Directory {
|
||||
/// Create a `Directory` given the parsed `DirectoryData` of a `GET` request to the directory
|
||||
/// URL.
|
||||
pub fn from_parts(url: String, data: DirectoryData) -> Self {
|
||||
Self { url, data }
|
||||
}
|
||||
|
||||
/// Get the ToS URL.
|
||||
pub fn terms_of_service_url(&self) -> Option<&str> {
|
||||
match &self.data.meta {
|
||||
Some(meta) => meta.terms_of_service.as_deref(),
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get if external account binding is required
|
||||
pub fn external_account_binding_required(&self) -> bool {
|
||||
matches!(
|
||||
&self.data.meta,
|
||||
Some(Meta {
|
||||
external_account_required: Some(true),
|
||||
..
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
/// Get the "newNonce" URL. Use `HEAD` requests on this to get a new nonce.
|
||||
pub fn new_nonce_url(&self) -> &str {
|
||||
&self.data.new_nonce
|
||||
}
|
||||
|
||||
pub(crate) fn new_account_url(&self) -> &str {
|
||||
&self.data.new_account
|
||||
}
|
||||
|
||||
pub(crate) fn new_order_url(&self) -> &str {
|
||||
&self.data.new_order
|
||||
}
|
||||
|
||||
/// Access to the in the Acme spec defined metadata structure.
|
||||
pub fn meta(&self) -> Option<&Meta> {
|
||||
self.data.meta.as_ref()
|
||||
}
|
||||
}
|
@ -1,61 +0,0 @@
|
||||
use openssl::hash::MessageDigest;
|
||||
use openssl::pkey::{HasPrivate, PKeyRef};
|
||||
use openssl::sign::Signer;
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::key::Jwk;
|
||||
use crate::types::ExternalAccountBinding;
|
||||
use crate::{b64u, Error};
|
||||
|
||||
/// The JWS protected header used for the external account binding signature.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct Protected {
    /// Always "HS256" (set in `ExternalAccountBinding::new`).
    alg: &'static str,
    /// The request URL the binding is created for.
    url: String,
    /// The external account key identifier supplied by the caller.
    kid: String,
}
|
||||
|
||||
impl ExternalAccountBinding {
    /// Create a new instance.
    ///
    /// Builds an HS256-signed JWS whose payload is the account's public JWK, signed with the
    /// provided HMAC key, binding the ACME account to the external account `eab_kid`.
    pub fn new<P>(
        eab_kid: &str,
        eab_hmac_key: &PKeyRef<P>,
        jwk: Jwk,
        url: String,
    ) -> Result<Self, Error>
    where
        P: HasPrivate,
    {
        let protected = Protected {
            // EAB signatures always use HMAC-SHA256.
            alg: "HS256",
            kid: eab_kid.to_string(),
            url,
        };
        // The payload is the base64url-encoded JSON of the account's public JWK.
        let payload = b64u::encode(serde_json::to_string(&jwk)?.as_bytes());
        let protected_data = b64u::encode(serde_json::to_string(&protected)?.as_bytes());
        // Sign `b64u(protected) || "." || b64u(payload)`.
        let signature = {
            let protected = protected_data.as_bytes();
            let payload = payload.as_bytes();
            Self::sign_hmac(eab_hmac_key, protected, payload)?
        };

        let signature = b64u::encode(&signature);
        Ok(ExternalAccountBinding {
            protected: protected_data,
            payload,
            signature,
        })
    }

    /// HMAC-SHA256 over `protected || "." || payload`.
    fn sign_hmac<P>(key: &PKeyRef<P>, protected: &[u8], payload: &[u8]) -> Result<Vec<u8>, Error>
    where
        P: HasPrivate,
    {
        let mut signer = Signer::new(MessageDigest::sha256(), key)?;
        signer.update(protected)?;
        signer.update(b".")?;
        signer.update(payload)?;
        Ok(signer.sign_to_vec()?)
    }
}
|
@ -1,154 +0,0 @@
|
||||
//! The `Error` type and some ACME error constants for reference.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use openssl::error::ErrorStack as SslErrorStack;
|
||||
|
||||
/// The ACME error string for a "bad nonce" error.
///
/// Compared against the error response's type to detect when a request should be retried
/// with a fresh nonce.
pub const BAD_NONCE: &str = "urn:ietf:params:acme:error:badNonce";

/// The ACME error string for a "user action required" error.
pub const USER_ACTION_REQUIRED: &str = "urn:ietf:params:acme:error:userActionRequired";
|
||||
|
||||
/// Error types returned by this crate.
#[derive(Debug)]
#[must_use = "unused errors have no effect"]
pub enum Error {
    /// A `badNonce` API response. The request should be retried with the new nonce received along
    /// with this response.
    BadNonce,

    /// A `userActionRequired` API response. Typically this means there was a change to the ToS and
    /// the user has to agree to the new terms.
    UserActionRequired(String),

    /// Other error responses from the Acme API not handled specially.
    Api(crate::request::ErrorResponse),

    /// The Acme API behaved unexpectedly.
    InvalidApi(String),

    /// Tried to use an `Account` or `AccountCreator` without a private key.
    MissingKey,

    /// Tried to create an `Account` without providing a single contact info.
    MissingContactInfo,

    /// Tried to use an empty `Order`.
    EmptyOrder,

    /// A raw `openssl::PKey` containing an unsupported key was passed.
    UnsupportedKeyType,

    /// A raw `openssl::PKey` or `openssl::EcKey` with an unsupported curve was passed.
    UnsupportedGroup,

    /// Failed to parse the account data returned by the API upon account creation.
    BadAccountData(String),

    /// Failed to parse the order data returned by the API from a new-order request.
    BadOrderData(String),

    /// An openssl error occurred during a crypto operation.
    RawSsl(SslErrorStack),

    /// An openssl error occurred during a crypto operation.
    /// With some textual context.
    Ssl(&'static str, SslErrorStack),

    /// An otherwise uncaught serde error happened.
    Json(serde_json::Error),

    /// Failed to parse a base64 value.
    BadBase64(base64::DecodeError),

    /// Can be used by the user for textual error messages without having to downcast to regular
    /// acme errors.
    Custom(String),

    /// If built with the `client` feature, this is where general ureq/network errors end up.
    /// This is usually a `ureq::Error`, however in order to provide an API which is not
    /// feature-dependent, this variant is always present and contains a boxed `dyn Error`.
    HttpClient(Box<dyn std::error::Error + Send + Sync + 'static>),

    /// If built with the `client` feature, this is where client specific errors which are not from
    /// errors forwarded from `ureq` end up.
    Client(String),

    /// A non-openssl error occurred while building data for the CSR.
    Csr(String),
}
|
||||
|
||||
impl Error {
|
||||
/// Create an `Error` from a custom text.
|
||||
pub fn custom<T: std::fmt::Display>(s: T) -> Self {
|
||||
Error::Custom(s.to_string())
|
||||
}
|
||||
|
||||
/// Convenience method to check if this error represents a bad nonce error in which case the
|
||||
/// request needs to be re-created using a new nonce.
|
||||
pub fn is_bad_nonce(&self) -> bool {
|
||||
matches!(self, Error::BadNonce)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for Error {}
|
||||
|
||||
impl fmt::Display for Error {
    // Render each variant as a short human readable message; wrapped errors are mostly
    // forwarded to their own `Display` implementations.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Error::Api(err) => match err.detail.as_deref() {
                // Prefer the server-provided detail text when present.
                Some(detail) => write!(f, "{}: {}", err.ty, detail),
                None => fmt::Display::fmt(&err.ty, f),
            },
            Error::InvalidApi(err) => write!(f, "Acme Server API misbehaved: {}", err),
            Error::BadNonce => f.write_str("bad nonce, please retry with a new nonce"),
            Error::UserActionRequired(err) => write!(f, "user action required: {}", err),
            Error::MissingKey => f.write_str("cannot build an account without a key"),
            Error::MissingContactInfo => f.write_str("account requires contact info"),
            Error::EmptyOrder => f.write_str("cannot make an empty order"),
            Error::UnsupportedKeyType => f.write_str("unsupported key type"),
            Error::UnsupportedGroup => f.write_str("unsupported EC group"),
            Error::BadAccountData(err) => {
                write!(f, "bad response to account query or creation: {}", err)
            }
            Error::BadOrderData(err) => {
                write!(f, "bad response to new-order query or creation: {}", err)
            }
            Error::RawSsl(err) => fmt::Display::fmt(err, f),
            Error::Ssl(context, err) => {
                write!(f, "{}: {}", context, err)
            }
            Error::Json(err) => fmt::Display::fmt(err, f),
            Error::Custom(err) => fmt::Display::fmt(err, f),
            Error::HttpClient(err) => fmt::Display::fmt(err, f),
            Error::Client(err) => fmt::Display::fmt(err, f),
            Error::Csr(err) => fmt::Display::fmt(err, f),
            Error::BadBase64(err) => fmt::Display::fmt(err, f),
        }
    }
}
|
||||
|
||||
impl From<SslErrorStack> for Error {
|
||||
fn from(e: SslErrorStack) -> Self {
|
||||
Error::RawSsl(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Error> for Error {
|
||||
fn from(e: serde_json::Error) -> Self {
|
||||
Error::Json(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<crate::request::ErrorResponse> for Error {
|
||||
fn from(e: crate::request::ErrorResponse) -> Self {
|
||||
Error::Api(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<base64::DecodeError> for Error {
|
||||
fn from(e: base64::DecodeError) -> Self {
|
||||
Error::BadBase64(e)
|
||||
}
|
||||
}
|
@ -1,43 +0,0 @@
|
||||
use openssl::hash::Hasher;
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::Error;
|
||||
|
||||
/// Feed a JSON value into `output` in a canonical form: scalars via their serde serialization,
/// arrays in order, and object members sorted by key, with no insignificant whitespace. This
/// makes the resulting hash independent of the original key order.
pub fn to_hash_canonical(value: &Value, output: &mut Hasher) -> Result<(), Error> {
    match value {
        // Scalars already have a single canonical serialization.
        Value::Null | Value::String(_) | Value::Number(_) | Value::Bool(_) => {
            serde_json::to_writer(output, &value)?;
        }
        Value::Array(list) => {
            output.update(b"[")?;
            let mut iter = list.iter();
            // Write the first element without a comma, then prefix every following one.
            if let Some(item) = iter.next() {
                to_hash_canonical(item, output)?;
                for item in iter {
                    output.update(b",")?;
                    to_hash_canonical(item, output)?;
                }
            }
            output.update(b"]")?;
        }
        Value::Object(map) => {
            output.update(b"{")?;
            // Sort the keys for a deterministic member order.
            let mut keys: Vec<&str> = map.keys().map(String::as_str).collect();
            keys.sort_unstable();
            let mut iter = keys.into_iter();
            if let Some(key) = iter.next() {
                serde_json::to_writer(&mut *output, &key)?;
                output.update(b":")?;
                to_hash_canonical(&map[key], output)?;
                for key in iter {
                    output.update(b",")?;
                    serde_json::to_writer(&mut *output, &key)?;
                    output.update(b":")?;
                    to_hash_canonical(&map[key], output)?;
                }
            }
            output.update(b"}")?;
        }
    }
    Ok(())
}
|
@ -1,168 +0,0 @@
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use openssl::hash::{Hasher, MessageDigest};
|
||||
use openssl::pkey::{HasPrivate, PKeyRef};
|
||||
use openssl::sign::Signer;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::b64u;
|
||||
use crate::key::{Jwk, PublicKey};
|
||||
use crate::Error;
|
||||
|
||||
/// The JWS protected header.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Protected {
    /// Signature algorithm; filled in by `Jws::new_full` as "RS256" or "ES256" depending on
    /// the key type.
    alg: &'static str,
    /// The anti-replay nonce for this request.
    nonce: String,
    /// The request URL.
    url: String,
    /// Either the full JWK or the account location ("kid"); flattened into the header object.
    #[serde(flatten)]
    key: KeyId,
}
|
||||
|
||||
/// Acme requires the use of *either* `jwk` *or* `kid` depending on the action taken.
///
/// Serialized (camelCase) as the `jwk` or `kid` member of the protected header.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub enum KeyId {
    /// This is the actual JWK structure.
    Jwk(Jwk),

    /// This should be the account location.
    Kid(String),
}
|
||||
|
||||
/// A signed JWS object; all three members are base64url-encoded strings.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct Jws {
    /// base64url-encoded serialized `Protected` header.
    protected: String,
    /// base64url-encoded payload.
    payload: String,
    /// base64url-encoded signature over `protected || "." || payload`.
    signature: String,
}
|
||||
|
||||
impl Jws {
    /// Create a signed JWS from a serializable payload.
    pub fn new<P, T>(
        key: &PKeyRef<P>,
        location: Option<String>,
        url: String,
        nonce: String,
        payload: &T,
    ) -> Result<Self, Error>
    where
        P: HasPrivate,
        T: Serialize,
    {
        Self::new_full(
            key,
            location,
            url,
            nonce,
            b64u::encode(serde_json::to_string(payload)?.as_bytes()),
        )
    }

    /// Create a signed JWS from an already base64url-encoded payload.
    pub fn new_full<P: HasPrivate>(
        key: &PKeyRef<P>,
        location: Option<String>,
        url: String,
        nonce: String,
        payload: String,
    ) -> Result<Self, Error> {
        let jwk = Jwk::try_from(key)?;

        let pubkey = jwk.key.clone();
        let mut protected = Protected {
            // `alg` is filled in below by `prepare_rsa`/`prepare_ec` depending on the key type.
            alg: "",
            nonce,
            url,
            key: match location {
                // With a known account location use "kid", otherwise embed the full JWK.
                Some(location) => KeyId::Kid(location),
                None => KeyId::Jwk(jwk),
            },
        };

        // `ec_order_bytes` is only meaningful for EC keys; it is 0 for RSA.
        let (digest, ec_order_bytes): (MessageDigest, usize) = match &pubkey {
            PublicKey::Rsa(_) => (Self::prepare_rsa(key, &mut protected), 0),
            PublicKey::Ec(_) => Self::prepare_ec(key, &mut protected),
        };

        let protected_data = b64u::encode(serde_json::to_string(&protected)?.as_bytes());

        // The signature input is `b64u(protected) || "." || b64u(payload)`.
        let signature = {
            let prot = protected_data.as_bytes();
            let payload = payload.as_bytes();
            match &pubkey {
                PublicKey::Rsa(_) => Self::sign_rsa(key, digest, prot, payload),
                PublicKey::Ec(_) => Self::sign_ec(key, digest, ec_order_bytes, prot, payload),
            }?
        };

        let signature = b64u::encode(&signature);

        Ok(Jws {
            protected: protected_data,
            payload,
            signature,
        })
    }

    /// Set up the protected header for an RSA key and return the digest to sign with.
    fn prepare_rsa<P>(_key: &PKeyRef<P>, protected: &mut Protected) -> MessageDigest
    where
        P: HasPrivate,
    {
        protected.alg = "RS256";
        MessageDigest::sha256()
    }

    /// Returns the digest and the size of the two signature components 'r' and 's'.
    fn prepare_ec<P>(_key: &PKeyRef<P>, protected: &mut Protected) -> (MessageDigest, usize)
    where
        P: HasPrivate,
    {
        // Note: if we support >256 bit keys we'll want to also support using ES512 here probably
        protected.alg = "ES256";
        // 'r' and 's' are each 256 bit numbers:
        (MessageDigest::sha256(), 32)
    }

    /// RS256 signature (PKCS#1 v1.5 padding) over `protected || "." || payload`.
    fn sign_rsa<P>(
        key: &PKeyRef<P>,
        digest: MessageDigest,
        protected: &[u8],
        payload: &[u8],
    ) -> Result<Vec<u8>, Error>
    where
        P: HasPrivate,
    {
        let mut signer = Signer::new(digest, key)?;
        signer.set_rsa_padding(openssl::rsa::Padding::PKCS1)?;
        signer.update(protected)?;
        signer.update(b".")?;
        signer.update(payload)?;
        Ok(signer.sign_to_vec()?)
    }

    /// ECDSA signature over `protected || "." || payload`, emitted as the raw fixed-width
    /// big-endian concatenation `r || s` (each component left-padded to `ec_order_bytes`).
    fn sign_ec<P>(
        key: &PKeyRef<P>,
        digest: MessageDigest,
        ec_order_bytes: usize,
        protected: &[u8],
        payload: &[u8],
    ) -> Result<Vec<u8>, Error>
    where
        P: HasPrivate,
    {
        // Hash the signature input first, then sign the digest with raw ECDSA.
        let mut hasher = Hasher::new(digest)?;
        hasher.update(protected)?;
        hasher.update(b".")?;
        hasher.update(payload)?;
        let sig =
            openssl::ecdsa::EcdsaSig::sign(hasher.finish()?.as_ref(), key.ec_key()?.as_ref())?;
        let r = sig.r().to_vec();
        let s = sig.s().to_vec();
        let mut out = Vec::with_capacity(ec_order_bytes * 2);
        // Left-pad each component with zeroes to the full component width.
        out.extend(std::iter::repeat(0u8).take(ec_order_bytes - r.len()));
        out.extend(r);
        out.extend(std::iter::repeat(0u8).take(ec_order_bytes - s.len()));
        out.extend(s);
        Ok(out)
    }
}
|
@ -1,129 +0,0 @@
|
||||
use std::convert::{TryFrom, TryInto};
|
||||
|
||||
use openssl::hash::{Hasher, MessageDigest};
|
||||
use openssl::pkey::{HasPublic, Id, PKeyRef};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::b64u;
|
||||
use crate::Error;
|
||||
|
||||
/// An RSA public key.
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct RsaPublicKey {
|
||||
#[serde(with = "b64u::bytes")]
|
||||
e: Vec<u8>,
|
||||
#[serde(with = "b64u::bytes")]
|
||||
n: Vec<u8>,
|
||||
}
|
||||
|
||||
/// An EC public key.
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct EcPublicKey {
|
||||
crv: &'static str,
|
||||
#[serde(with = "b64u::bytes")]
|
||||
x: Vec<u8>,
|
||||
#[serde(with = "b64u::bytes")]
|
||||
y: Vec<u8>,
|
||||
}
|
||||
|
||||
/// A public key.
|
||||
///
|
||||
/// Internally tagged, so this already contains the 'kty' member.
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
#[serde(tag = "kty")]
|
||||
pub enum PublicKey {
|
||||
#[serde(rename = "RSA")]
|
||||
Rsa(RsaPublicKey),
|
||||
#[serde(rename = "EC")]
|
||||
Ec(EcPublicKey),
|
||||
}
|
||||
|
||||
impl PublicKey {
|
||||
/// The thumbprint is the b64u encoded sha256sum of the *canonical* json representation.
|
||||
pub fn thumbprint(&self) -> Result<String, Error> {
|
||||
let mut hasher = Hasher::new(MessageDigest::sha256())?;
|
||||
crate::json::to_hash_canonical(&serde_json::to_value(self)?, &mut hasher)?;
|
||||
Ok(b64u::encode(hasher.finish()?.as_ref()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize)]
|
||||
pub struct Jwk {
|
||||
#[serde(rename = "use", skip_serializing_if = "Option::is_none")]
|
||||
pub usage: Option<String>,
|
||||
|
||||
/// The key data is internally tagged, we can just flatten it.
|
||||
#[serde(flatten)]
|
||||
pub key: PublicKey,
|
||||
}
|
||||
|
||||
impl<P: HasPublic> TryFrom<&PKeyRef<P>> for Jwk {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(key: &PKeyRef<P>) -> Result<Self, Self::Error> {
|
||||
Ok(Self {
|
||||
key: key.try_into()?,
|
||||
usage: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: HasPublic> TryFrom<&PKeyRef<P>> for PublicKey {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(key: &PKeyRef<P>) -> Result<Self, Self::Error> {
|
||||
match key.id() {
|
||||
Id::RSA => Ok(PublicKey::Rsa(RsaPublicKey::try_from(&key.rsa()?)?)),
|
||||
Id::EC => Ok(PublicKey::Ec(EcPublicKey::try_from(&key.ec_key()?)?)),
|
||||
_ => Err(Error::UnsupportedKeyType),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: HasPublic> TryFrom<&openssl::rsa::Rsa<P>> for RsaPublicKey {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(key: &openssl::rsa::Rsa<P>) -> Result<Self, Self::Error> {
|
||||
Ok(RsaPublicKey {
|
||||
e: key.e().to_vec(),
|
||||
n: key.n().to_vec(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: HasPublic> TryFrom<&openssl::ec::EcKey<P>> for EcPublicKey {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(key: &openssl::ec::EcKey<P>) -> Result<Self, Self::Error> {
|
||||
let group = key.group();
|
||||
|
||||
if group.curve_name() != Some(openssl::nid::Nid::X9_62_PRIME256V1) {
|
||||
return Err(Error::UnsupportedGroup);
|
||||
}
|
||||
|
||||
let mut ctx = openssl::bn::BigNumContext::new()?;
|
||||
let mut x = openssl::bn::BigNum::new()?;
|
||||
let mut y = openssl::bn::BigNum::new()?;
|
||||
key.public_key()
|
||||
.affine_coordinates(group, &mut x, &mut y, &mut ctx)?;
|
||||
|
||||
Ok(EcPublicKey {
|
||||
crv: "P-256",
|
||||
x: x.to_vec(),
|
||||
y: y.to_vec(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_key_conversion() -> Result<(), Error> {
|
||||
let key = openssl::ec::EcKey::generate(
|
||||
openssl::ec::EcGroup::from_curve_name(openssl::nid::Nid::X9_62_PRIME256V1)?.as_ref(),
|
||||
)?;
|
||||
|
||||
let _ = EcPublicKey::try_from(&key).expect("failed to jsonify ec key");
|
||||
|
||||
Ok(())
|
||||
}
|
@ -1,91 +0,0 @@
|
||||
//! ACME protocol helper.
|
||||
//!
|
||||
//! This is supposed to implement the low level parts of the ACME protocol, providing an [`Account`]
|
||||
//! and some other helper types which allow interacting with an ACME server by implementing methods
|
||||
//! which create [`Request`]s the user can then combine with a nonce and send to the the ACME
|
||||
//! server using whatever http client they choose.
|
||||
//!
|
||||
//! This is a rather low level crate, and while it provides an optional synchronous client using
|
||||
//! curl (for simplicity), users should have basic understanding of the ACME API in order to
|
||||
//! implement a client using this.
|
||||
//!
|
||||
//! The [`Account`] helper supports RSA and ECC keys and provides most of the API methods.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(unsafe_code)]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
|
||||
#[cfg(feature = "api-types")]
|
||||
pub mod types;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
mod b64u;
|
||||
#[cfg(feature = "impl")]
|
||||
mod eab;
|
||||
#[cfg(feature = "impl")]
|
||||
mod json;
|
||||
#[cfg(feature = "impl")]
|
||||
mod jws;
|
||||
#[cfg(feature = "impl")]
|
||||
mod key;
|
||||
#[cfg(feature = "impl")]
|
||||
mod request;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod account;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod authorization;
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod directory;
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod error;
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod order;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
pub mod util;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
#[doc(inline)]
|
||||
pub use account::Account;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
#[doc(inline)]
|
||||
pub use authorization::{Authorization, Challenge};
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
#[doc(inline)]
|
||||
pub use directory::Directory;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
#[doc(inline)]
|
||||
pub use error::Error;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
#[doc(inline)]
|
||||
pub use order::Order;
|
||||
|
||||
#[cfg(feature = "impl")]
|
||||
#[doc(inline)]
|
||||
pub use request::Request;
|
||||
|
||||
// we don't inline these:
|
||||
#[cfg(feature = "impl")]
|
||||
pub use order::NewOrder;
|
||||
#[cfg(feature = "impl")]
|
||||
pub use request::ErrorResponse;
|
||||
|
||||
/// Header name for nonces.
|
||||
pub const REPLAY_NONCE: &str = "Replay-Nonce";
|
||||
|
||||
/// Header name for locations.
|
||||
pub const LOCATION: &str = "Location";
|
||||
|
||||
#[cfg(feature = "client")]
|
||||
pub mod client;
|
||||
#[cfg(feature = "client")]
|
||||
pub use client::Client;
|
||||
|
||||
#[cfg(feature = "async-client")]
|
||||
pub mod async_client;
|
@ -1,175 +0,0 @@
|
||||
//! ACME Orders data and identifiers.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::request::Request;
|
||||
use crate::Error;
|
||||
|
||||
/// Status of an [`Order`].
|
||||
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
#[derive(Default)]
|
||||
pub enum Status {
|
||||
/// Invalid, used as a place holder for when sending objects as contrary to account creation,
|
||||
/// the Acme RFC does not require the server to ignore unknown parts of the `Order` object.
|
||||
#[default]
|
||||
New,
|
||||
|
||||
/// Authorization failed and it is now invalid.
|
||||
Invalid,
|
||||
|
||||
/// The authorization is pending and the user should look through its challenges.
|
||||
///
|
||||
/// This is the initial state of a new authorization.
|
||||
Pending,
|
||||
|
||||
/// The ACME provider is processing an authorization validation.
|
||||
Processing,
|
||||
|
||||
/// The requirements for the order have been met and it may be finalized.
|
||||
Ready,
|
||||
|
||||
/// The certificate has been issued and can be downloaded from the URL provided in the
|
||||
/// [`Order`]'s `certificate` field.
|
||||
Valid,
|
||||
}
|
||||
|
||||
impl Status {
|
||||
/// Serde helper
|
||||
fn is_new(&self) -> bool {
|
||||
*self == Status::New
|
||||
}
|
||||
|
||||
/// Convenience method to check if the status is 'pending'.
|
||||
#[inline]
|
||||
pub fn is_pending(self) -> bool {
|
||||
self == Status::Pending
|
||||
}
|
||||
|
||||
/// Convenience method to check if the status is 'valid'.
|
||||
#[inline]
|
||||
pub fn is_valid(self) -> bool {
|
||||
self == Status::Valid
|
||||
}
|
||||
}
|
||||
|
||||
/// An identifier used for a certificate request.
|
||||
///
|
||||
/// Currently only supports DNS name identifiers.
|
||||
#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
|
||||
#[serde(tag = "type", content = "value", rename_all = "lowercase")]
|
||||
pub enum Identifier {
|
||||
/// A DNS identifier is used to request a domain name to be added to a certificate.
|
||||
Dns(String),
|
||||
}
|
||||
|
||||
/// This contains the order data sent to and received from the ACME server.
|
||||
///
|
||||
/// This is typically filled with a set of domains and then issued as a new-order request via [`Account::new_order`](crate::Account::new_order).
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct OrderData {
|
||||
/// The order status.
|
||||
#[serde(skip_serializing_if = "Status::is_new", default)]
|
||||
pub status: Status,
|
||||
|
||||
/// This order's expiration date as RFC3339 formatted time string.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub expires: Option<String>,
|
||||
|
||||
/// List of identifiers to order for the certificate.
|
||||
pub identifiers: Vec<Identifier>,
|
||||
|
||||
/// An RFC3339 formatted time string. It is up to the user to choose a dev dependency for this
|
||||
/// shit.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub not_before: Option<String>,
|
||||
|
||||
/// An RFC3339 formatted time string. It is up to the user to choose a dev dependency for this
|
||||
/// shit.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub not_after: Option<String>,
|
||||
|
||||
/// Possible errors in this order.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<Value>,
|
||||
|
||||
/// List of URL's to authorizations the client needs to complete.
|
||||
#[serde(skip_serializing_if = "Vec::is_empty")]
|
||||
pub authorizations: Vec<String>,
|
||||
|
||||
/// URL the final CSR needs to be POSTed to in order to complete the order, once all
|
||||
/// authorizations have been performed.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub finalize: Option<String>,
|
||||
|
||||
/// URL at which the issued certificate can be fetched once it is available.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub certificate: Option<String>,
|
||||
}
|
||||
|
||||
impl OrderData {
|
||||
/// Initialize an empty order object.
|
||||
pub fn new() -> Self {
|
||||
Default::default()
|
||||
}
|
||||
|
||||
/// Builder-style method to add a domain identifier to the data.
|
||||
pub fn domain(mut self, domain: String) -> Self {
|
||||
self.identifiers.push(Identifier::Dns(domain));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents an order for a new certificate. This combines the order's own location (URL) with
|
||||
/// the [`OrderData`] received from the ACME server.
|
||||
#[derive(Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Order {
|
||||
/// Order location URL.
|
||||
pub location: String,
|
||||
|
||||
/// The order's data object.
|
||||
pub data: OrderData,
|
||||
}
|
||||
|
||||
impl Order {
|
||||
/// Get an authorization URL (or `None` if the index is out of range).
|
||||
pub fn authorization(&self, index: usize) -> Option<&str> {
|
||||
Some(self.data.authorizations.get(index)?)
|
||||
}
|
||||
|
||||
/// Get the number of authorizations in this object.
|
||||
pub fn authorization_len(&self) -> usize {
|
||||
self.data.authorizations.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents a new in-flight order creation.
|
||||
///
|
||||
/// This is created via [`Account::new_order`](crate::Account::new_order()).
|
||||
pub struct NewOrder {
|
||||
//order: OrderData,
|
||||
/// The request to execute to place the order. When creating a [`NewOrder`] via
|
||||
/// [`Account::new_order`](crate::Account::new_order) this is guaranteed to be `Some`.
|
||||
pub request: Option<Request>,
|
||||
}
|
||||
|
||||
impl NewOrder {
|
||||
pub(crate) fn new(request: Request) -> Self {
|
||||
Self {
|
||||
//order,
|
||||
request: Some(request),
|
||||
}
|
||||
}
|
||||
|
||||
/// Deal with the response we got from the server.
|
||||
pub fn response(self, location_header: String, response_body: &[u8]) -> Result<Order, Error> {
|
||||
Ok(Order {
|
||||
location: location_header,
|
||||
data: serde_json::from_slice(response_body)
|
||||
.map_err(|err| Error::BadOrderData(err.to_string()))?,
|
||||
})
|
||||
}
|
||||
}
|
@ -1,42 +0,0 @@
|
||||
use serde::Deserialize;
|
||||
|
||||
pub(crate) const JSON_CONTENT_TYPE: &str = "application/jose+json";
|
||||
pub(crate) const CREATED: u16 = 201;
|
||||
|
||||
/// A request which should be performed on the ACME provider.
|
||||
pub struct Request {
|
||||
/// The complete URL to send the request to.
|
||||
pub url: String,
|
||||
|
||||
/// The HTTP method name to use.
|
||||
pub method: &'static str,
|
||||
|
||||
/// The `Content-Type` header to pass along.
|
||||
pub content_type: &'static str,
|
||||
|
||||
/// The body to pass along with request, or an empty string.
|
||||
pub body: String,
|
||||
|
||||
/// The expected status code a compliant ACME provider will return on success.
|
||||
pub expected: u16,
|
||||
}
|
||||
|
||||
/// An ACME error response contains a specially formatted type string, and can optionally
|
||||
/// contain textual details and a set of sub problems.
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub struct ErrorResponse {
|
||||
/// The ACME error type string.
|
||||
///
|
||||
/// Most of the time we're only interested in the "bad nonce" or "user action required"
|
||||
/// errors. When an [`Error`](crate::Error) is built from this error response, it will map
|
||||
/// to the corresponding enum values (eg. [`Error::BadNonce`](crate::Error::BadNonce)).
|
||||
#[serde(rename = "type")]
|
||||
pub ty: String,
|
||||
|
||||
/// A textual detail string optionally provided by the ACME provider to inform the user more
|
||||
/// verbosely about why the error occurred.
|
||||
pub detail: Option<String>,
|
||||
|
||||
/// Additional json data containing information as to why the error occurred.
|
||||
pub subproblems: Option<serde_json::Value>,
|
||||
}
|
@ -1,126 +0,0 @@
|
||||
//! Define types which are exposed with the proxmox API
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
|
||||
#[cfg_attr(feature = "api-types", proxmox_schema::api())]
|
||||
/// External Account Bindings
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ExternalAccountBinding {
|
||||
/// JOSE Header (see RFC 7515)
|
||||
pub protected: String,
|
||||
/// Payload
|
||||
pub payload: String,
|
||||
/// HMAC signature
|
||||
pub signature: String,
|
||||
}
|
||||
|
||||
/// Status of an ACME account.
|
||||
#[cfg_attr(feature = "api-types", proxmox_schema::api())]
|
||||
#[derive(Clone, Copy, Eq, PartialEq, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub enum AccountStatus {
|
||||
/// This is not part of the ACME API, but a temporary marker for us until the ACME provider
|
||||
/// tells us the account's real status.
|
||||
#[serde(rename = "<invalid>")]
|
||||
New,
|
||||
|
||||
/// Means the account is valid and can be used.
|
||||
Valid,
|
||||
|
||||
/// The account has been deactivated by its user and cannot be used anymore.
|
||||
Deactivated,
|
||||
|
||||
/// The account has been revoked by the server and cannot be used anymore.
|
||||
Revoked,
|
||||
}
|
||||
|
||||
impl Default for AccountStatus {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl AccountStatus {
|
||||
/// Create a new instance with state New.
|
||||
#[inline]
|
||||
pub fn new() -> Self {
|
||||
AccountStatus::New
|
||||
}
|
||||
|
||||
/// Return true if state is New
|
||||
#[inline]
|
||||
pub fn is_new(&self) -> bool {
|
||||
*self == AccountStatus::New
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn default_true() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_false(b: &bool) -> bool {
|
||||
!*b
|
||||
}
|
||||
|
||||
#[cfg_attr(feature="api-types", proxmox_schema::api(
|
||||
properties: {
|
||||
extra: {
|
||||
type: Object,
|
||||
properties: {},
|
||||
additional_properties: true,
|
||||
},
|
||||
contact: {
|
||||
type: Array,
|
||||
items: {
|
||||
type: String,
|
||||
description: "Contact Info.",
|
||||
},
|
||||
},
|
||||
}
|
||||
))]
|
||||
/// ACME Account data. This is the part of the account returned from and possibly sent to the ACME
|
||||
/// provider. Some fields may be uptdated by the user via a request to the account location, others
|
||||
/// may not be changed.
|
||||
#[derive(Clone, PartialEq, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct AccountData {
|
||||
/// The current account status.
|
||||
#[serde(
|
||||
skip_serializing_if = "AccountStatus::is_new",
|
||||
default = "AccountStatus::new"
|
||||
)]
|
||||
pub status: AccountStatus,
|
||||
|
||||
/// URLs to currently pending orders.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub orders: Option<String>,
|
||||
|
||||
/// The account's contact info.
|
||||
///
|
||||
/// This usually contains a `"mailto:<email address>"` entry but may also contain some other
|
||||
/// data if the server accepts it.
|
||||
#[serde(skip_serializing_if = "Vec::is_empty", default)]
|
||||
pub contact: Vec<String>,
|
||||
|
||||
/// Indicated whether the user agreed to the ACME provider's terms of service.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub terms_of_service_agreed: Option<bool>,
|
||||
|
||||
/// External account information.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub external_account_binding: Option<ExternalAccountBinding>,
|
||||
|
||||
/// This is only used by the client when querying an account.
|
||||
#[serde(default = "default_true", skip_serializing_if = "is_false")]
|
||||
pub only_return_existing: bool,
|
||||
|
||||
/// Stores unknown fields if there are any.
|
||||
#[serde(flatten, default, skip_serializing_if = "HashMap::is_empty")]
|
||||
pub extra: HashMap<String, Value>,
|
||||
}
|
@ -1,85 +0,0 @@
|
||||
//! Certificate utility methods for convenience (such as CSR generation).
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use openssl::hash::MessageDigest;
|
||||
use openssl::nid::Nid;
|
||||
use openssl::pkey::PKey;
|
||||
use openssl::rsa::Rsa;
|
||||
use openssl::x509::{self, X509Name, X509Req};
|
||||
|
||||
use crate::Error;
|
||||
|
||||
/// A certificate signing request.
|
||||
pub struct Csr {
|
||||
/// DER encoded certificate request.
|
||||
pub data: Vec<u8>,
|
||||
|
||||
/// PEM formatted PKCS#8 private key.
|
||||
pub private_key_pem: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Csr {
|
||||
/// Generate a CSR in DER format with a PEM formatted PKCS8 private key.
|
||||
///
|
||||
/// The `identifiers` should be a list of domains. The `attributes` should have standard names
|
||||
/// recognized by openssl.
|
||||
pub fn generate(
|
||||
identifiers: &[impl AsRef<str>],
|
||||
attributes: &HashMap<String, &str>,
|
||||
) -> Result<Self, Error> {
|
||||
if identifiers.is_empty() {
|
||||
return Err(Error::Csr("cannot generate empty CSR".to_string()));
|
||||
}
|
||||
|
||||
let private_key = Rsa::generate(4096)
|
||||
.and_then(PKey::from_rsa)
|
||||
.map_err(|err| Error::Ssl("failed to generate RSA key: {}", err))?;
|
||||
|
||||
let private_key_pem = private_key
|
||||
.private_key_to_pem_pkcs8()
|
||||
.map_err(|err| Error::Ssl("failed to format private key as PEM pkcs8: {}", err))?;
|
||||
|
||||
let mut name = X509Name::builder()?;
|
||||
if !attributes.contains_key("CN") {
|
||||
name.append_entry_by_nid(Nid::COMMONNAME, identifiers[0].as_ref())?;
|
||||
}
|
||||
for (key, value) in attributes {
|
||||
name.append_entry_by_text(key, value)?;
|
||||
}
|
||||
let name = name.build();
|
||||
|
||||
let mut csr = X509Req::builder()?;
|
||||
csr.set_subject_name(&name)?;
|
||||
csr.set_pubkey(&private_key)?;
|
||||
|
||||
let context = csr.x509v3_context(None);
|
||||
let mut ext = openssl::stack::Stack::new()?;
|
||||
ext.push(x509::extension::BasicConstraints::new().build()?)?;
|
||||
ext.push(
|
||||
x509::extension::KeyUsage::new()
|
||||
.digital_signature()
|
||||
.key_encipherment()
|
||||
.build()?,
|
||||
)?;
|
||||
ext.push(
|
||||
x509::extension::ExtendedKeyUsage::new()
|
||||
.server_auth()
|
||||
.client_auth()
|
||||
.build()?,
|
||||
)?;
|
||||
let mut san = x509::extension::SubjectAlternativeName::new();
|
||||
for dns in identifiers {
|
||||
san.dns(dns.as_ref());
|
||||
}
|
||||
ext.push({ san }.build(&context)?)?;
|
||||
csr.add_extensions(&ext)?;
|
||||
|
||||
csr.sign(&private_key, MessageDigest::sha256())?;
|
||||
|
||||
Ok(Self {
|
||||
data: csr.build().to_der()?,
|
||||
private_key_pem,
|
||||
})
|
||||
}
|
||||
}
|
@ -1,14 +1,13 @@
|
||||
[package]
|
||||
name = "proxmox-api-macro"
|
||||
description = "Proxmox API macro"
|
||||
version = "1.3.2"
|
||||
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
exclude.workspace = true
|
||||
homepage.workspace = true
|
||||
version = "1.0.4"
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
description = "Proxmox API macro"
|
||||
|
||||
exclude.workspace = true
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
@ -23,7 +22,6 @@ syn = { workspace = true , features = [ "extra-traits" ] }
|
||||
futures.workspace = true
|
||||
serde = { workspace = true, features = [ "derive" ] }
|
||||
serde_json.workspace = true
|
||||
proxmox-section-config.workspace = true
|
||||
|
||||
[dev-dependencies.proxmox-schema]
|
||||
workspace = true
|
||||
|
@ -1,89 +1,3 @@
|
||||
rust-proxmox-api-macro (1.3.2-1) bookworm; urgency=medium
|
||||
|
||||
* mark parameter defaults as `#[allow(dead_code)]`
|
||||
|
||||
* sort variants when using `#[api]` on an `enum` to generate a OneOfSchema
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 19 Feb 2025 12:55:02 +0100
|
||||
|
||||
rust-proxmox-api-macro (1.3.1-1) bookworm; urgency=medium
|
||||
|
||||
* rebuild with proxmox-schema 4.0
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 15 Jan 2025 12:36:26 +0100
|
||||
|
||||
rust-proxmox-api-macro (1.3.0-1) bookworm; urgency=medium
|
||||
|
||||
* A missing/empty description for enums is now an error.
|
||||
|
||||
* Add experimental json_schema!() macro to create a `Schema` in json
|
||||
notation.
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 09 Jan 2025 14:20:54 +0100
|
||||
|
||||
rust-proxmox-api-macro (1.2.1-1) bookworm; urgency=medium
|
||||
|
||||
* allow declaring a field meant to collect the 'additional_properties'
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 26 Sep 2024 14:52:43 +0200
|
||||
|
||||
rust-proxmox-api-macro (1.2.0-1) bookworm; urgency=medium
|
||||
|
||||
* deprecate old "streaming" method attribute
|
||||
|
||||
* add "serializing" method attribute to replace the old "streaming" one
|
||||
|
||||
* add "stream" method attribute for the *new* streaming API
|
||||
|
||||
* fix warnings in tests
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 04 Sep 2024 15:36:05 +0200
|
||||
|
||||
rust-proxmox-api-macro (1.1.0-1) stable; urgency=medium
|
||||
|
||||
* fix handling of renames when deriving an Updater for structs
|
||||
|
||||
* experimental support for newtype-only enums for SectionConfig support
|
||||
|
||||
* use const blocks in thread_local calls
|
||||
|
||||
* documentation and typo fixe
|
||||
|
||||
* code cleanups, warning and clippy fixes
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Tue, 06 Aug 2024 14:15:49 +0200
|
||||
|
||||
rust-proxmox-api-macro (1.0.8-1) stable; urgency=medium
|
||||
|
||||
* update to proxmox-schema 3
|
||||
|
||||
* make #[serde(skip_serializing_if)] without #[serde(default)] an error
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Fri, 02 Feb 2024 13:44:40 +0100
|
||||
|
||||
rust-proxmox-api-macro (1.0.7-1) stable; urgency=medium
|
||||
|
||||
* make serde(skip_serializing_if) without serde(default) for non-Option
|
||||
types an error
|
||||
|
||||
* split field and variant attribute parsing
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Wed, 06 Dec 2023 16:02:11 +0100
|
||||
|
||||
rust-proxmox-api-macro (1.0.6-1) stable; urgency=medium
|
||||
|
||||
* clippy fix: this (Default) `impl` can be derived
|
||||
|
||||
* update to syn 2, rework attribute parsing
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Mon, 02 Oct 2023 09:27:12 +0200
|
||||
|
||||
rust-proxmox-api-macro (1.0.5-1) bookworm; urgency=medium
|
||||
|
||||
* support non-idents in serde rename attributes on enum variants
|
||||
|
||||
-- Proxmox Support Team <support@proxmox.com> Thu, 03 Aug 2023 08:23:42 +0200
|
||||
|
||||
rust-proxmox-api-macro (1.0.4-1) stable; urgency=medium
|
||||
|
||||
* support #[default] attribute for types which derive Default
|
||||
|
@ -1,23 +1,22 @@
|
||||
Source: rust-proxmox-api-macro
|
||||
Section: rust
|
||||
Priority: optional
|
||||
Build-Depends: debhelper-compat (= 13),
|
||||
dh-sequence-cargo,
|
||||
Build-Depends: debhelper (>= 12),
|
||||
dh-cargo (>= 25),
|
||||
cargo:native <!nocheck>,
|
||||
rustc:native <!nocheck>,
|
||||
libstd-rust-dev <!nocheck>,
|
||||
librust-anyhow-1+default-dev <!nocheck>,
|
||||
librust-proc-macro2-1+default-dev <!nocheck>,
|
||||
librust-quote-1+default-dev <!nocheck>,
|
||||
librust-syn-2+default-dev <!nocheck>,
|
||||
librust-syn-2+extra-traits-dev <!nocheck>,
|
||||
librust-syn-2+full-dev <!nocheck>,
|
||||
librust-syn-2+visit-mut-dev <!nocheck>
|
||||
librust-syn-1+default-dev <!nocheck>,
|
||||
librust-syn-1+extra-traits-dev <!nocheck>,
|
||||
librust-syn-1+full-dev <!nocheck>,
|
||||
librust-syn-1+visit-mut-dev <!nocheck>
|
||||
Maintainer: Proxmox Support Team <support@proxmox.com>
|
||||
Standards-Version: 4.7.0
|
||||
Standards-Version: 4.6.1
|
||||
Vcs-Git: git://git.proxmox.com/git/proxmox.git
|
||||
Vcs-Browser: https://git.proxmox.com/?p=proxmox.git
|
||||
Homepage: https://proxmox.com
|
||||
X-Cargo-Crate: proxmox-api-macro
|
||||
Rules-Requires-Root: no
|
||||
|
||||
@ -29,17 +28,18 @@ Depends:
|
||||
librust-anyhow-1+default-dev,
|
||||
librust-proc-macro2-1+default-dev,
|
||||
librust-quote-1+default-dev,
|
||||
librust-syn-2+default-dev,
|
||||
librust-syn-2+extra-traits-dev,
|
||||
librust-syn-2+full-dev,
|
||||
librust-syn-2+visit-mut-dev
|
||||
librust-syn-1+default-dev,
|
||||
librust-syn-1+extra-traits-dev,
|
||||
librust-syn-1+full-dev,
|
||||
librust-syn-1+visit-mut-dev
|
||||
Provides:
|
||||
librust-proxmox-api-macro+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-api-macro-1-dev (= ${binary:Version}),
|
||||
librust-proxmox-api-macro-1+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-api-macro-1.3-dev (= ${binary:Version}),
|
||||
librust-proxmox-api-macro-1.3+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-api-macro-1.3.2-dev (= ${binary:Version}),
|
||||
librust-proxmox-api-macro-1.3.2+default-dev (= ${binary:Version})
|
||||
librust-proxmox-api-macro-1.0-dev (= ${binary:Version}),
|
||||
librust-proxmox-api-macro-1.0+default-dev (= ${binary:Version}),
|
||||
librust-proxmox-api-macro-1.0.4-dev (= ${binary:Version}),
|
||||
librust-proxmox-api-macro-1.0.4+default-dev (= ${binary:Version})
|
||||
Description: Proxmox API macro - Rust source code
|
||||
Source code for Debianized Rust crate "proxmox-api-macro"
|
||||
This package contains the source for the Rust proxmox-api-macro crate, packaged
|
||||
by debcargo for use with cargo and dh-cargo.
|
||||
|
@ -1,7 +1,8 @@
|
||||
use proc_macro2::TokenStream;
|
||||
use syn::meta::ParseNestedMeta;
|
||||
use quote::ToTokens;
|
||||
use syn::{Meta, NestedMeta};
|
||||
|
||||
use crate::util;
|
||||
use crate::util::{self, default_false, parse_str_value_to_option, set_bool};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct UpdaterFieldAttributes {
|
||||
@ -19,42 +20,46 @@ impl UpdaterFieldAttributes {
|
||||
pub fn from_attributes(input: &mut Vec<syn::Attribute>) -> Self {
|
||||
let mut this = Self::default();
|
||||
|
||||
for attr in std::mem::take(input) {
|
||||
if attr.style != syn::AttrStyle::Outer || !attr.path().is_ident("updater") {
|
||||
input.push(attr);
|
||||
continue;
|
||||
}
|
||||
match attr.parse_nested_meta(|meta| this.parse(meta)) {
|
||||
Ok(()) => (),
|
||||
Err(err) => crate::add_error(err),
|
||||
}
|
||||
}
|
||||
util::extract_attributes(input, "updater", |attr, meta| this.parse(attr, meta));
|
||||
|
||||
this
|
||||
}
|
||||
|
||||
fn parse(&mut self, meta: ParseNestedMeta<'_>) -> Result<(), syn::Error> {
|
||||
let path = &meta.path;
|
||||
fn parse(&mut self, attr: &syn::Attribute, input: NestedMeta) -> Result<(), syn::Error> {
|
||||
match input {
|
||||
NestedMeta::Lit(lit) => bail!(lit => "unexpected literal"),
|
||||
NestedMeta::Meta(meta) => self.parse_meta(attr, meta),
|
||||
}
|
||||
}
|
||||
|
||||
if path.is_ident("skip") {
|
||||
if !meta.input.is_empty() {
|
||||
return Err(meta.error("'skip' attribute does not take any data"));
|
||||
fn parse_meta(&mut self, attr: &syn::Attribute, meta: Meta) -> Result<(), syn::Error> {
|
||||
match meta {
|
||||
Meta::Path(ref path) if path.is_ident("skip") => {
|
||||
set_bool(&mut self.skip, path, true);
|
||||
}
|
||||
util::set_bool(&mut self.skip, path, true);
|
||||
} else if path.is_ident("type") {
|
||||
util::parse_str_value_to_option(&mut self.ty, path, meta.value()?);
|
||||
} else if path.is_ident("serde") {
|
||||
let content: TokenStream = meta.input.parse()?;
|
||||
self.serde.push(syn::parse_quote! { # [ #path #content ] });
|
||||
} else {
|
||||
return Err(meta.error(format!("invalid updater attribute: {path:?}")));
|
||||
Meta::NameValue(ref nv) if nv.path.is_ident("type") => {
|
||||
parse_str_value_to_option(&mut self.ty, nv)
|
||||
}
|
||||
Meta::NameValue(m) => bail!(&m => "invalid updater attribute: {:?}", m.path),
|
||||
Meta::List(m) if m.path.is_ident("serde") => {
|
||||
let mut tokens = TokenStream::new();
|
||||
m.paren_token
|
||||
.surround(&mut tokens, |tokens| m.nested.to_tokens(tokens));
|
||||
self.serde.push(syn::Attribute {
|
||||
path: m.path,
|
||||
tokens,
|
||||
..attr.clone()
|
||||
});
|
||||
}
|
||||
Meta::List(m) => bail!(&m => "invalid updater attribute: {:?}", m.path),
|
||||
Meta::Path(m) => bail!(&m => "invalid updater attribute: {:?}", m),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn skip(&self) -> bool {
|
||||
util::default_false(self.skip.as_ref())
|
||||
default_false(self.skip.as_ref())
|
||||
}
|
||||
|
||||
pub fn ty(&self) -> Option<&syn::TypePath> {
|
||||
@ -63,50 +68,8 @@ impl UpdaterFieldAttributes {
|
||||
|
||||
pub fn replace_serde_attributes(&self, attrs: &mut Vec<syn::Attribute>) {
|
||||
if !self.serde.is_empty() {
|
||||
attrs.retain(|attr| !attr.path().is_ident("serde"));
|
||||
attrs.retain(|attr| !attr.path.is_ident("serde"));
|
||||
attrs.extend(self.serde.iter().cloned())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct EnumFieldAttributes {
|
||||
/// Change the "type-key" for this entry type..
|
||||
type_key: Option<syn::LitStr>,
|
||||
}
|
||||
|
||||
impl EnumFieldAttributes {
|
||||
pub fn from_attributes(input: &mut Vec<syn::Attribute>) -> Self {
|
||||
let mut this = Self::default();
|
||||
|
||||
for attr in std::mem::take(input) {
|
||||
if attr.style != syn::AttrStyle::Outer || !attr.path().is_ident("api") {
|
||||
input.push(attr);
|
||||
continue;
|
||||
}
|
||||
match attr.parse_nested_meta(|meta| this.parse(meta)) {
|
||||
Ok(()) => (),
|
||||
Err(err) => crate::add_error(err),
|
||||
}
|
||||
}
|
||||
|
||||
this
|
||||
}
|
||||
|
||||
fn parse(&mut self, meta: ParseNestedMeta<'_>) -> Result<(), syn::Error> {
|
||||
let path = &meta.path;
|
||||
|
||||
if path.is_ident("type_key") {
|
||||
util::duplicate(&self.type_key, path);
|
||||
self.type_key = Some(meta.value()?.parse()?);
|
||||
} else {
|
||||
return Err(meta.error(format!("invalid api attribute: {path:?}")));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn type_key(&self) -> Option<&syn::LitStr> {
|
||||
self.type_key.as_ref()
|
||||
}
|
||||
}
|
||||
|
@ -4,55 +4,13 @@ use anyhow::Error;
|
||||
|
||||
use proc_macro2::{Ident, Span, TokenStream};
|
||||
use quote::quote_spanned;
|
||||
use syn::spanned::Spanned;
|
||||
|
||||
use super::attributes::EnumFieldAttributes;
|
||||
use super::Schema;
|
||||
use crate::serde;
|
||||
use crate::util::{self, FieldName, JSONObject, JSONValue, Maybe};
|
||||
|
||||
/// Enums, provided they're simple enums, simply get an enum string schema attached to them.
|
||||
pub fn handle_enum(attribs: JSONObject, enum_ty: syn::ItemEnum) -> Result<TokenStream, Error> {
|
||||
let mut first_unit = None;
|
||||
let mut first_unnamed = None;
|
||||
let mut first_named = None;
|
||||
for variant in &enum_ty.variants {
|
||||
match &variant.fields {
|
||||
syn::Fields::Unit => first_unit = Some(variant.fields.span()),
|
||||
syn::Fields::Unnamed(_) => first_unnamed = Some(variant.fields.span()),
|
||||
syn::Fields::Named(_) => first_named = Some(variant.fields.span()),
|
||||
}
|
||||
}
|
||||
|
||||
if first_unit.is_some() {
|
||||
if let Some(conflict) = first_unnamed.or(first_named) {
|
||||
bail!(
|
||||
conflict,
|
||||
"enums must be either with only unit types or only newtypes"
|
||||
);
|
||||
}
|
||||
return handle_string_enum(attribs, enum_ty);
|
||||
}
|
||||
|
||||
if first_unnamed.is_some() {
|
||||
if let Some(conflict) = first_unit.or(first_named) {
|
||||
bail!(
|
||||
conflict,
|
||||
"enums must be either with only unit types or only newtypes"
|
||||
);
|
||||
}
|
||||
return handle_section_config_enum(attribs, enum_ty);
|
||||
}
|
||||
|
||||
if let Some(bad) = first_named {
|
||||
bail!(bad, "api type enums with named fields are not allowed");
|
||||
}
|
||||
|
||||
bail!(enum_ty => "api type enums must not be empty");
|
||||
}
|
||||
|
||||
/// Enums, provided they're simple enums, simply get an enum string schema attached to them.
|
||||
fn handle_string_enum(
|
||||
pub fn handle_enum(
|
||||
mut attribs: JSONObject,
|
||||
mut enum_ty: syn::ItemEnum,
|
||||
) -> Result<TokenStream, Error> {
|
||||
@ -74,12 +32,6 @@ fn handle_string_enum(
|
||||
|
||||
if schema.description.is_none() {
|
||||
let (comment, span) = util::get_doc_comments(&enum_ty.attrs)?;
|
||||
if comment.is_empty() {
|
||||
error!(
|
||||
Span::call_site(),
|
||||
"missing doc comment on enum for api-schema description"
|
||||
);
|
||||
}
|
||||
schema.description = Maybe::Derived(syn::LitStr::new(comment.trim(), span));
|
||||
}
|
||||
|
||||
@ -105,7 +57,7 @@ fn handle_string_enum(
|
||||
comment = "<missing description>".to_string();
|
||||
}
|
||||
|
||||
let attrs = serde::VariantAttrib::try_from(&variant.attrs[..])?;
|
||||
let attrs = serde::SerdeAttrib::try_from(&variant.attrs[..])?;
|
||||
let variant_string = if let Some(renamed) = attrs.rename {
|
||||
renamed
|
||||
} else if let Some(rename_all) = container_attrs.rename_all {
|
||||
@ -117,7 +69,7 @@ fn handle_string_enum(
|
||||
};
|
||||
|
||||
if derives_default {
|
||||
if let Some(attr) = variant.attrs.iter().find(|a| a.path().is_ident("default")) {
|
||||
if let Some(attr) = variant.attrs.iter().find(|a| a.path.is_ident("default")) {
|
||||
if let Some(default_value) = &default_value {
|
||||
error!(attr => "multiple default values defined");
|
||||
error!(default_value => "default previously defined here");
|
||||
@ -162,153 +114,3 @@ fn handle_string_enum(
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn handle_section_config_enum(
|
||||
mut attribs: JSONObject,
|
||||
mut enum_ty: syn::ItemEnum,
|
||||
) -> Result<TokenStream, Error> {
|
||||
let name = &enum_ty.ident;
|
||||
|
||||
let description: syn::LitStr = match attribs.remove("description") {
|
||||
Some(desc) => desc.try_into()?,
|
||||
None => {
|
||||
let (comment, span) = util::get_doc_comments(&enum_ty.attrs)?;
|
||||
syn::LitStr::new(comment.trim(), span)
|
||||
}
|
||||
};
|
||||
|
||||
let id_schema = {
|
||||
let schema: Schema = match attribs.remove("id-schema") {
|
||||
Some(schema) => schema.try_into()?,
|
||||
None => {
|
||||
bail!(name => "missing 'id-schema' property for SectionConfig style enum")
|
||||
}
|
||||
};
|
||||
|
||||
let mut ts = TokenStream::new();
|
||||
schema.to_typed_schema(&mut ts)?;
|
||||
ts
|
||||
};
|
||||
let id_property: syn::LitStr = match attribs.remove("id-property") {
|
||||
Some(name) => name.try_into()?,
|
||||
None => bail!(name => "missing 'id-property' property for SectionConfig style enum"),
|
||||
};
|
||||
let with_type_key: TokenStream = match attribs.remove("type-key") {
|
||||
Some(value) => {
|
||||
let value: syn::LitStr = value.try_into()?;
|
||||
quote_spanned!(value.span() => .with_type_key(#value))
|
||||
}
|
||||
None => TokenStream::new(),
|
||||
};
|
||||
|
||||
let container_attrs = serde::ContainerAttrib::try_from(&enum_ty.attrs[..])?;
|
||||
let Some(tag) = container_attrs.tag.as_ref() else {
|
||||
bail!(name => r#"SectionConfig enum needs a `#[serde(tag = "...")]` container attribute"#);
|
||||
};
|
||||
|
||||
let mut variants = Vec::new();
|
||||
let mut register_sections = TokenStream::new();
|
||||
let mut to_type = TokenStream::new();
|
||||
for variant in &mut enum_ty.variants {
|
||||
let field = match &variant.fields {
|
||||
syn::Fields::Unnamed(field) if field.unnamed.len() == 1 => &field.unnamed[0],
|
||||
_ => bail!(variant => "SectionConfig style enum can only have newtype variants"),
|
||||
};
|
||||
|
||||
let attrs = serde::VariantAttrib::try_from(&variant.attrs[..])?;
|
||||
let variant_string = if let Some(renamed) = attrs.rename {
|
||||
renamed
|
||||
} else if let Some(rename_all) = container_attrs.rename_all {
|
||||
let name = rename_all.apply_to_variant(&variant.ident.to_string());
|
||||
syn::LitStr::new(&name, variant.ident.span())
|
||||
} else {
|
||||
let name = &variant.ident;
|
||||
syn::LitStr::new(&name.to_string(), name.span())
|
||||
};
|
||||
|
||||
let field_attrs = EnumFieldAttributes::from_attributes(&mut variant.attrs);
|
||||
let with_type_key = if let Some(key) = field_attrs.type_key() {
|
||||
quote_spanned!(key.span() => .with_type_key(#key))
|
||||
} else {
|
||||
TokenStream::new()
|
||||
};
|
||||
|
||||
let variant_ident = &variant.ident;
|
||||
let ty = &field.ty;
|
||||
variants.push((
|
||||
variant_string.value(),
|
||||
quote_spanned! { variant.ident.span() =>
|
||||
(
|
||||
#variant_string,
|
||||
&<#ty as ::proxmox_schema::ApiType>::API_SCHEMA,
|
||||
),
|
||||
},
|
||||
));
|
||||
register_sections.extend(quote_spanned! { variant.ident.span() =>
|
||||
this.register_plugin(
|
||||
::proxmox_section_config::SectionConfigPlugin::new(
|
||||
#variant_string.to_string(),
|
||||
Some(#id_property.to_string()),
|
||||
const {
|
||||
match &<#ty as ::proxmox_schema::ApiType>::API_SCHEMA {
|
||||
::proxmox_schema::Schema::Object(schema) => schema,
|
||||
::proxmox_schema::Schema::OneOf(schema) => schema,
|
||||
_ => panic!("enum requires an object schema"),
|
||||
}
|
||||
}
|
||||
)
|
||||
#with_type_key
|
||||
);
|
||||
});
|
||||
to_type.extend(quote_spanned! { variant.ident.span() =>
|
||||
Self::#variant_ident(_) => #variant_string,
|
||||
});
|
||||
}
|
||||
variants.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
let variants = variants
|
||||
.into_iter()
|
||||
.map(|(_name, def)| def)
|
||||
.collect::<TokenStream>();
|
||||
|
||||
Ok(quote_spanned! { name.span() =>
|
||||
#enum_ty
|
||||
|
||||
impl ::proxmox_schema::ApiType for #name {
|
||||
const API_SCHEMA: ::proxmox_schema::Schema =
|
||||
::proxmox_schema::OneOfSchema::new(
|
||||
#description,
|
||||
&(#tag, false, &#id_schema.schema()),
|
||||
&[#variants],
|
||||
)
|
||||
.schema();
|
||||
}
|
||||
|
||||
impl ::proxmox_section_config::typed::ApiSectionDataEntry for #name {
|
||||
const INTERNALLY_TAGGED: Option<&'static str> = Some(#tag);
|
||||
|
||||
fn section_config() -> &'static ::proxmox_section_config::SectionConfig {
|
||||
static CONFIG: ::std::sync::OnceLock<::proxmox_section_config::SectionConfig> =
|
||||
::std::sync::OnceLock::new();
|
||||
|
||||
CONFIG.get_or_init(|| {
|
||||
let id_schema = const {
|
||||
<Self as ::proxmox_schema::ApiType>::API_SCHEMA
|
||||
.unwrap_one_of_schema()
|
||||
.type_property_entry
|
||||
.2
|
||||
};
|
||||
let mut this = ::proxmox_section_config::SectionConfig::new(id_schema)
|
||||
#with_type_key;
|
||||
#register_sections
|
||||
this
|
||||
})
|
||||
}
|
||||
|
||||
fn section_type(&self) -> &'static str {
|
||||
match self {
|
||||
#to_type
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -109,28 +109,11 @@ impl TryFrom<JSONObject> for ReturnSchema {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
enum MethodFlavor {
|
||||
Normal,
|
||||
Serializing,
|
||||
Streaming,
|
||||
}
|
||||
|
||||
struct MethodInfo {
|
||||
input_schema: Schema,
|
||||
return_type: Option<ReturnType>,
|
||||
func: syn::ItemFn,
|
||||
wrapper_ts: TokenStream,
|
||||
default_consts: TokenStream,
|
||||
flavor: MethodFlavor,
|
||||
is_async: bool,
|
||||
}
|
||||
|
||||
/// Parse `input`, `returns` and `protected` attributes out of an function annotated
|
||||
/// with an `#[api]` attribute and produce a `const ApiMethod` named after the function.
|
||||
///
|
||||
/// See the top level macro documentation for a complete example.
|
||||
pub fn handle_method(mut attribs: JSONObject, func: syn::ItemFn) -> Result<TokenStream, Error> {
|
||||
pub fn handle_method(mut attribs: JSONObject, mut func: syn::ItemFn) -> Result<TokenStream, Error> {
|
||||
let input_schema: Schema = match attribs.remove("input") {
|
||||
Some(input) => input.into_object("input schema definition")?.try_into()?,
|
||||
None => Schema {
|
||||
@ -141,7 +124,7 @@ pub fn handle_method(mut attribs: JSONObject, func: syn::ItemFn) -> Result<Token
|
||||
},
|
||||
};
|
||||
|
||||
let input_schema = if input_schema.as_object().is_some() {
|
||||
let mut input_schema = if input_schema.as_object().is_some() {
|
||||
input_schema
|
||||
} else {
|
||||
error!(
|
||||
@ -154,70 +137,11 @@ pub fn handle_method(mut attribs: JSONObject, func: syn::ItemFn) -> Result<Token
|
||||
schema
|
||||
};
|
||||
|
||||
let return_type: Option<ReturnType> = attribs
|
||||
let mut return_type: Option<ReturnType> = attribs
|
||||
.remove("returns")
|
||||
.map(|ret| ret.try_into())
|
||||
.transpose()?;
|
||||
|
||||
/* FIXME: Once the deprecation period is over:
|
||||
if let Some(streaming) = attribs.remove("streaming") {
|
||||
error!(
|
||||
streaming.span(),
|
||||
"streaming attribute was renamed to 'serializing', as it did not actually stream"
|
||||
);
|
||||
}
|
||||
*/
|
||||
|
||||
let streaming: Option<syn::LitBool> = attribs
|
||||
.remove("streaming")
|
||||
.map(TryFrom::try_from)
|
||||
.transpose()?;
|
||||
let serializing: Option<syn::LitBool> = attribs
|
||||
.remove("serializing")
|
||||
.map(TryFrom::try_from)
|
||||
.transpose()?;
|
||||
let deprecation_warning = if let Some(streaming) = streaming.clone() {
|
||||
let deprecation_name = Ident::new(
|
||||
&format!("attribute_in_{}", func.sig.ident),
|
||||
streaming.span(),
|
||||
);
|
||||
quote! {
|
||||
mod #deprecation_name {
|
||||
#[deprecated = "'streaming' attribute is being renamed to 'serializing'"]
|
||||
fn streaming() {}
|
||||
fn trigger_deprecation_warning() { streaming() }
|
||||
}
|
||||
}
|
||||
} else {
|
||||
TokenStream::new()
|
||||
};
|
||||
let serializing = streaming
|
||||
.or(serializing)
|
||||
.unwrap_or(syn::LitBool::new(false, Span::call_site()));
|
||||
let streaming: syn::LitBool = attribs
|
||||
.remove("stream")
|
||||
.map(TryFrom::try_from)
|
||||
.transpose()?
|
||||
.unwrap_or(syn::LitBool::new(false, Span::call_site()));
|
||||
|
||||
let mut method_info = MethodInfo {
|
||||
input_schema,
|
||||
return_type,
|
||||
wrapper_ts: TokenStream::new(),
|
||||
default_consts: TokenStream::new(),
|
||||
is_async: func.sig.asyncness.is_some(),
|
||||
flavor: match (serializing.value(), streaming.value()) {
|
||||
(false, false) => MethodFlavor::Normal,
|
||||
(true, false) => MethodFlavor::Serializing,
|
||||
(false, true) => MethodFlavor::Streaming,
|
||||
(true, true) => {
|
||||
error!(serializing => "'stream' and 'serializing' attributes are in conflict");
|
||||
MethodFlavor::Normal
|
||||
}
|
||||
},
|
||||
func,
|
||||
};
|
||||
|
||||
let access_setter = match attribs.remove("access") {
|
||||
Some(access) => {
|
||||
let access = Access::try_from(access.into_object("access rules")?)?;
|
||||
@ -245,6 +169,12 @@ pub fn handle_method(mut attribs: JSONObject, func: syn::ItemFn) -> Result<Token
|
||||
.transpose()?
|
||||
.unwrap_or(false);
|
||||
|
||||
let streaming: bool = attribs
|
||||
.remove("streaming")
|
||||
.map(TryFrom::try_from)
|
||||
.transpose()?
|
||||
.unwrap_or(false);
|
||||
|
||||
if !attribs.is_empty() {
|
||||
error!(
|
||||
attribs.span(),
|
||||
@ -253,32 +183,29 @@ pub fn handle_method(mut attribs: JSONObject, func: syn::ItemFn) -> Result<Token
|
||||
);
|
||||
}
|
||||
|
||||
let (doc_comment, doc_span) = util::get_doc_comments(&method_info.func.attrs)?;
|
||||
let (doc_comment, doc_span) = util::get_doc_comments(&func.attrs)?;
|
||||
util::derive_descriptions(
|
||||
&mut method_info.input_schema,
|
||||
method_info
|
||||
.return_type
|
||||
.as_mut()
|
||||
.and_then(ReturnType::as_mut_schema),
|
||||
&mut input_schema,
|
||||
return_type.as_mut().and_then(ReturnType::as_mut_schema),
|
||||
&doc_comment,
|
||||
doc_span,
|
||||
)?;
|
||||
|
||||
let api_func_name = handle_function_signature(&mut method_info)?;
|
||||
let mut wrapper_ts = TokenStream::new();
|
||||
let mut default_consts = TokenStream::new();
|
||||
|
||||
let is_async = func.sig.asyncness.is_some();
|
||||
let api_func_name = handle_function_signature(
|
||||
&mut input_schema,
|
||||
&mut return_type,
|
||||
&mut func,
|
||||
&mut wrapper_ts,
|
||||
&mut default_consts,
|
||||
streaming,
|
||||
)?;
|
||||
|
||||
// input schema is done, let's give the method body a chance to extract default parameters:
|
||||
DefaultParameters(&method_info.input_schema).visit_item_fn_mut(&mut method_info.func);
|
||||
|
||||
let MethodInfo {
|
||||
input_schema,
|
||||
func,
|
||||
wrapper_ts,
|
||||
default_consts,
|
||||
return_type,
|
||||
flavor,
|
||||
is_async,
|
||||
..
|
||||
} = method_info;
|
||||
DefaultParameters(&input_schema).visit_item_fn_mut(&mut func);
|
||||
|
||||
let vis = &func.vis;
|
||||
let func_name = &func.sig.ident;
|
||||
@ -297,25 +224,11 @@ pub fn handle_method(mut attribs: JSONObject, func: syn::ItemFn) -> Result<Token
|
||||
returns_schema_setter = quote! { .returns(#inner) };
|
||||
}
|
||||
|
||||
let api_handler = match (flavor, is_async) {
|
||||
(MethodFlavor::Normal, true) => {
|
||||
quote! { ::proxmox_router::ApiHandler::Async(&#api_func_name) }
|
||||
}
|
||||
(MethodFlavor::Normal, false) => {
|
||||
quote! { ::proxmox_router::ApiHandler::Sync(&#api_func_name) }
|
||||
}
|
||||
(MethodFlavor::Serializing, true) => {
|
||||
quote! { ::proxmox_router::ApiHandler::SerializingAsync(&#api_func_name) }
|
||||
}
|
||||
(MethodFlavor::Serializing, false) => {
|
||||
quote! { ::proxmox_router::ApiHandler::SerializingSync(&#api_func_name) }
|
||||
}
|
||||
(MethodFlavor::Streaming, true) => {
|
||||
quote! { ::proxmox_router::ApiHandler::StreamAsync(&#api_func_name) }
|
||||
}
|
||||
(MethodFlavor::Streaming, false) => {
|
||||
quote! { ::proxmox_router::ApiHandler::StreamSync(&#api_func_name) }
|
||||
}
|
||||
let api_handler = match (streaming, is_async) {
|
||||
(true, true) => quote! { ::proxmox_router::ApiHandler::StreamingAsync(&#api_func_name) },
|
||||
(true, false) => quote! { ::proxmox_router::ApiHandler::StreamingSync(&#api_func_name) },
|
||||
(false, true) => quote! { ::proxmox_router::ApiHandler::Async(&#api_func_name) },
|
||||
(false, false) => quote! { ::proxmox_router::ApiHandler::Sync(&#api_func_name) },
|
||||
};
|
||||
|
||||
Ok(quote_spanned! { func.sig.span() =>
|
||||
@ -336,22 +249,20 @@ pub fn handle_method(mut attribs: JSONObject, func: syn::ItemFn) -> Result<Token
|
||||
#wrapper_ts
|
||||
|
||||
#func
|
||||
|
||||
#deprecation_warning
|
||||
})
|
||||
//Ok(quote::quote!(#func))
|
||||
}
|
||||
|
||||
enum ParameterType {
|
||||
enum ParameterType<'a> {
|
||||
Value,
|
||||
ApiMethod,
|
||||
RpcEnv,
|
||||
Normal(NormalParameter),
|
||||
Normal(NormalParameter<'a>),
|
||||
}
|
||||
|
||||
struct NormalParameter {
|
||||
ty: syn::Type,
|
||||
entry: ObjectEntry,
|
||||
struct NormalParameter<'a> {
|
||||
ty: &'a syn::Type,
|
||||
entry: &'a ObjectEntry,
|
||||
}
|
||||
|
||||
fn check_input_type(input: &syn::FnArg) -> Result<(&syn::PatType, &syn::PatIdent), syn::Error> {
|
||||
@ -370,8 +281,16 @@ fn check_input_type(input: &syn::FnArg) -> Result<(&syn::PatType, &syn::PatIdent
|
||||
Ok((pat_type, pat))
|
||||
}
|
||||
|
||||
fn handle_function_signature(method_info: &mut MethodInfo) -> Result<Ident, Error> {
|
||||
let sig = &method_info.func.sig;
|
||||
fn handle_function_signature(
|
||||
input_schema: &mut Schema,
|
||||
_return_type: &mut Option<ReturnType>,
|
||||
func: &mut syn::ItemFn,
|
||||
wrapper_ts: &mut TokenStream,
|
||||
default_consts: &mut TokenStream,
|
||||
streaming: bool,
|
||||
) -> Result<Ident, Error> {
|
||||
let sig = &func.sig;
|
||||
let is_async = sig.asyncness.is_some();
|
||||
|
||||
let mut api_method_param = None;
|
||||
let mut rpc_env_param = None;
|
||||
@ -389,10 +308,7 @@ fn handle_function_signature(method_info: &mut MethodInfo) -> Result<Ident, Erro
|
||||
};
|
||||
|
||||
// For any named type which exists on the function signature...
|
||||
if let Some(entry) = method_info
|
||||
.input_schema
|
||||
.find_obj_property_by_ident_mut(&pat.ident.to_string())
|
||||
{
|
||||
if let Some(entry) = input_schema.find_obj_property_by_ident_mut(&pat.ident.to_string()) {
|
||||
// try to infer the type in the schema if it is not specified explicitly:
|
||||
let is_option = util::infer_type(&mut entry.schema, &pat_type.ty)?;
|
||||
let has_default = entry.schema.find_schema_property("default").is_some();
|
||||
@ -440,49 +356,75 @@ fn handle_function_signature(method_info: &mut MethodInfo) -> Result<Ident, Erro
|
||||
// bail out with an error.
|
||||
let pat_ident = pat.ident.unraw();
|
||||
let mut param_name: FieldName = pat_ident.clone().into();
|
||||
let param_type = if let Some(entry) = method_info
|
||||
.input_schema
|
||||
.find_obj_property_by_ident(&pat_ident.to_string())
|
||||
{
|
||||
if let SchemaItem::Inferred(span) = &entry.schema.item {
|
||||
bail!(*span, "failed to infer type");
|
||||
}
|
||||
param_name = entry.name.clone();
|
||||
// Found an explicit parameter: extract it:
|
||||
ParameterType::Normal(NormalParameter {
|
||||
ty: (*pat_type.ty).clone(),
|
||||
entry: entry.clone(),
|
||||
})
|
||||
} else if is_api_method_type(&pat_type.ty) {
|
||||
if api_method_param.is_some() {
|
||||
error!(pat_type => "multiple ApiMethod parameters found");
|
||||
let param_type =
|
||||
if let Some(entry) = input_schema.find_obj_property_by_ident(&pat_ident.to_string()) {
|
||||
if let SchemaItem::Inferred(span) = &entry.schema.item {
|
||||
bail!(*span, "failed to infer type");
|
||||
}
|
||||
param_name = entry.name.clone();
|
||||
// Found an explicit parameter: extract it:
|
||||
ParameterType::Normal(NormalParameter {
|
||||
ty: &pat_type.ty,
|
||||
entry,
|
||||
})
|
||||
} else if is_api_method_type(&pat_type.ty) {
|
||||
if api_method_param.is_some() {
|
||||
error!(pat_type => "multiple ApiMethod parameters found");
|
||||
continue;
|
||||
}
|
||||
api_method_param = Some(param_list.len());
|
||||
ParameterType::ApiMethod
|
||||
} else if is_rpc_env_type(&pat_type.ty) {
|
||||
if rpc_env_param.is_some() {
|
||||
error!(pat_type => "multiple RpcEnvironment parameters found");
|
||||
continue;
|
||||
}
|
||||
rpc_env_param = Some(param_list.len());
|
||||
ParameterType::RpcEnv
|
||||
} else if is_value_type(&pat_type.ty) {
|
||||
if value_param.is_some() {
|
||||
error!(pat_type => "multiple additional Value parameters found");
|
||||
continue;
|
||||
}
|
||||
value_param = Some(param_list.len());
|
||||
ParameterType::Value
|
||||
} else {
|
||||
error!(&pat_ident => "unexpected parameter {:?}", pat_ident.to_string());
|
||||
continue;
|
||||
}
|
||||
api_method_param = Some(param_list.len());
|
||||
ParameterType::ApiMethod
|
||||
} else if is_rpc_env_type(&pat_type.ty) {
|
||||
if rpc_env_param.is_some() {
|
||||
error!(pat_type => "multiple RpcEnvironment parameters found");
|
||||
continue;
|
||||
}
|
||||
rpc_env_param = Some(param_list.len());
|
||||
ParameterType::RpcEnv
|
||||
} else if is_value_type(&pat_type.ty) {
|
||||
if value_param.is_some() {
|
||||
error!(pat_type => "multiple additional Value parameters found");
|
||||
continue;
|
||||
}
|
||||
value_param = Some(param_list.len());
|
||||
ParameterType::Value
|
||||
} else {
|
||||
error!(&pat_ident => "unexpected parameter {:?}", pat_ident.to_string());
|
||||
continue;
|
||||
};
|
||||
};
|
||||
|
||||
param_list.push((param_name, param_type));
|
||||
}
|
||||
|
||||
create_wrapper_function(method_info, param_list)
|
||||
/*
|
||||
* Doing this is actually unreliable, since we cannot support aliased Result types, or all
|
||||
* poassible combinations of paths like `result::Result<>` or `std::result::Result<>` or
|
||||
* `ApiResult`.
|
||||
|
||||
// Secondly, take a look at the return type, and then decide what to do:
|
||||
// If our function has the correct signature we may not even need a wrapper.
|
||||
if is_default_return_type(&sig.output)
|
||||
&& (
|
||||
param_list.len(),
|
||||
value_param,
|
||||
api_method_param,
|
||||
rpc_env_param,
|
||||
) == (3, Some(0), Some(1), Some(2))
|
||||
{
|
||||
return Ok(sig.ident.clone());
|
||||
}
|
||||
*/
|
||||
|
||||
create_wrapper_function(
|
||||
//input_schema,
|
||||
//return_type,
|
||||
param_list,
|
||||
func,
|
||||
wrapper_ts,
|
||||
default_consts,
|
||||
is_async,
|
||||
streaming,
|
||||
)
|
||||
}
|
||||
|
||||
fn is_api_method_type(ty: &syn::Type) -> bool {
|
||||
@ -532,18 +474,24 @@ fn is_value_type(ty: &syn::Type) -> bool {
|
||||
}
|
||||
|
||||
fn create_wrapper_function(
|
||||
method_info: &mut MethodInfo,
|
||||
//_input_schema: &Schema,
|
||||
//_returns_schema: &Option<ReturnType>,
|
||||
param_list: Vec<(FieldName, ParameterType)>,
|
||||
func: &syn::ItemFn,
|
||||
wrapper_ts: &mut TokenStream,
|
||||
default_consts: &mut TokenStream,
|
||||
is_async: bool,
|
||||
streaming: bool,
|
||||
) -> Result<Ident, Error> {
|
||||
let api_func_name = Ident::new(
|
||||
&format!("api_function_{}", &method_info.func.sig.ident),
|
||||
method_info.func.sig.ident.span(),
|
||||
&format!("api_function_{}", &func.sig.ident),
|
||||
func.sig.ident.span(),
|
||||
);
|
||||
|
||||
let mut body = TokenStream::new();
|
||||
let mut args = TokenStream::new();
|
||||
|
||||
let func_uc = method_info.func.sig.ident.to_string().to_uppercase();
|
||||
let func_uc = func.sig.ident.to_string().to_uppercase();
|
||||
|
||||
for (name, param) in param_list {
|
||||
let span = name.span();
|
||||
@ -559,71 +507,69 @@ fn create_wrapper_function(
|
||||
&func_uc,
|
||||
name,
|
||||
span,
|
||||
&mut method_info.default_consts,
|
||||
default_consts,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// build the wrapping function:
|
||||
let func_name = &method_info.func.sig.ident;
|
||||
let func_name = &func.sig.ident;
|
||||
|
||||
let await_keyword = if method_info.is_async {
|
||||
Some(quote!(.await))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let await_keyword = if is_async { Some(quote!(.await)) } else { None };
|
||||
|
||||
let question_mark = match method_info.func.sig.output {
|
||||
let question_mark = match func.sig.output {
|
||||
syn::ReturnType::Default => None,
|
||||
_ => Some(quote!(?)),
|
||||
};
|
||||
|
||||
let body = match method_info.flavor {
|
||||
MethodFlavor::Normal => {
|
||||
quote! {
|
||||
if let ::serde_json::Value::Object(ref mut input_map) = &mut input_params {
|
||||
#body
|
||||
Ok(::serde_json::to_value(#func_name(#args) #await_keyword #question_mark)?)
|
||||
} else {
|
||||
::anyhow::bail!("api function wrapper called with a non-object json value");
|
||||
}
|
||||
}
|
||||
}
|
||||
MethodFlavor::Serializing => {
|
||||
quote! {
|
||||
if let ::serde_json::Value::Object(ref mut input_map) = &mut input_params {
|
||||
#body
|
||||
let res = #func_name(#args) #await_keyword #question_mark;
|
||||
let res: ::std::boxed::Box<dyn ::proxmox_router::SerializableReturn + Send> = ::std::boxed::Box::new(res);
|
||||
Ok(res)
|
||||
} else {
|
||||
::anyhow::bail!("api function wrapper called with a non-object json value");
|
||||
}
|
||||
}
|
||||
}
|
||||
MethodFlavor::Streaming => {
|
||||
let ty = if method_info.is_async {
|
||||
quote! { ::proxmox_router::Stream }
|
||||
let body = if streaming {
|
||||
quote! {
|
||||
if let ::serde_json::Value::Object(ref mut input_map) = &mut input_params {
|
||||
#body
|
||||
let res = #func_name(#args) #await_keyword #question_mark;
|
||||
let res: ::std::boxed::Box<dyn ::proxmox_router::SerializableReturn + Send> = ::std::boxed::Box::new(res);
|
||||
Ok(res)
|
||||
} else {
|
||||
quote! { ::proxmox_router::SyncStream }
|
||||
};
|
||||
quote! {
|
||||
if let ::serde_json::Value::Object(ref mut input_map) = &mut input_params {
|
||||
#body
|
||||
let res = #func_name(#args) #await_keyword #question_mark;
|
||||
let res = #ty::from(res);
|
||||
Ok(res)
|
||||
} else {
|
||||
::anyhow::bail!("api function wrapper called with a non-object json value");
|
||||
}
|
||||
::anyhow::bail!("api function wrapper called with a non-object json value");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
quote! {
|
||||
if let ::serde_json::Value::Object(ref mut input_map) = &mut input_params {
|
||||
#body
|
||||
Ok(::serde_json::to_value(#func_name(#args) #await_keyword #question_mark)?)
|
||||
} else {
|
||||
::anyhow::bail!("api function wrapper called with a non-object json value");
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match (method_info.flavor, method_info.is_async) {
|
||||
(MethodFlavor::Normal, true) => {
|
||||
method_info.wrapper_ts.extend(quote! {
|
||||
match (streaming, is_async) {
|
||||
(true, true) => {
|
||||
wrapper_ts.extend(quote! {
|
||||
fn #api_func_name<'a>(
|
||||
mut input_params: ::serde_json::Value,
|
||||
api_method_param: &'static ::proxmox_router::ApiMethod,
|
||||
rpc_env_param: &'a mut dyn ::proxmox_router::RpcEnvironment,
|
||||
) -> ::proxmox_router::StreamingApiFuture<'a> {
|
||||
::std::boxed::Box::pin(async move { #body })
|
||||
}
|
||||
});
|
||||
}
|
||||
(true, false) => {
|
||||
wrapper_ts.extend(quote! {
|
||||
fn #api_func_name(
|
||||
mut input_params: ::serde_json::Value,
|
||||
api_method_param: &::proxmox_router::ApiMethod,
|
||||
rpc_env_param: &mut dyn ::proxmox_router::RpcEnvironment,
|
||||
) -> ::std::result::Result<::std::boxed::Box<dyn ::proxmox_router::SerializableReturn + Send>, ::anyhow::Error> {
|
||||
#body
|
||||
}
|
||||
});
|
||||
}
|
||||
(false, true) => {
|
||||
wrapper_ts.extend(quote! {
|
||||
fn #api_func_name<'a>(
|
||||
mut input_params: ::serde_json::Value,
|
||||
api_method_param: &'static ::proxmox_router::ApiMethod,
|
||||
@ -643,8 +589,8 @@ fn create_wrapper_function(
|
||||
}
|
||||
});
|
||||
}
|
||||
(MethodFlavor::Normal, false) => {
|
||||
method_info.wrapper_ts.extend(quote! {
|
||||
(false, false) => {
|
||||
wrapper_ts.extend(quote! {
|
||||
fn #api_func_name(
|
||||
mut input_params: ::serde_json::Value,
|
||||
api_method_param: &::proxmox_router::ApiMethod,
|
||||
@ -654,50 +600,6 @@ fn create_wrapper_function(
|
||||
}
|
||||
});
|
||||
}
|
||||
(MethodFlavor::Serializing, true) => {
|
||||
method_info.wrapper_ts.extend(quote! {
|
||||
fn #api_func_name<'a>(
|
||||
mut input_params: ::serde_json::Value,
|
||||
api_method_param: &'static ::proxmox_router::ApiMethod,
|
||||
rpc_env_param: &'a mut dyn ::proxmox_router::RpcEnvironment,
|
||||
) -> ::proxmox_router::SerializingApiFuture<'a> {
|
||||
::std::boxed::Box::pin(async move { #body })
|
||||
}
|
||||
});
|
||||
}
|
||||
(MethodFlavor::Serializing, false) => {
|
||||
method_info.wrapper_ts.extend(quote! {
|
||||
fn #api_func_name(
|
||||
mut input_params: ::serde_json::Value,
|
||||
api_method_param: &::proxmox_router::ApiMethod,
|
||||
rpc_env_param: &mut dyn ::proxmox_router::RpcEnvironment,
|
||||
) -> ::std::result::Result<::std::boxed::Box<dyn ::proxmox_router::SerializableReturn + Send>, ::anyhow::Error> {
|
||||
#body
|
||||
}
|
||||
});
|
||||
}
|
||||
(MethodFlavor::Streaming, true) => {
|
||||
method_info.wrapper_ts.extend(quote! {
|
||||
fn #api_func_name<'a>(
|
||||
mut input_params: ::serde_json::Value,
|
||||
api_method_param: &'static ::proxmox_router::ApiMethod,
|
||||
rpc_env_param: &'a mut dyn ::proxmox_router::RpcEnvironment,
|
||||
) -> ::proxmox_router::StreamApiFuture<'a> {
|
||||
::std::boxed::Box::pin(async move { #body })
|
||||
}
|
||||
});
|
||||
}
|
||||
(MethodFlavor::Streaming, false) => {
|
||||
method_info.wrapper_ts.extend(quote! {
|
||||
fn #api_func_name(
|
||||
mut input_params: ::serde_json::Value,
|
||||
api_method_param: &::proxmox_router::ApiMethod,
|
||||
rpc_env_param: &mut dyn ::proxmox_router::RpcEnvironment,
|
||||
) -> ::std::result::Result<::proxmox_router::SyncStream, ::anyhow::Error> {
|
||||
#body
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(api_func_name)
|
||||
@ -746,7 +648,7 @@ fn extract_normal_parameter(
|
||||
});
|
||||
}
|
||||
|
||||
let no_option_type = util::is_option_type(¶m.ty).is_none();
|
||||
let no_option_type = util::is_option_type(param.ty).is_none();
|
||||
|
||||
if let Some(def) = &default_value {
|
||||
let name_uc = name.as_ident().to_string().to_uppercase();
|
||||
@ -756,9 +658,8 @@ fn extract_normal_parameter(
|
||||
);
|
||||
|
||||
// strip possible Option<> from this type:
|
||||
let ty = util::is_option_type(¶m.ty).unwrap_or(¶m.ty);
|
||||
let ty = util::is_option_type(param.ty).unwrap_or(param.ty);
|
||||
default_consts.extend(quote_spanned! { span =>
|
||||
#[allow(dead_code)]
|
||||
pub const #name: #ty = #def;
|
||||
});
|
||||
|
||||
@ -782,7 +683,7 @@ fn extract_normal_parameter(
|
||||
body.extend(quote_spanned! { span => ; });
|
||||
}
|
||||
Some(flatten_span) => {
|
||||
// Flattened parameter, we need to use our special partial-object deserializer.
|
||||
// Flattened parameter, we need ot use our special partial-object deserializer.
|
||||
// Also note that we do not support simply nesting schemas. We need a referenced type.
|
||||
// Otherwise the expanded code here gets ugly and we'd need to make sure we pull out
|
||||
// nested schemas into named variables first... No thanks.
|
||||
@ -936,7 +837,7 @@ fn serialize_input_schema(
|
||||
|
||||
struct DefaultParameters<'a>(&'a Schema);
|
||||
|
||||
impl VisitMut for DefaultParameters<'_> {
|
||||
impl<'a> VisitMut for DefaultParameters<'a> {
|
||||
fn visit_expr_mut(&mut self, i: &mut syn::Expr) {
|
||||
if let syn::Expr::Macro(exprmac) = i {
|
||||
if exprmac.mac.path.is_ident("api_get_default") {
|
||||
@ -956,7 +857,7 @@ impl VisitMut for DefaultParameters<'_> {
|
||||
}
|
||||
}
|
||||
|
||||
impl DefaultParameters<'_> {
|
||||
impl<'a> DefaultParameters<'a> {
|
||||
fn get_default(&self, param_tokens: TokenStream) -> Result<syn::Expr, syn::Error> {
|
||||
let param_name: syn::LitStr = syn::parse2(param_tokens)?;
|
||||
match self.0.find_obj_property_by_ident(¶m_name.value()) {
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user