diff --git a/.cargo/config b/.cargo/config.toml similarity index 100% rename from .cargo/config rename to .cargo/config.toml diff --git a/Cargo.toml b/Cargo.toml index 2a078fa3d..cf9b8dc72 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "3.1.5" +version = "3.2.8" authors = [ "Dietmar Maurer ", "Dominik Csapak ", @@ -13,6 +13,7 @@ authors = [ edition = "2021" license = "AGPL-3" repository = "https://git.proxmox.com/?p=proxmox-backup.git" +rust-version = "1.80" [package] name = "proxmox-backup" @@ -53,39 +54,48 @@ path = "src/lib.rs" [workspace.dependencies] # proxmox workspace -proxmox-apt = "0.10.5" +proxmox-apt = { version = "0.11", features = [ "cache" ] } +proxmox-apt-api-types = "1.0.1" proxmox-async = "0.4" -proxmox-auth-api = "0.3" +proxmox-auth-api = "0.4" proxmox-borrow = "1" proxmox-compression = "0.2" +proxmox-config-digest = "0.1.0" +proxmox-daemon = "0.1.0" proxmox-fuse = "0.1.3" proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below proxmox-human-byte = "0.1" proxmox-io = "1.0.1" # tools and client use "tokio" feature proxmox-lang = "1.1" +proxmox-log = "0.2.4" proxmox-ldap = "0.2.1" -proxmox-metrics = "0.3" +proxmox-metrics = "0.3.1" +proxmox-notify = "0.4" proxmox-openid = "0.10.0" -proxmox-rest-server = { version = "0.5.1", features = [ "templates" ] } +proxmox-rest-server = { version = "0.8", features = [ "templates" ] } # some use "cli", some use "cli" and "server", pbs-config uses nothing -proxmox-router = { version = "2.0.0", default_features = false } -proxmox-rrd = { version = "0.1" } +proxmox-router = { version = "3.0.0", default-features = false } +proxmox-rrd = "0.4" +proxmox-rrd-api-types = "1.0.2" # everything but pbs-config and pbs-client use "api-macro" proxmox-schema = "3" proxmox-section-config = "2" proxmox-serde = "0.1.1" +proxmox-shared-cache = "0.1" proxmox-shared-memory = "0.3.0" proxmox-sortable-macro = "0.1.2" proxmox-subscription = { version = "0.4.2", features = [ "api-types" ] } -proxmox-sys = "0.5.3" -proxmox-tfa = { version = "4.0.4", features = [ "api", "api-types" ] } -proxmox-time = "1.1.6" +proxmox-sys = "0.6" +proxmox-systemd = "0.1" +proxmox-tfa = { version = "5", features = [ "api", "api-types" ] } +proxmox-time = "2" proxmox-uuid = "1" +proxmox-worker-task = "0.1" # other proxmox crates pathpatterns = "0.3" -proxmox-acme = "0.5" -pxar = "0.10.2" +proxmox-acme = "0.5.3" +pxar = "0.12" # PBS workspace pbs-api-types = { path = "pbs-api-types" } @@ -104,7 +114,7 @@ anyhow = "1.0" async-trait = "0.1.56" #apt-pkg-native = "0.3.2" base64 = "0.13" -bitflags = "1.2.1" +bitflags = "2.4" bytes = "1.0" cidr = "0.2.1" crc32fast = "1" @@ -115,12 +125,11 @@ env_logger = "0.10" flate2 = "1.0" foreign-types = "0.3" futures = "0.3" -h2 = { version = "0.3", features = [ "stream" ] } +h2 = { version = "0.4", features = [ "stream" ] } handlebars = "3.0" hex = "0.4.3" http = "0.2" hyper = { version = "0.14", features = [ "full" ] } -lazy_static = "1.4" libc = "0.2" log = "0.4.17" nix = "0.26.1" @@ -144,33 +153,30 @@ tokio = "1.6" tokio-openssl = "0.6.1" tokio-stream = "0.1.0" tokio-util = { version = "0.7", features = [ "io" ] } +tracing = "0.1" tower-service = "0.3.0" udev = "0.4" url = "2.1" walkdir = "2" xdg = "2.2" zstd = { version = "0.12", features = [ "bindgen" ] } +zstd-safe = "6.0" [dependencies] anyhow.workspace = true async-trait.workspace = true -#apt-pkg-native.workspace = true base64.workspace = true -bitflags.workspace = true bytes.workspace = true 
cidr.workspace = true const_format.workspace = true crc32fast.workspace = true crossbeam-channel.workspace = true endian_trait.workspace = true -flate2.workspace = true futures.workspace = true h2.workspace = true -handlebars.workspace = true hex.workspace = true http.workspace = true hyper.workspace = true -lazy_static.workspace = true libc.workspace = true log.workspace = true nix.workspace = true @@ -183,7 +189,6 @@ regex.workspace = true rustyline.workspace = true serde.workspace = true serde_json.workspace = true -siphasher.workspace = true syslog.workspace = true termcolor.workspace = true thiserror.workspace = true @@ -191,42 +196,48 @@ tokio = { workspace = true, features = [ "fs", "io-util", "io-std", "macros", "n tokio-openssl.workspace = true tokio-stream.workspace = true tokio-util = { workspace = true, features = [ "codec" ] } -tower-service.workspace = true +tracing.workspace = true udev.workspace = true url.workspace = true walkdir.workspace = true -xdg.workspace = true zstd.workspace = true #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true } # proxmox workspace proxmox-apt.workspace = true +proxmox-apt-api-types.workspace = true proxmox-async.workspace = true proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] } proxmox-compression.workspace = true +proxmox-config-digest.workspace = true +proxmox-daemon.workspace = true proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these proxmox-human-byte.workspace = true proxmox-io.workspace = true proxmox-lang.workspace = true +proxmox-log.workspace = true proxmox-ldap.workspace = true proxmox-metrics.workspace = true +proxmox-notify = { workspace = true, features = [ "pbs-context" ] } proxmox-openid.workspace = true proxmox-rest-server = { workspace = true, features = [ "rate-limited-stream" ] } proxmox-router = { workspace = true, features = [ "cli", "server"] } proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-section-config.workspace = true proxmox-serde = { workspace = true, features = [ "serde_json" ] } +proxmox-shared-cache.workspace = true proxmox-shared-memory.workspace = true proxmox-sortable-macro.workspace = true proxmox-subscription.workspace = true proxmox-sys = { workspace = true, features = [ "timer" ] } +proxmox-systemd.workspace = true proxmox-tfa.workspace = true proxmox-time.workspace = true proxmox-uuid.workspace = true +proxmox-worker-task.workspace = true # in their respective repo -pathpatterns.workspace = true proxmox-acme.workspace = true pxar.workspace = true @@ -240,27 +251,34 @@ pbs-key-config.workspace = true pbs-tape.workspace = true pbs-tools.workspace = true proxmox-rrd.workspace = true +proxmox-rrd-api-types.workspace = true # Local path overrides # NOTE: You must run `cargo update` after changing this for it to take effect! 
[patch.crates-io] #proxmox-apt = { path = "../proxmox/proxmox-apt" } +#proxmox-apt-api-types = { path = "../proxmox/proxmox-apt-api-types" } #proxmox-async = { path = "../proxmox/proxmox-async" } #proxmox-auth-api = { path = "../proxmox/proxmox-auth-api" } #proxmox-borrow = { path = "../proxmox/proxmox-borrow" } #proxmox-compression = { path = "../proxmox/proxmox-compression" } +#proxmox-config-digest = { path = "../proxmox/proxmox-config-digest" } +#proxmox-daemon = { path = "../proxmox/proxmox-daemon" } #proxmox-fuse = { path = "../proxmox-fuse" } #proxmox-http = { path = "../proxmox/proxmox-http" } #proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" } #proxmox-io = { path = "../proxmox/proxmox-io" } #proxmox-lang = { path = "../proxmox/proxmox-lang" } +#proxmox-log = { path = "../proxmox/proxmox-log" } #proxmox-ldap = { path = "../proxmox/proxmox-ldap" } #proxmox-metrics = { path = "../proxmox/proxmox-metrics" } +#proxmox-notify = { path = "../proxmox/proxmox-notify" } #proxmox-openid = { path = "../proxmox/proxmox-openid" } #proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" } #proxmox-router = { path = "../proxmox/proxmox-router" } #proxmox-rrd = { path = "../proxmox/proxmox-rrd" } +#proxmox-rrd-api-types = { path = "../proxmox/proxmox-rrd-api-types" } #proxmox-schema = { path = "../proxmox/proxmox-schema" } #proxmox-section-config = { path = "../proxmox/proxmox-section-config" } #proxmox-serde = { path = "../proxmox/proxmox-serde" } @@ -268,9 +286,11 @@ proxmox-rrd.workspace = true #proxmox-sortable-macro = { path = "../proxmox/proxmox-sortable-macro" } #proxmox-subscription = { path = "../proxmox/proxmox-subscription" } #proxmox-sys = { path = "../proxmox/proxmox-sys" } +#proxmox-systemd = { path = "../proxmox/proxmox-systemd" } #proxmox-tfa = { path = "../proxmox/proxmox-tfa" } #proxmox-time = { path = "../proxmox/proxmox-time" } #proxmox-uuid = { path = "../proxmox/proxmox-uuid" } +#proxmox-worker-task = { path = "../proxmox/proxmox-worker-task" } #proxmox-acme = { path = "../proxmox/proxmox-acme" } #pathpatterns = {path = "../pathpatterns" } diff --git a/Makefile b/Makefile index 370c6255e..092b16d39 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,7 @@ include defines.mk PACKAGE := proxmox-backup ARCH := $(DEB_BUILD_ARCH) -SUBDIRS := etc www docs +SUBDIRS := etc www docs templates # Binaries usable by users USR_BIN := \ @@ -33,14 +33,15 @@ RESTORE_BIN := \ SUBCRATES != cargo metadata --no-deps --format-version=1 \ | jq -r .workspace_members'[]' \ - | awk '!/^proxmox-backup[[:space:]]/ { printf "%s ", $$1 }' + | grep "$$PWD/" \ + | sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g' -#ifeq ($(BUILD_MODE), release) +ifeq ($(BUILD_MODE), release) CARGO_BUILD_ARGS += --release --offline -COMPILEDIR := target/release -#else -#COMPILEDIR := target/debug -#endif +COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release +else +COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug +endif ifeq ($(valgrind), yes) CARGO_BUILD_ARGS += --features valgrind @@ -93,7 +94,7 @@ build: cp -a debian \ Cargo.toml src \ $(SUBCRATES) \ - docs etc examples tests www zsh-completions \ + docs etc examples tests www zsh-completions templates \ defines.mk Makefile \ ./build/ rm -f build/Cargo.lock @@ -108,12 +109,15 @@ proxmox-backup-docs: build cd build; dpkg-buildpackage -b -us -uc --no-pre-clean lintian $(DOC_DEB) -# copy the local target/ dir as a build-cache -.PHONY: deb dsc deb-nodoc +.PHONY: deb dsc deb-nodoc deb-nostrip deb-nodoc: build cd build; dpkg-buildpackage -b -us -uc 
--no-pre-clean --build-profiles=nodoc lintian $(DEBS) +deb-nostrip: build + cd build; DEB_BUILD_OPTIONS=nostrip dpkg-buildpackage -b -us -uc + lintian $(DEBS) $(DOC_DEB) + $(DEBS): deb deb: build cd build; dpkg-buildpackage -b -us -uc @@ -176,6 +180,7 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do- --bin proxmox-restore-daemon \ --package proxmox-backup \ --bin docgen \ + --bin pbs2to3 \ --bin proxmox-backup-api \ --bin proxmox-backup-manager \ --bin proxmox-backup-proxy \ @@ -211,6 +216,7 @@ install: $(COMPILED_BINS) install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;) $(MAKE) -C www install $(MAKE) -C docs install + $(MAKE) -C templates install .PHONY: upload upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION) diff --git a/README.rst b/README.rst index b336f3c20..0caa98748 100644 --- a/README.rst +++ b/README.rst @@ -30,7 +30,7 @@ pre-release version number (e.g., "0.1.1-dev.1" instead of "0.1.0"). Local cargo config ================== -This repository ships with a ``.cargo/config`` that replaces the crates.io +This repository ships with a ``.cargo/config.toml`` that replaces the crates.io registry with packaged crates located in ``/usr/share/cargo/registry``. A similar config is also applied building with dh_cargo. Cargo.lock needs to be diff --git a/debian/changelog b/debian/changelog index a1328c7cd..a4803c182 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,294 @@ +rust-proxmox-backup (3.2.8-1) bookworm; urgency=medium + + * switch various log statements in worker tasks to the newer, more flexible + proxmox log crate. With this change, errors from task logs are now also + logged to the system log, increasing their visibility. + + * datastore api: list snapshots: avoid calculating protected attribute + twice per snapshot, this reduces the amounts of file metadata requests. + + * avoid re-calculating the backup snapshot path's date time component when + getting the full path, reducing calls to the relatively slow strftime + function from libc. + + * fix #3699: client: prefer the XDG cache directory for temporary files with + a fallback to using /tmp, as before. + + * sync job: improve log message for when syncing the root namespace. + + * client: increase read buffer from 8 KiB to 4 MiB for raw image based + backups. This reduces the time spent polling between the reader, chunker + and uploader async tasks and thus can improve backup speed significantly, + especially on setups with fast network and storage. + + * client benchmark: avoid unnecessary allocation in the AES benchmark, + causing artificial overhead. The benchmark AES results should now be more + in line with the hardware capability and what the PBS client could already + do. On our test system we saw an increase by an factor of 2.3 on this + specific benchmark. + + * docs: add external metrics server page + + * tfa: webauthn: serialize OriginUrl following RFC6454 + + * factor out apt and apt-repository handling into a new library crate for + re-use in other projects. There should be no functional change. + + * fix various typos all over the place found using the rust based `typos` + tool. + + * datastore: data blob compression: increase compression throughput by + switching away from a higher level zstd method to a lower level one, which + allows us to control the target buffer size directly and thus avoid some + allocation and syscall overhead. 
We saw the compression bandwidth increase + by a factor of 1.19 in our tests where both the source data and the target + datastore were located in memory backed tmpfs. + + * daily-update: ensure notification system context is initialized. + + * backup reader: derive if debug messages should be printed from the global + log level. This avoids printing some debug messages by default, e.g., the + "protocol upgrade done" message from sync jobs. + + * ui: user view: disable 'Unlock TFA' button by default to improve UX if no + user is selected. + + * manager cli: ensure the worker task finishes when triggering a reload of + the system network. + + * fix #5622: backup client: properly handle rate and burst parameters. + Previously, passing any non-integer value, like `1mb`, was ignored. + + * tape: read element status: ignore responses where the library specifies + that it will return a volume tag but then does not include that field in + the actual response. As both the primary and the alternative volume tag + are not required by PBS, this specific error can simply be downgraded to a + warning. + + * pxar: dump archive: print entries to stdout instead of stderr + + * sync jobs: various clean-ups and refactoring that should not result in any + semantic change. + + * metric collection: put metrics in a cache with a 30 minutes lifetime. + + * api: add /status/metrics API to allow pull-based metric server to gather + data directly. + + * partial fix #5560: client: periodically show backup progress + + * docs: add proxmox-backup.node.cfg man page + + * docs: sync: explicitly mention `removed-vanish` flag + + -- Proxmox Support Team Fri, 18 Oct 2024 19:05:41 +0200 + +rust-proxmox-backup (3.2.7-1) bookworm; urgency=medium + + * docs: drop blanket statement recommending against remote storage + + * ui: gc job edit: fix i18n gettext usage + + * pxar: improve error handling, e.g., avoiding duplicate information + + * close #4763: client: add command to forget (delete) whole backup group + with all its snapshots + + * close #5571: client: fix regression for `map` command + + * client: mount: wait for child to return before exiting to provide better + UX for some edge paths + + * fix #5304: client: set process uid/gid for `.pxarexclude-cli` to avoid + issues when trying to backup and restore the backup as non-root user. + + * http client: keep renewal future running on failed re-auth to make it more + resilient against some transient errors, like the request just failing due + to network instability.
+ + * datastore: fix problem with operations counting for the case where the + `.chunks/` directory is not available (deleted/moved) + + * manager: use confirmation helper in wipe-disk command + + -- Proxmox Support Team Wed, 03 Jul 2024 13:33:51 +0200 + +rust-proxmox-backup (3.2.6-1) bookworm; urgency=medium + + * tape: disable Programmable Early Warning Zone (PEWZ) + + * tape: handle PEWZ like regular early warning + + * docs: add note for not using remote storages + + * client: pxar: fix fuse mount performance for split archives + + -- Proxmox Support Team Mon, 17 Jun 2024 10:18:13 +0200 + +rust-proxmox-backup (3.2.5-1) bookworm; urgency=medium + + * pxar: add support for split archives + + * fix #3174: pxar: enable caching and meta comparison + + * docs: file formats: describe split pxar archive file layout + + * docs: add section describing change detection mode + + * api: datastore: add optional archive-name to file-restore + + * client: backup: conditionally write catalog for file level backups + + * docs: add table listing possible change detection modes + + -- Proxmox Support Team Mon, 10 Jun 2024 13:39:54 +0200 + +rust-proxmox-backup (3.2.4-1) bookworm; urgency=medium + + * fix: network api: permission using wrong pathname + + * fix #5503: d/control: bump dependency for proxmox-widget-toolkit + + * auth: add locking to `PbsAuthenticator` to avoid race conditions + + -- Proxmox Support Team Wed, 05 Jun 2024 16:23:38 +0200 + +rust-proxmox-backup (3.2.3-1) bookworm; urgency=medium + + * api-types: remove influxdb bucket name restrictions + + * api: datastore status: delay lookup after permission check to improve + consistency of tracked read operations + + * tape: improve throughput by not unnecessarily syncing/committing after + every archive written beyond the first 128 GiB + + * tape: save 'bytes used' in the tape inventory and show them on the web UI + to allow users to more easily see the usage of a tape + + * tape drive status: return drive activity (like cleaning, loading, + unloading, writing, ...) in the API and show them in the UI + + * ui: tape drive status: avoid checking some specific status if the current + drive activity would block doing so anyway + + * tape: write out basic MAM host-type attributes to media to make them more + easily identifiable as Proxmox Backup Server tape by common LTO tooling. 
+ + * api: syslog: fix the documented type of the return value + + * fix #5465: restore daemon: mount NTFS with UTF-8 charset + + * restore daemon: log some more errors on directory traversal + + * fix #5422: ui: garbage-collection: make columns in global view sortable + + * auth: move to hmac keys for csrf tokens as future-proofing + + * auth: upgrade hashes on user log in if a users password is not hashed with + the latest password hashing function for hardening purpose + + * auth: use ed25519 keys when generating new auth api keys + + * notifications: fix legacy sync notifications + + * docs: document notification-mode and merge old notification section + + * docs: notifications: rewrite overview for more clarity + + * ui: datastore options: link to 'notification-mode' section + + * acme: explicitly print a query when prompting for the custom directory URI + + -- Proxmox Support Team Wed, 22 May 2024 19:31:35 +0200 + +rust-proxmox-backup (3.2.2-1) bookworm; urgency=medium + + * ui: notifications fix empty text format for the default mail author + + * ui: tape backup: do not try to delete the namespace property if its empty + + * ui: sync job: fix error if local namespace is selected first + + -- Proxmox Support Team Thu, 25 Apr 2024 12:06:04 +0200 + +rust-proxmox-backup (3.2.1-1) bookworm; urgency=medium + + * implement Active Directory support: + - api: access: add routes for managing AD realms + - config: domains: add new "ad" section type for AD realms + - realm sync: add sync job for AD realms + - manager cli: add sub-command for managing AD realms + - docs: user-management: add section about AD realm support + + * auth: fix requesting the TFA write lock exclusively + + * installation: add section about unattended/automatic installation + + * api: tape config: forbid reusing IDs between tape changer and tape drive + + * api: add support for creating and updating VLAN interfaces + + * ui: enable the VLAN widget that got moved over from PVE to the generic + widget-toolkit + + -- Proxmox Support Team Wed, 24 Apr 2024 22:05:36 +0200 + +rust-proxmox-backup (3.2.0-1) bookworm; urgency=medium + + * fix #5248: client: allow self-signed/untrusted certificate chains + + * api: make prune-group a real worker task to avoid timeouts after 30s + + * ui: sync view: rename column 'Max. Recursion' -> 'Max. 
Depth' + + * api: assert that maintenance mode transitions are valid, e.g., do + not allow clearing the special "delete" maitenance mode + + * fix #3217: ui: add global prune and GC job view for an overview over + all datastores + + * fix #4723: manager: add new "garbage-collection list" CLI command to + list all GC jobs + + * ui: garbage collection: show removed and pending data of last run in + bytes + + * fix #5251: login: set autocomplete on password and user + + * allow sending notifications via advanced proxmox-notify crate + + * api: add endpoints for querying known notification values/fields + + * api: add endpoints for gotify, smtp, and sendmail targets + + * api: add endpoints for managing notification matchers + + * api: add endpoints for querying/testing notification targets + + * server: notifications: + - send tape notifications via notification system + - send ACME notifications via notification system + - send update notifications via notification system + - send sync notifications via notification system + - send verify notifications via notification system + - send prune notifications via notification system + - send GC notifications via notification system + + * docs: add documentation for notification system + + * ui: notifications: pull in UX improvements for match rules creation + + * api: notification: also list datastores if user has only Backup + privs + + * manager: add CLI commands for SMTP, sendmail, and gotify + endpoints + + * manager: add CLI for administrating notification matchers and targets + + -- Proxmox Support Team Tue, 23 Apr 2024 23:45:29 +0200 + rust-proxmox-backup (3.1.5-1) bookworm; urgency=medium * fix #5190: api: OIDC: accept generic URIs for the ACR value diff --git a/debian/control b/debian/control index fac3e0c71..7b8a048f8 100644 --- a/debian/control +++ b/debian/control @@ -15,29 +15,26 @@ Build-Depends: bash-completion, libacl1-dev, libfuse3-dev, librust-anyhow-1+default-dev, - librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~), librust-async-trait-0.1+default-dev (>= 0.1.56-~~), librust-base64-0.13+default-dev, - librust-bitflags-1+default-dev (>= 1.2.1-~~), + librust-bitflags-2+default-dev (>= 2.4-~~), librust-bytes-1+default-dev, librust-cidr-0.2+default-dev (>= 0.2.1-~~), + librust-const-format-0.2+default-dev, librust-crc32fast-1+default-dev, librust-crossbeam-channel-0.5+default-dev, librust-endian-trait-0.6+arrays-dev, librust-endian-trait-0.6+default-dev, librust-env-logger-0.10+default-dev, - librust-flate2-1+default-dev, librust-foreign-types-0.3+default-dev, librust-futures-0.3+default-dev, - librust-h2-0.3+default-dev, - librust-h2-0.3+stream-dev, - librust-handlebars-3+default-dev, + librust-h2-0.4+default-dev, + librust-h2-0.4+stream-dev, librust-hex-0.4+default-dev (>= 0.4.3-~~), librust-hex-0.4+serde-dev (>= 0.4.3-~~), librust-http-0.2+default-dev, librust-hyper-0.14+default-dev, librust-hyper-0.14+full-dev, - librust-lazy-static-1+default-dev (>= 1.4-~~), librust-libc-0.2+default-dev, librust-log-0.4+default-dev (>= 0.4.17-~~), librust-nix-0.26+default-dev (>= 0.26.1-~~), @@ -48,15 +45,19 @@ Build-Depends: bash-completion, librust-pathpatterns-0.3+default-dev, librust-percent-encoding-2+default-dev (>= 2.1-~~), librust-pin-project-lite-0.2+default-dev, - librust-proxmox-acme-0.5+default-dev, - librust-proxmox-apt-0.10+default-dev (>= 0.10.5-~~), + librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~), + librust-proxmox-apt-0.11+cache-dev, + librust-proxmox-apt-0.11+default-dev, + librust-proxmox-apt-api-types-1+default-dev 
(>= 1.0.1-~~), librust-proxmox-async-0.4+default-dev, - librust-proxmox-auth-api-0.3+api-dev, - librust-proxmox-auth-api-0.3+api-types-dev, - librust-proxmox-auth-api-0.3+default-dev, - librust-proxmox-auth-api-0.3+pam-authenticator-dev, + librust-proxmox-auth-api-0.4+api-dev, + librust-proxmox-auth-api-0.4+api-types-dev, + librust-proxmox-auth-api-0.4+default-dev, + librust-proxmox-auth-api-0.4+pam-authenticator-dev, librust-proxmox-borrow-1+default-dev, librust-proxmox-compression-0.2+default-dev, + librust-proxmox-config-digest-0.1+default-dev, + librust-proxmox-daemon-0.1+default-dev, librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~), librust-proxmox-http-0.9+client-dev, librust-proxmox-http-0.9+client-trait-dev, @@ -71,43 +72,48 @@ Build-Depends: bash-completion, librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~), librust-proxmox-lang-1+default-dev (>= 1.1-~~), librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~), - librust-proxmox-metrics-0.3+default-dev, + librust-proxmox-log-0.2+default-dev (>= 0.2.4-~~), + librust-proxmox-metrics-0.3+default-dev (>= 0.3.1-~~), + librust-proxmox-notify-0.4+default-dev, + librust-proxmox-notify-0.4+pbs-context-dev, librust-proxmox-openid-0.10+default-dev, - librust-proxmox-rest-server-0.5+default-dev (>= 0.5.1-~~), - librust-proxmox-rest-server-0.5+rate-limited-stream-dev (>= 0.5.1-~~), - librust-proxmox-rest-server-0.5+templates-dev (>= 0.5.1-~~), - librust-proxmox-router-2+cli-dev, - librust-proxmox-router-2+default-dev, - librust-proxmox-router-2+server-dev, - librust-proxmox-rrd-0.1+default-dev, + librust-proxmox-rest-server-0.8+default-dev, + librust-proxmox-rest-server-0.8+rate-limited-stream-dev, + librust-proxmox-rest-server-0.8+templates-dev, + librust-proxmox-router-3+cli-dev, + librust-proxmox-router-3+server-dev, + librust-proxmox-rrd-0.4+default-dev, + librust-proxmox-rrd-api-types-1+default-dev (>= 1.0.2-~~), librust-proxmox-schema-3+api-macro-dev, librust-proxmox-schema-3+default-dev, librust-proxmox-section-config-2+default-dev, librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~), librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~), + librust-proxmox-shared-cache-0.1+default-dev, librust-proxmox-shared-memory-0.3+default-dev, librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~), librust-proxmox-subscription-0.4+api-types-dev (>= 0.4.2-~~), librust-proxmox-subscription-0.4+default-dev (>= 0.4.2-~~), - librust-proxmox-sys-0.5+acl-dev (>= 0.5.3-~~), - librust-proxmox-sys-0.5+crypt-dev (>= 0.5.3-~~), - librust-proxmox-sys-0.5+default-dev (>= 0.5.3-~~), - librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.3-~~), - librust-proxmox-sys-0.5+timer-dev (>= 0.5.3-~~), - librust-proxmox-tfa-4+api-dev (>= 4.0.4-~~), - librust-proxmox-tfa-4+api-types-dev (>= 4.0.4-~~), - librust-proxmox-tfa-4+default-dev (>= 4.0.4-~~), - librust-proxmox-time-1+default-dev (>= 1.1.6-~~), + librust-proxmox-sys-0.6+acl-dev, + librust-proxmox-sys-0.6+crypt-dev, + librust-proxmox-sys-0.6+default-dev, + librust-proxmox-sys-0.6+logrotate-dev, + librust-proxmox-sys-0.6+timer-dev, + librust-proxmox-systemd-0.1+default-dev, + librust-proxmox-tfa-5+api-dev, + librust-proxmox-tfa-5+api-types-dev, + librust-proxmox-tfa-5+default-dev, + librust-proxmox-time-2+default-dev, librust-proxmox-uuid-1+default-dev, librust-proxmox-uuid-1+serde-dev, - librust-pxar-0.10+default-dev (>= 0.10.2-~~), + librust-proxmox-worker-task-0.1+default-dev, + librust-pxar-0.12+default-dev, librust-regex-1+default-dev (>= 1.5.5-~~), librust-rustyline-9+default-dev, librust-serde-1+default-dev, 
librust-serde-1+derive-dev, librust-serde-json-1+default-dev, librust-serde-plain-1+default-dev, - librust-siphasher-0.3+default-dev, librust-syslog-6+default-dev, librust-tar-0.4+default-dev, librust-termcolor-1+default-dev (>= 1.1.2-~~), @@ -131,12 +137,14 @@ Build-Depends: bash-completion, librust-tokio-util-0.7+default-dev, librust-tokio-util-0.7+io-dev, librust-tower-service-0.3+default-dev, + librust-tracing-0.1+default-dev, librust-udev-0.4+default-dev, librust-url-2+default-dev (>= 2.1-~~), librust-walkdir-2+default-dev, librust-xdg-2+default-dev (>= 2.2-~~), librust-zstd-0.12+bindgen-dev, librust-zstd-0.12+default-dev, + librust-zstd-safe-6+default-dev, libsgutils2-dev, libstd-rust-dev, libsystemd-dev (>= 246-~~), @@ -175,7 +183,7 @@ Depends: fonts-font-awesome, postfix | mail-transport-agent, proxmox-backup-docs, proxmox-mini-journalreader, - proxmox-widget-toolkit (>= 3.5.2), + proxmox-widget-toolkit (>= 4.1.4), pve-xtermjs (>= 4.7.0-1), sg3-utils, smartmontools, diff --git a/debian/copyright b/debian/copyright index 12d0662bb..3e7d0fae9 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,4 +1,4 @@ -Copyright (C) 2019 - 2023 Proxmox Server Solutions GmbH +Copyright (C) 2019 - 2024 Proxmox Server Solutions GmbH This software is written by Proxmox Server Solutions GmbH diff --git a/debian/proxmox-backup-file-restore.postinst b/debian/proxmox-backup-file-restore.postinst index 9792bfb46..c73893ddd 100755 --- a/debian/proxmox-backup-file-restore.postinst +++ b/debian/proxmox-backup-file-restore.postinst @@ -9,7 +9,7 @@ update_initramfs() { CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img" # cleanup first, in case proxmox-file-restore was uninstalled since we do - # not want an unuseable image lying around + # not want an unusable image lying around rm -f "$CACHE_PATH" if [ ! 
-f "$INST_PATH/initramfs.img" ]; then diff --git a/debian/proxmox-backup-server.install b/debian/proxmox-backup-server.install index ee114ea34..79757eadb 100644 --- a/debian/proxmox-backup-server.install +++ b/debian/proxmox-backup-server.install @@ -30,12 +30,44 @@ usr/share/man/man5/acl.cfg.5 usr/share/man/man5/datastore.cfg.5 usr/share/man/man5/domains.cfg.5 usr/share/man/man5/media-pool.cfg.5 +usr/share/man/man5/notifications-priv.cfg.5 +usr/share/man/man5/notifications.cfg.5 +usr/share/man/man5/proxmox-backup.node.cfg.5 usr/share/man/man5/remote.cfg.5 usr/share/man/man5/sync.cfg.5 usr/share/man/man5/tape-job.cfg.5 usr/share/man/man5/tape.cfg.5 usr/share/man/man5/user.cfg.5 usr/share/man/man5/verification.cfg.5 +usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs +usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs +usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs +usr/share/proxmox-backup/templates/default/gc-err-subject.txt.hbs +usr/share/proxmox-backup/templates/default/gc-ok-body.txt.hbs +usr/share/proxmox-backup/templates/default/gc-ok-subject.txt.hbs +usr/share/proxmox-backup/templates/default/package-updates-body.txt.hbs +usr/share/proxmox-backup/templates/default/package-updates-subject.txt.hbs +usr/share/proxmox-backup/templates/default/prune-err-body.txt.hbs +usr/share/proxmox-backup/templates/default/prune-err-subject.txt.hbs +usr/share/proxmox-backup/templates/default/prune-ok-body.txt.hbs +usr/share/proxmox-backup/templates/default/prune-ok-subject.txt.hbs +usr/share/proxmox-backup/templates/default/sync-err-body.txt.hbs +usr/share/proxmox-backup/templates/default/sync-err-subject.txt.hbs +usr/share/proxmox-backup/templates/default/sync-ok-body.txt.hbs +usr/share/proxmox-backup/templates/default/sync-ok-subject.txt.hbs +usr/share/proxmox-backup/templates/default/tape-backup-err-body.txt.hbs +usr/share/proxmox-backup/templates/default/tape-backup-err-subject.txt.hbs +usr/share/proxmox-backup/templates/default/tape-backup-ok-body.txt.hbs +usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs +usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs +usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs +usr/share/proxmox-backup/templates/default/test-body.html.hbs +usr/share/proxmox-backup/templates/default/test-body.txt.hbs +usr/share/proxmox-backup/templates/default/test-subject.txt.hbs +usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs +usr/share/proxmox-backup/templates/default/verify-err-subject.txt.hbs +usr/share/proxmox-backup/templates/default/verify-ok-body.txt.hbs +usr/share/proxmox-backup/templates/default/verify-ok-subject.txt.hbs usr/share/zsh/vendor-completions/_pmt usr/share/zsh/vendor-completions/_pmtx usr/share/zsh/vendor-completions/_proxmox-backup-debug diff --git a/debian/rules b/debian/rules index 54a3c22bf..a03fe11ba 100755 --- a/debian/rules +++ b/debian/rules @@ -8,7 +8,7 @@ include /usr/share/rustc/architecture.mk export BUILD_MODE=release -CARGO=/usr/share/cargo/bin/cargo +export CARGO=/usr/share/cargo/bin/cargo export CFLAGS CXXFLAGS CPPFLAGS LDFLAGS export DEB_HOST_RUST_TYPE DEB_HOST_GNU_TYPE @@ -28,6 +28,11 @@ override_dh_auto_configure: @perl -ne 'if (/^version\s*=\s*"(\d+(?:\.\d+)+)"/) { my $$v_cargo = $$1; my $$v_deb = "$(DEB_VERSION_UPSTREAM)"; \ die "ERROR: d/changelog <-> Cargo.toml version mismatch: $$v_cargo != $$v_deb\n" if $$v_cargo ne $$v_deb; exit(0); }' Cargo.toml $(CARGO) prepare-debian $(CURDIR)/debian/cargo_registry 
--link-from-system + # `cargo build` and `cargo install` have different config precedence, symlink + # the wrapper config into a place where `build` picks it up as well.. + # https://doc.rust-lang.org/cargo/commands/cargo-install.html#configuration-discovery + mkdir -p .cargo + ln -s $(CARGO_HOME)/config.toml $(CURDIR)/.cargo/config.toml dh_auto_configure override_dh_auto_build: diff --git a/docs/Makefile b/docs/Makefile index a36f23a71..bf04fb427 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,55 +1,63 @@ include ../defines.mk GENERATED_SYNOPSIS := \ - proxmox-tape/synopsis.rst \ - proxmox-backup-client/synopsis.rst \ - proxmox-backup-client/catalog-shell-synopsis.rst \ - proxmox-backup-manager/synopsis.rst \ - proxmox-backup-debug/synopsis.rst \ - proxmox-file-restore/synopsis.rst \ - pxar/synopsis.rst \ - pmtx/synopsis.rst \ - pmt/synopsis.rst \ - config/media-pool/config.rst \ - config/tape/config.rst \ - config/tape-job/config.rst \ - config/user/config.rst \ - config/remote/config.rst \ - config/sync/config.rst \ - config/verification/config.rst \ config/acl/roles.rst \ config/datastore/config.rst \ - config/domains/config.rst + config/domains/config.rst \ + config/media-pool/config.rst \ + config/notifications-priv/config.rst \ + config/notifications/config.rst \ + config/remote/config.rst \ + config/sync/config.rst \ + config/tape-job/config.rst \ + config/tape/config.rst \ + config/user/config.rst \ + config/verification/config.rst \ + pmt/synopsis.rst \ + pmtx/synopsis.rst \ + proxmox-backup-client/catalog-shell-synopsis.rst \ + proxmox-backup-client/synopsis.rst \ + proxmox-backup-debug/synopsis.rst \ + proxmox-backup-manager/synopsis.rst \ + proxmox-file-restore/synopsis.rst \ + proxmox-tape/synopsis.rst \ + pxar/synopsis.rst \ MAN1_PAGES := \ - pxar.1 \ - pmtx.1 \ - pmt.1 \ - proxmox-tape.1 \ - proxmox-backup-proxy.1 \ - proxmox-backup-client.1 \ - proxmox-backup-manager.1 \ - proxmox-file-restore.1 \ - proxmox-backup-debug.1 \ pbs2to3.1 \ + pmt.1 \ + pmtx.1 \ + proxmox-backup-client.1 \ + proxmox-backup-debug.1 \ + proxmox-backup-manager.1 \ + proxmox-backup-proxy.1 \ + proxmox-file-restore.1 \ + proxmox-tape.1 \ + pxar.1 \ +# FIXME: prefix all man pages that are not directly relating to an existing executable with +# `proxmox-backup.`, like the newer added proxmox-backup.node.cfg but add backwards compatible +# symlinks, e.g. with a "5pbs" man page "suffix section". 
MAN5_PAGES := \ - media-pool.cfg.5 \ - tape.cfg.5 \ - tape-job.cfg.5 \ acl.cfg.5 \ - user.cfg.5 \ + datastore.cfg.5 \ + domains.cfg.5 \ + media-pool.cfg.5 \ + proxmox-backup.node.cfg.5 \ + notifications-priv.cfg.5 \ + notifications.cfg.5 \ remote.cfg.5 \ sync.cfg.5 \ + tape-job.cfg.5 \ + tape.cfg.5 \ + user.cfg.5 \ verification.cfg.5 \ - datastore.cfg.5 \ - domains.cfg.5 PRUNE_SIMULATOR_FILES := \ prune-simulator/index.html \ - prune-simulator/documentation.html \ prune-simulator/clear-trigger.png \ - prune-simulator/prune-simulator.js + prune-simulator/documentation.html \ + prune-simulator/prune-simulator.js \ PRUNE_SIMULATOR_JS_SOURCE := \ /usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \ @@ -85,13 +93,13 @@ SPHINXOPTS = SPHINXBUILD = sphinx-build BUILDDIR = output -#ifeq ($(BUILD_MODE), release) -COMPILEDIR := ../target/release +ifeq ($(BUILD_MODE), release) +COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/release SPHINXOPTS += -t release -#else -#COMPILEDIR := ../target/debug -#SPHINXOPTS += -t devbuild -#endif +else +COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/debug +SPHINXOPTS += -t devbuild +endif # Sphinx internal variables. ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) . @@ -138,9 +146,9 @@ lto-barcode/lto-barcode-generator.js: ${LTO_BARCODE_JS_SOURCE} mv $@.tmp $@ .PHONY: html -html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES} +html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg _static/custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES} $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/ + install -m 0644 _static/custom.js _static/custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/ install -dm 0755 $(BUILDDIR)/html/prune-simulator install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator install -dm 0755 $(BUILDDIR)/html/lto-barcode diff --git a/docs/custom.css b/docs/_static/custom.css similarity index 100% rename from docs/custom.css rename to docs/_static/custom.css diff --git a/docs/custom.js b/docs/_static/custom.js similarity index 100% rename from docs/custom.js rename to docs/_static/custom.js diff --git a/docs/backup-client.rst b/docs/backup-client.rst index 00a1abbb3..e56e0625b 100644 --- a/docs/backup-client.rst +++ b/docs/backup-client.rst @@ -280,6 +280,65 @@ Multiple paths can be excluded like this: # proxmox-backup-client backup.pxar:./linux --exclude=/usr --exclude=/rust +.. _client_change_detection_mode: + +Change Detection Mode +~~~~~~~~~~~~~~~~~~~~~ + +File-based backups containing a lot of data can take a long time, as the default +behavior for the Proxmox backup client is to read all data and encode it into a +pxar archive. +The encoded stream is split into variable sized chunks. For each chunk, a digest +is calculated and used to decide whether the chunk needs to be uploaded or can +be indexed without upload, as it is already available on the server (and +therefore deduplicated). If the backed up files are largely unchanged, +re-reading and then detecting the corresponding chunks don't need to be uploaded +after all is time consuming and undesired. + +The backup client's `change-detection-mode` can be switched from default to +`metadata` based detection to reduce limitations as described above, instructing +the client to avoid re-reading files with unchanged metadata whenever possible. 
+When using this mode, instead of the regular pxar archive, the backup snapshot +is stored into two separate files: the `mpxar` containing the archive's metadata +and the `ppxar` containing a concatenation of the file contents. This splitting +allows for efficient metadata lookups. + +Using the `change-detection-mode` set to `data` allows creating the same split +archive as when using the `metadata` mode, but without using a previous +reference and therefore re-encoding all file payloads. +When creating the backup archives, the current file metadata is compared to the +one looked up in the previous `mpxar` archive. +The metadata comparison includes file size, file type, ownership and permission +information, as well as acls and attributes and most importantly the file's +mtime, for details see the +:ref:`pxar metadata archive format <pxar-meta-format>`. + +If unchanged, the entry is cached for possible re-use of content chunks without +re-reading, by indexing the already present chunks containing the contents from +the previous backup snapshot. Since the file might only partially re-use chunks +(thereby introducing wasted space in the form of padding), the decision whether +to re-use or re-encode the currently cached entries is postponed to when enough +information is available, comparing the possible padding to a threshold value. + +.. _client_change_detection_mode_table: + +============ =================================================================== +Mode Description +============ =================================================================== +``legacy`` (current default): Encode all files into a self-contained pxar + archive. +``data`` Encode all files into a split data and metadata pxar archive. +``metadata`` Encode changed files, reuse unchanged from previous snapshot, + creating a split archive. +============ =================================================================== + +The following shows an example of the client invocation with the `metadata` +mode: + +.. code-block:: console + + # proxmox-backup-client backup.pxar:./linux --change-detection-mode=metadata + ..
_client_encryption: Encryption diff --git a/docs/conf.py b/docs/conf.py index 95146e0ad..76ab293e2 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -108,12 +108,15 @@ man_pages = [ ('config/datastore/man5', 'datastore.cfg', 'Datastore Configuration', [author], 5), ('config/domains/man5', 'domains.cfg', 'Realm Configuration', [author], 5), ('config/media-pool/man5', 'media-pool.cfg', 'Media Pool Configuration', [author], 5), + ('config/node/man5', 'proxmox-backup.node.cfg', 'Proxmox Backup Server - Node Configuration', [author], 5), ('config/remote/man5', 'remote.cfg', 'Remote Server Configuration', [author], 5), ('config/sync/man5', 'sync.cfg', 'Synchronization Job Configuration', [author], 5), ('config/tape-job/man5', 'tape-job.cfg', 'Tape Job Configuration', [author], 5), ('config/tape/man5', 'tape.cfg', 'Tape Drive and Changer Configuration', [author], 5), ('config/user/man5', 'user.cfg', 'User Configuration', [author], 5), ('config/verification/man5', 'verification.cfg', 'Verification Job Configuration', [author], 5), + ('config/notifications/man5', 'notifications.cfg', 'Notification target/matcher configuration', [author], 5), + ('config/notifications-priv/man5', 'notifications-priv.cfg', 'Notification target secrets', [author], 5), ] @@ -263,6 +266,9 @@ html_static_path = ['_static'] html_js_files = [ 'custom.js', ] +html_css_files = [ + 'custom.css', +] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied diff --git a/docs/config/domains/format.rst b/docs/config/domains/format.rst index d92cd473d..09e1f2946 100644 --- a/docs/config/domains/format.rst +++ b/docs/config/domains/format.rst @@ -23,5 +23,5 @@ For LDAP realms, the LDAP bind password is stored in ``ldap_passwords.json``. user-classes inetorgperson,posixaccount,person,user -You can use the ``proxmox-backup-manager openid`` and ``proxmox-backup-manager ldap`` commands to manipulate -this file. +You can use the ``proxmox-backup-manager openid``, ``proxmox-backup-manager +ldap`` and ``proxmox-backup-manager ad`` commands to manipulate this file. diff --git a/docs/config/node/format.rst b/docs/config/node/format.rst new file mode 100644 index 000000000..f5ad5d813 --- /dev/null +++ b/docs/config/node/format.rst @@ -0,0 +1,49 @@ +The file contains these options: + +:acme: The ACME account to use on this node. + +:acmedomain0: ACME domain. + +:acmedomain1: ACME domain. + +:acmedomain2: ACME domain. + +:acmedomain3: ACME domain. + +:acmedomain4: ACME domain. + +:http-proxy: Set proxy for apt and subscription checks. + +:email-from: Fallback email from which notifications will be sent. + +:ciphers-tls-1.3: List of TLS ciphers for TLS 1.3 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.) + +:ciphers-tls-1.2: List of TLS ciphers for TLS <= 1.2 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (Proxy has to be restarted for changes to take effect.) + +:default-lang: Default language used in the GUI. + +:description: Node description. + +:task-log-max-days: Maximum days to keep task logs. 
+ +For example: + +:: + + acme: local + acmedomain0: first.domain.com + acmedomain1: second.domain.com + acmedomain2: third.domain.com + acmedomain3: fourth.domain.com + acmedomain4: fifth.domain.com + http-proxy: internal.proxy.com + email-from: proxmox@mail.com + ciphers-tls-1.3: TLS_AES_128_GCM_SHA256:TLS_AES_128_CCM_8_SHA256:TLS_CHACHA20_POLY1305_SHA256 + ciphers-tls-1.2: RSA_WITH_AES_128_CCM:DHE_RSA_WITH_AES_128_CCM + default-lang: en + description: Primary PBS instance + task-log-max-days: 30 + + +You can use the ``proxmox-backup-manager node`` command to manipulate +this file. diff --git a/docs/config/node/man5.rst b/docs/config/node/man5.rst new file mode 100644 index 000000000..fbdaf3b0a --- /dev/null +++ b/docs/config/node/man5.rst @@ -0,0 +1,18 @@ +:orphan: + +======== +node.cfg +======== + +Description +=========== + +The file /etc/proxmox-backup/node.cfg is a configuration file for Proxmox +Backup Server. It contains the general configuration regarding this node. + +Options +======= + +.. include:: format.rst + +.. include:: ../../pbs-copyright.rst diff --git a/docs/config/notifications-priv/format.rst b/docs/config/notifications-priv/format.rst new file mode 100644 index 000000000..7d92c9793 --- /dev/null +++ b/docs/config/notifications-priv/format.rst @@ -0,0 +1 @@ +This file contains protected credentials for notification targets. diff --git a/docs/config/notifications-priv/man5.rst b/docs/config/notifications-priv/man5.rst new file mode 100644 index 000000000..ef6fed6ca --- /dev/null +++ b/docs/config/notifications-priv/man5.rst @@ -0,0 +1,24 @@ +:orphan: + +====================== +notifications-priv.cfg +====================== + +Description +=========== + +The file /etc/proxmox-backup/notifications-priv.cfg is a configuration file +for Proxmox Backup Server. It contains the configuration for the +notification system configuration. + +File Format +=========== + +.. include:: format.rst + +Options +======= + +.. include:: config.rst + +.. include:: ../../pbs-copyright.rst diff --git a/docs/config/notifications/format.rst b/docs/config/notifications/format.rst new file mode 100644 index 000000000..3bda91c7c --- /dev/null +++ b/docs/config/notifications/format.rst @@ -0,0 +1,2 @@ +This file contains configuration for notification targets and notification +matchers. diff --git a/docs/config/notifications/man5.rst b/docs/config/notifications/man5.rst new file mode 100644 index 000000000..279378d11 --- /dev/null +++ b/docs/config/notifications/man5.rst @@ -0,0 +1,24 @@ +:orphan: + +================== +notifications.cfg +================== + +Description +=========== + +The file /etc/proxmox-backup/notifications.cfg is a configuration file +for Proxmox Backup Server. It contains the configuration for the +notification system configuration. + +File Format +=========== + +.. include:: format.rst + +Options +======= + +.. include:: config.rst + +.. include:: ../../pbs-copyright.rst diff --git a/docs/configuration-files.rst b/docs/configuration-files.rst index ba54a7b0b..5fabf48c4 100644 --- a/docs/configuration-files.rst +++ b/docs/configuration-files.rst @@ -67,6 +67,46 @@ Options .. include:: config/media-pool/config.rst +``node.cfg`` +~~~~~~~~~~~~~~~~~~ + +Options +^^^^^^^ + +.. include:: config/node/format.rst + +.. _notifications.cfg: + +``notifications.cfg`` +~~~~~~~~~~~~~~~~~~~~~ + +File Format +^^^^^^^^^^^ + +.. include:: config/notifications/format.rst + + +Options +^^^^^^^ + +.. include:: config/notifications/config.rst + +.. 
_notifications_priv.cfg: + +``notifications-priv.cfg`` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +File Format +^^^^^^^^^^^ + +.. include:: config/notifications-priv/format.rst + + +Options +^^^^^^^ + +.. include:: config/notifications-priv/config.rst + ``tape.cfg`` ~~~~~~~~~~~~ diff --git a/docs/external-metric-server.rst b/docs/external-metric-server.rst new file mode 100644 index 000000000..f65082cc4 --- /dev/null +++ b/docs/external-metric-server.rst @@ -0,0 +1,55 @@ +External Metric Server +---------------------- + +Proxmox Backup Server periodically sends various metrics about your host's memory, +network and disk activity to configured external metric servers. + +Currently supported are: + + * InfluxDB (HTTP) (see https://docs.influxdata.com/influxdb/v2/ ) + * InfluxDB (UDP) (see https://docs.influxdata.com/influxdb/v1/ ) + +The external metric server definitions are saved in +'/etc/proxmox-backup/metricserver.cfg', and can be edited through the web +interface. + +.. note:: + + Using HTTP is recommended as UDP support has been dropped in InfluxDB v2. + +InfluxDB (HTTP) plugin configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The plugin can be configured to use the HTTP(s) API of InfluxDB 2.x. +InfluxDB 1.8.x does contain a forwards compatible API endpoint for this v2 API. + +Since InfluxDB's v2 API is only available with authentication, you have +to generate a token that can write into the correct bucket and set it. + +In the v2 compatible API of 1.8.x, you can use 'user:password' as token +(if required), and can omit the 'organization' since that has no meaning in InfluxDB 1.x. + +You can also set the maximum batch size (default 25000000 bytes) with the +'max-body-size' setting (this corresponds to the InfluxDB setting with the +same name). + +InfluxDB (UDP) plugin configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Proxmox Backup Server can also send data via UDP. This requires the InfluxDB +server to be configured correctly. The MTU can also be configured here if +necessary. + +Here is an example configuration for InfluxDB (on your InfluxDB server): + +.. code-block:: console + + [[udp]] + enabled = true + bind-address = "0.0.0.0:8089" + database = "proxmox" + batch-size = 1000 + batch-timeout = "1s" + +With this configuration, the InfluxDB server listens on all IP addresses on +port 8089, and writes the data in the *proxmox* database. diff --git a/docs/file-formats.rst b/docs/file-formats.rst index 43ecfefce..77d55b5ef 100644 --- a/docs/file-formats.rst +++ b/docs/file-formats.rst @@ -8,7 +8,53 @@ Proxmox File Archive Format (``.pxar``) .. graphviz:: pxar-format-overview.dot +.. _pxar-meta-format: +Proxmox File Archive Format - Meta (``.mpxar``) +----------------------------------------------- + +Pxar metadata archive with same structure as a regular pxar archive, with the +exception of regular file payloads not being contained within the archive +itself, but rather being stored as payload references to the corresponding pxar +payload (``.ppxar``) file. + +Can be used to lookup all the archive entries and metadata without the size +overhead introduced by the file payloads. + +.. graphviz:: meta-format-overview.dot + +.. _ppxar-format: + +Proxmox File Archive Format - Payload (``.ppxar``) +-------------------------------------------------- + +Pxar payload file storing regular file payloads to be referenced and accessed by +the corresponding pxar metadata (``.mpxar``) archive. Contains a concatenation +of regular file payloads, each prefixed by a `PAYLOAD` header. 
Further, the +actual referenced payload entries might be separated by padding (full/partial +payloads not referenced), introduced when reusing chunks of a previous backup +run, when chunk boundaries did not aligned to payload entry offsets. + +All headers are stored as little-endian. + +.. list-table:: + :widths: auto + + * - ``PAYLOAD_START_MARKER`` + - header of ``[u8; 16]`` consisting of type hash and size; + marks start + * - ``PAYLOAD`` + - header of ``[u8; 16]`` cosisting of type hash and size; + referenced by metadata archive + * - Payload + - raw regular file payload + * - Padding + - partial/full unreferenced payloads, caused by unaligned chunk boundary + * - ... + - further concatenation of payload header, payload and padding + * - ``PAYLOAD_TAIL_MARKER`` + - header of ``[u8; 16]`` consisting of type hash and size; + marks end .. _data-blob-format: Data Blob Format (``.blob``) diff --git a/docs/index.rst b/docs/index.rst index d70bfd1b5..93212bb88 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -31,6 +31,7 @@ in the section entitled "GNU Free Documentation License". maintenance.rst sysadmin.rst network-management.rst + notifications.rst technical-overview.rst faq.rst diff --git a/docs/installation.rst b/docs/installation.rst index 55eddfb4e..0fa1df1d9 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -50,6 +50,22 @@ It includes the following: .. note:: During the installation process, the complete server is used by default and all existing data is removed. +Install `Proxmox Backup`_ Server Unattended +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +It is possible to install `Proxmox Backup`_ Server automatically in an +unattended manner. This enables you to fully automate the setup process on +bare-metal. Once the installation is complete and the host has booted up, +automation tools like Ansible can be used to further configure the installation. + +The necessary options for the installer must be provided in an answer file. +This file allows the use of filter rules to determine which disks and network +cards should be used. + +To use the automated installation, it is first necessary to prepare an +installation ISO. For more details and information on the unattended +installation see `our wiki +`_. 
+ Install `Proxmox Backup`_ Server on Debian ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/lto-barcode/page-calibration.js b/docs/lto-barcode/page-calibration.js index 3c7fed821..54512a9f4 100644 --- a/docs/lto-barcode/page-calibration.js +++ b/docs/lto-barcode/page-calibration.js @@ -108,7 +108,7 @@ Ext.define('PageCalibration', { xtype: 'numberfield', value: 'a4', name: 's_x', - fieldLabel: 'Meassured Start Offset Sx (mm)', + fieldLabel: 'Measured Start Offset Sx (mm)', allowBlank: false, labelWidth: 200, }, @@ -116,7 +116,7 @@ Ext.define('PageCalibration', { xtype: 'numberfield', value: 'a4', name: 'd_x', - fieldLabel: 'Meassured Length Dx (mm)', + fieldLabel: 'Measured Length Dx (mm)', allowBlank: false, labelWidth: 200, }, @@ -124,7 +124,7 @@ Ext.define('PageCalibration', { xtype: 'numberfield', value: 'a4', name: 's_y', - fieldLabel: 'Meassured Start Offset Sy (mm)', + fieldLabel: 'Measured Start Offset Sy (mm)', allowBlank: false, labelWidth: 200, }, @@ -132,7 +132,7 @@ Ext.define('PageCalibration', { xtype: 'numberfield', value: 'a4', name: 'd_y', - fieldLabel: 'Meassured Length Dy (mm)', + fieldLabel: 'Measured Length Dy (mm)', allowBlank: false, labelWidth: 200, }, diff --git a/docs/maintenance.rst b/docs/maintenance.rst index 6dbb6941c..4bb135e4e 100644 --- a/docs/maintenance.rst +++ b/docs/maintenance.rst @@ -277,26 +277,10 @@ the **Actions** column in the table. Notifications ------------- -Proxmox Backup Server can send you notification emails about automatically +Proxmox Backup Server can send you notifications about automatically scheduled verification, garbage-collection and synchronization tasks results. -By default, notifications are sent to the email address configured for the -`root@pam` user. You can instead set this user for each datastore. - -.. image:: images/screenshots/pbs-gui-datastore-options.png - :target: _images/pbs-gui-datastore-options.png - :align: right - :alt: Datastore Options - -You can also change the level of notification received per task type, the -following options are available: - -* Always: send a notification for any scheduled task, independent of the - outcome - -* Errors: send a notification for any scheduled task that results in an error - -* Never: do not send any notification at all +Refer to the :ref:`notifications` chapter for more details. .. _maintenance_mode: diff --git a/docs/managing-remotes.rst b/docs/managing-remotes.rst index 38ccd5af2..dd43ccd2b 100644 --- a/docs/managing-remotes.rst +++ b/docs/managing-remotes.rst @@ -69,6 +69,9 @@ sync-job`` command. The configuration information for sync jobs is stored at in the GUI, or use the ``create`` subcommand. After creating a sync job, you can either start it manually from the GUI or provide it with a schedule (see :ref:`calendar-event-scheduling`) to run regularly. +Backup snapshots, groups and namespaces which are no longer available on the +**Remote** datastore can be removed from the local datastore as well by setting +the ``remove-vanished`` option for the sync job. .. 
code-block:: console diff --git a/docs/meta-format-overview.dot b/docs/meta-format-overview.dot new file mode 100644 index 000000000..7eea4b55b --- /dev/null +++ b/docs/meta-format-overview.dot @@ -0,0 +1,50 @@ +digraph g { +graph [ +rankdir = "LR" +fontname="Helvetica" +]; +node [ +fontsize = "16" +shape = "record" +]; +edge [ +]; + +"archive" [ +label = "archive.mpxar" +shape = "record" +]; + +"rootdir" [ +label = "FORMAT_VERSION\l|PRELUDE\l|ENTRY\l|\{XATTR\}\* extended attribute list\l|\{ACL_USER\}\* USER ACL entries\l|\{ACL_GROUP\}\* GROUP ACL entries\l|\[ACL_GROUP_OBJ\] the ACL_GROUP_OBJ \l|\[ACL_DEFAULT\] the various default ACL fields\l|\{ACL_DEFAULT_USER\}\* USER ACL entries\l|\{ACL_DEFAULT_GROUP\}\* GROUP ACL entries\l|\[FCAPS\] file capability in Linux disk format\l|\[QUOTA_PROJECT_ID\] the ext4/xfs quota project ID\l|{ PAYLOAD_REF|SYMLINK|DEVICE|{ \{DirectoryEntries\}\*|GOODBYE}}" +shape = "record" +]; + + +"entry" [ +label = " size: u64 = 64\l|type: u64 = ENTRY\l|feature_flags: u64\l|mode: u64\l|flags: u64\l|uid: u64\l|gid: u64\l|mtime: u64\l" +labeljust = "l" +shape = "record" +]; + + + +"direntry" [ +label = " FILENAME\l|{ENTRY\l|HARDLINK\l}" +shape = "record" +]; + +"payloadrefentry" [ +label = " offset: u64\l|size: u64\l" +shape = "record" +]; + +"archive" -> "rootdir":fv + +"rootdir":f0 -> "entry":f0 + +"rootdir":de -> "direntry":f0 + +"rootdir":pl -> "payloadrefentry":f0 + +} diff --git a/docs/notifications.rst b/docs/notifications.rst new file mode 100644 index 000000000..4ba8db86d --- /dev/null +++ b/docs/notifications.rst @@ -0,0 +1,257 @@ +.. _notifications: + +Notifications +============= + +Overview +-------- + +* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy + events in the system. These events are handled by the notification system. + A notification event has metadata, for example a timestamp, a severity level, + a type and other metadata fields. +* :ref:`notification_matchers` route a notification event to one or more notification + targets. A matcher can have match rules to selectively route based on the metadata + of a notification event. +* :ref:`notification_targets` are a destination to which a notification event + is routed to by a matcher. There are multiple types of target, mail-based + (Sendmail and SMTP) and Gotify. + +Datastores and tape backup jobs have a configurable :ref:`notification_mode`. +It allows you to choose between the notification system and a legacy mode +for sending notification emails. The legacy mode is equivalent to the +way notifications were handled before Proxmox Backup Server 3.2. + +The notification system can be configured in the GUI under +*Configuration → Notifications*. The configuration is stored in +:ref:`notifications.cfg` and :ref:`notifications_priv.cfg` - +the latter contains sensitive configuration options such as +passwords or authentication tokens for notification targets and +can only be read by ``root``. + +.. _notification_targets: + +Notification Targets +-------------------- + +Proxmox Backup Server offers multiple types of notification targets. + +.. _notification_targets_sendmail: + +Sendmail +^^^^^^^^ +The sendmail binary is a program commonly found on Unix-like operating systems +that handles the sending of email messages. +It is a command-line utility that allows users and applications to send emails +directly from the command line or from within scripts. 
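+For illustration only, handing a message to a local ``sendmail`` binary from a
+program boils down to piping the complete mail to the binary's standard input,
+roughly like the following sketch (not the actual Proxmox Backup Server
+implementation; the binary path and message contents are assumptions):
+
+.. code-block:: rust
+
+   use std::io::Write;
+   use std::process::{Command, Stdio};
+
+   fn hand_to_sendmail(recipient: &str, message: &str) -> std::io::Result<()> {
+       // Spawn the local sendmail binary (provided by Postfix on a standard
+       // installation) and stream the mail, headers included, to its stdin.
+       let mut child = Command::new("/usr/sbin/sendmail")
+           .arg(recipient)
+           .stdin(Stdio::piped())
+           .spawn()?;
+       child
+           .stdin
+           .take()
+           .expect("stdin was configured as piped above")
+           .write_all(message.as_bytes())?;
+       child.wait()?;
+       Ok(())
+   }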
+
+The sendmail notification target uses the ``sendmail`` binary to send emails to a
+list of configured users or email addresses. If a user is selected as a recipient,
+the email address configured in the user's settings will be used.
+For the ``root@pam`` user, this is the email address entered during installation.
+A user's email address can be configured in ``Configuration -> Access Control -> User Management``.
+If a user has no associated email address, no email will be sent.
+
+.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail`` binary is provided by
+   Postfix. It may be necessary to configure Postfix so that it can deliver
+   mails correctly - for example by setting an external mail relay (smart host).
+   In case of failed delivery, check the system logs for messages logged by
+   the Postfix daemon.
+
+See :ref:`notifications.cfg` for all configuration options.
+
+.. _notification_targets_smtp:
+
+SMTP
+^^^^
+SMTP notification targets can send emails directly to an SMTP mail relay.
+This target does not use the system's MTA to deliver emails.
+Similar to sendmail targets, if a user is selected as a recipient, the user's configured
+email address will be used.
+
+.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry mechanism
+   in case of a failed mail delivery.
+
+See :ref:`notifications.cfg` for all configuration options.
+
+.. _notification_targets_gotify:
+
+Gotify
+^^^^^^
+`Gotify `_ is an open-source self-hosted notification server that
+allows you to send push notifications to various devices and
+applications. It provides a simple API and web interface, making it easy to
+integrate with different platforms and services.
+
+See :ref:`notifications.cfg` for all configuration options.
+
+.. _notification_matchers:
+
+Notification Matchers
+---------------------
+
+Notification matchers route notifications to notification targets based
+on their matching rules. These rules can match certain properties of a
+notification, such as the timestamp (``match-calendar``), the severity of
+the notification (``match-severity``) or metadata fields (``match-field``).
+If a notification is matched by a matcher, all targets configured for the
+matcher will receive the notification.
+
+An arbitrary number of matchers can be created, each with their own
+matching rules and targets to notify.
+Every target is notified at most once for every notification, even if
+the target is used in multiple matchers.
+
+A matcher without rules matches any notification; the configured targets
+will always be notified.
+
+See :ref:`notifications.cfg` for all configuration options.
+
+Calendar Matching Rules
+^^^^^^^^^^^^^^^^^^^^^^^
+A calendar matcher matches a notification's timestamp.
+
+Examples:
+
+* ``match-calendar 8-12``
+* ``match-calendar 8:00-15:30``
+* ``match-calendar mon-fri 9:00-17:00``
+* ``match-calendar sun,tue-wed,fri 9-17``
+
+Field Matching Rules
+^^^^^^^^^^^^^^^^^^^^
+Notifications have a selection of metadata fields that can be matched.
+When using ``exact`` as a matching mode, a ``,`` can be used as a separator.
+The matching rule then matches if the metadata field has **any** of the specified
+values.
+
+Examples:
+
+* ``match-field exact:type=gc`` Only match notifications for garbage collection jobs.
+* ``match-field exact:type=prune,verify`` Match prune job and verification job notifications.
+* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with ``backup``.
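+
+Conceptually, a ``match-field`` rule can be read as the following check (an
+illustrative sketch under the semantics described here, not the actual matcher
+implementation):
+
+.. code-block:: rust
+
+   use regex::Regex;
+
+   /// `exact` matches if the field equals any of the comma-separated values,
+   /// `regex` matches if the regular expression matches the field value.
+   fn field_matches(mode: &str, expected: &str, value: Option<&str>) -> bool {
+       // A notification that does not carry the referenced field never matches.
+       let Some(value) = value else { return false };
+       match mode {
+           "exact" => expected.split(',').any(|candidate| candidate == value),
+           "regex" => Regex::new(expected).map_or(false, |re| re.is_match(value)),
+           _ => false,
+       }
+   }
+
+With this reading, ``match-field exact:type=prune,verify`` matches when the
+``type`` field is either ``prune`` or ``verify``.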
+
+If a notification does not have the matched field, the rule will **not** match.
+For instance, a ``match-field regex:datastore=.*`` directive will match any notification that has
+a ``datastore`` metadata field, but will not match if the field does not exist.
+
+Severity Matching Rules
+^^^^^^^^^^^^^^^^^^^^^^^
+A notification has an associated severity that can be matched.
+
+Examples:
+
+* ``match-severity error``: Only match errors
+* ``match-severity warning,error``: Match warnings and errors
+
+The following severities are in use:
+``info``, ``notice``, ``warning``, ``error``, ``unknown``.
+
+.. _notification_events:
+
+Notification Events
+-------------------
+
+The following table contains a list of all notification events in Proxmox Backup Server, their
+type, severity and additional metadata fields. ``type`` as well as any other metadata field
+may be used in ``match-field`` match rules.
+
+================================ ==================== ========== ==============================================================
+Event                            ``type``             Severity   Metadata fields (in addition to ``type``)
+================================ ==================== ========== ==============================================================
+ACME certificate renewal failed  ``acme``             ``error``  ``hostname``
+Garbage collection failure       ``gc``               ``error``  ``datastore``, ``hostname``
+Garbage collection success       ``gc``               ``info``   ``datastore``, ``hostname``
+Package updates available        ``package-updates``  ``info``   ``hostname``
+Prune job failure                ``prune``            ``error``  ``datastore``, ``hostname``, ``job-id``
+Prune job success                ``prune``            ``info``   ``datastore``, ``hostname``, ``job-id``
+Remote sync failure              ``sync``             ``error``  ``datastore``, ``hostname``, ``job-id``
+Remote sync success              ``sync``             ``info``   ``datastore``, ``hostname``, ``job-id``
+Tape backup job failure          ``tape-backup``      ``error``  ``datastore``, ``hostname``, ``media-pool``, ``job-id``
+Tape backup job success          ``tape-backup``      ``info``   ``datastore``, ``hostname``, ``media-pool``, ``job-id``
+Tape loading request             ``tape-load``        ``notice`` ``hostname``
+Verification job failure         ``verification``     ``error``  ``datastore``, ``hostname``, ``job-id``
+Verification job success         ``verification``     ``info``   ``datastore``, ``hostname``, ``job-id``
+================================ ==================== ========== ==============================================================
+
+The following table contains a description of all used metadata fields. All of these
+can be used in ``match-field`` match rules.
+
+==================== ===================================
+Metadata field       Description
+==================== ===================================
+``datastore``        The name of the datastore
+``hostname``         The hostname of the backup server
+``job-id``           Job ID
+``media-pool``       The name of the tape media pool
+``type``             Notification event type
+==================== ===================================
+
+.. NOTE:: The daily task checking for any available system updates only sends
+   notifications if the node has an active subscription.
+
+System Mail Forwarding
+----------------------
+Certain local system daemons, such as ``smartd``, send notification emails
+to the local ``root`` user. Proxmox Backup Server will feed these mails
+into the notification system as a notification of type ``system-mail``
+and with severity ``unknown``.
+
+When the email is forwarded to a sendmail target, the mail's content and headers
+are forwarded as-is.
For all other targets,
+the system tries to extract both a subject line and the main text body
+from the email content. In instances where emails solely consist of HTML
+content, they will be transformed into plain text format during this process.
+
+Permissions
+-----------
+In order to modify/view the configuration for notification targets,
+the ``Sys.Modify/Sys.Audit`` permissions are required for the
+``/system/notifications`` ACL node.
+
+.. _notification_mode:
+
+Notification Mode
+-----------------
+Datastores and tape backup/restore job configuration have a ``notification-mode``
+option which can have one of two values:
+
+* ``legacy-sendmail``: Send notification emails via the system's ``sendmail`` command.
+  The notification system will be bypassed and any configured targets/matchers will be ignored.
+  This mode is equivalent to the notification behavior for versions before
+  Proxmox Backup Server 3.2.
+
+* ``notification-system``: Use the new, flexible notification system.
+
+If the ``notification-mode`` option is not set, Proxmox Backup Server will default
+to ``legacy-sendmail``.
+
+Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
+automatically opt in to the new notification system. If the datastore is created
+via the API or the ``proxmox-backup-manager`` CLI, the ``notification-mode``
+option has to be set explicitly to ``notification-system`` if the
+notification system is to be used.
+
+The ``legacy-sendmail`` mode might be removed in a later release of
+Proxmox Backup Server.
+
+Settings for ``legacy-sendmail`` notification mode
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
+will send notification emails via the system's ``sendmail`` command to the email
+address configured for the user set in the ``notify-user`` option
+(falling back to ``root@pam`` if not set).
+
+For datastores, you can also change the level of notifications received per task
+type via the ``notify`` option.
+
+* Always: send a notification for any scheduled task, independent of the
+  outcome
+
+* Errors: send a notification for any scheduled task that results in an error
+
+* Never: do not send any notification at all
+
+The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
+is set to ``notification-system``.
diff --git a/docs/storage.rst b/docs/storage.rst
index 4444c4230..f1e15d522 100644
--- a/docs/storage.rst
+++ b/docs/storage.rst
@@ -314,7 +314,7 @@ Options
 There are a few per-datastore options:

-* :ref:`Notifications `
+* :ref:`Notification mode and legacy notification settings `
 * :ref:`Maintenance Mode `
 * Verification of incoming backups

@@ -419,7 +419,7 @@ remote-source to avoid that an attacker that took over the source can cause
 deletions of backups on the target hosts. If the source-host became victim of
 a ransomware attack, there is a good chance that sync jobs will fail,
 triggering an :ref:`error notification
-`.
+`.

 It is also possible to create :ref:`tape backups ` as a second
 storage medium. This way, you get an additional copy of your data on a
diff --git a/docs/sysadmin.rst b/docs/sysadmin.rst
index 114ad4cbb..ec124d41a 100644
--- a/docs/sysadmin.rst
+++ b/docs/sysadmin.rst
@@ -30,6 +30,8 @@ please refer to the standard Debian documentation.

 .. include:: certificate-management.rst

+.. include:: external-metric-server.rst
+
 .. include:: services.rst

 ..
include:: command-line-tools.rst diff --git a/docs/system-requirements.rst b/docs/system-requirements.rst index fb920865d..023003c3a 100644 --- a/docs/system-requirements.rst +++ b/docs/system-requirements.rst @@ -38,7 +38,8 @@ Recommended Server System Requirements * Backup storage: - * Use only SSDs, for best results + * Prefer fast storage that delivers high IOPS for random IO workloads; use + only enterprise SSDs for best results. * If HDDs are used: Using a metadata cache is highly recommended, for example, add a ZFS :ref:`special device mirror `. diff --git a/docs/technical-overview.rst b/docs/technical-overview.rst index 89835a7cc..f79deff38 100644 --- a/docs/technical-overview.rst +++ b/docs/technical-overview.rst @@ -28,6 +28,9 @@ which are not chunked, e.g. the client log), or one or more indexes When uploading an index, the client first has to read the source data, chunk it and send the data as chunks with their identifying checksum to the server. +When using the :ref:`change detection mode ` payload +chunks for unchanged files are reused from the previous snapshot, thereby not +reading the source data again. If there is a previous Snapshot in the backup group, the client can first download the chunk list of the previous Snapshot. If it detects a chunk that @@ -53,8 +56,9 @@ The chunks of a datastore are found in /.chunks/ -This chunk directory is further subdivided by the first four bytes of the -chunk's checksum, so a chunk with the checksum +This chunk directory is further subdivided into directories grouping chunks by +their checksums 2 byte prefix (given as 4 hexadecimal digits), so a chunk with +the checksum a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b diff --git a/docs/user-management.rst b/docs/user-management.rst index 9c425eb47..c670cbf6a 100644 --- a/docs/user-management.rst +++ b/docs/user-management.rst @@ -27,6 +27,9 @@ choose the realm when you add a new user. Possible realms are: :ldap: LDAP server. Users can authenticate against external LDAP servers. +:ad: Active Directory server. Users can authenticate against external Active + Directory servers. + After installation, there is a single user, ``root@pam``, which corresponds to the Unix superuser. User configuration information is stored in the file ``/etc/proxmox-backup/user.cfg``. You can use the ``proxmox-backup-manager`` @@ -646,15 +649,47 @@ A full list of all configuration parameters can be found at :ref:`domains.cfg`. server, you must also add them as a user of that realm in Proxmox Backup Server. This can be carried out automatically with syncing. -User Synchronization in LDAP realms -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. _user_realms_ad: -It is possible to automatically sync users for LDAP-based realms, rather than -having to add them to Proxmox VE manually. Synchronization options can be set -in the LDAP realm configuration dialog window in the GUI and via the -``proxmox-backup-manager ldap create/update`` command. -User synchronization can started in the GUI at -Configuration > Access Control > Realms by selecting a realm and pressing the -`Sync` button. In the sync dialog, some of the default options set in the realm -configuration can be overridden. Alternatively, user synchronization can also -be started via the ``proxmox-backup-manager ldap sync`` command. +Active Directory +~~~~~~~~~~~~~~~~ + +Proxmox Backup Server can also utilize external Microsoft Active Directory +servers for user authentication. +To achieve this, a realm of the type ``ad`` has to be configured. 
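+
+For orientation, the corresponding API type added by this patch
+(``AdRealmConfig`` in ``pbs-api-types/src/ad.rs``) carries the realm options
+described below; a minimal realm could be represented roughly like this
+(hostnames and names are made-up placeholder values):
+
+.. code-block:: rust
+
+   use pbs_api_types::AdRealmConfig;
+
+   fn example_realm() -> AdRealmConfig {
+       AdRealmConfig {
+           realm: "company-ad".to_string(),
+           server1: "ad1.company.net".to_string(), // primary AD server
+           server2: None,                          // optional fallback server
+           port: None,                             // use the default port
+           base_dn: None,          // usually inferred (`defaultNamingContext`)
+           bind_dn: Some("backup-sync@company.net".to_string()),
+           comment: None,
+           mode: None,                             // connection security
+           verify: None,
+           capath: None,
+           filter: None,
+           sync_defaults_options: None,
+           sync_attributes: None,
+           user_classes: None,
+       }
+   }
+
+The individual options are explained in the following paragraphs and in
+:ref:`domains.cfg`.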
+ +For an Active Directory realm, the authentication domain name and the server +address must be specified. Most options from :ref:`user_realms_ldap` apply to +Active Directory as well, most importantly the bind credentials ``bind-dn`` +and ``password``. This is typically required by default for Microsoft Active +Directory. The ``bind-dn`` can be specified either in AD-specific +``user@company.net`` syntax or the common LDAP-DN syntax. + +The authentication domain name must only be specified if anonymous bind is +requested. If bind credentials are given, the domain name is automatically +inferred from the bind users' base domain, as reported by the Active Directory +server. + +A full list of all configuration parameters can be found at :ref:`domains.cfg`. + +.. note:: In order to allow a particular user to authenticate using the Active + Directory server, you must also add them as a user of that realm in Proxmox + Backup Server. This can be carried out automatically with syncing. + +.. note:: Currently, case-insensitive usernames are not supported. + +User Synchronization in LDAP/AD realms +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +It is possible to automatically sync users for LDAP and AD-based realms, rather +than having to add them to Proxmox Backup Server manually. Synchronization +options can be set in the LDAP realm configuration dialog window in the GUI and +via the ``proxmox-backup-manager ldap`` and ``proxmox-backup-manager ad`` +commands, respectively. + +User synchronization can be started in the GUI under **Configuration > Access +Control > Realms** by selecting a realm and pressing the `Sync` button. In the +sync dialog, some of the default options set in the realm configuration can be +overridden. Alternatively, user synchronization can also be started via the +``proxmox-backup-manager ldap sync`` and ``proxmox-backup-manager ad sync`` +command, respectively. diff --git a/examples/tape-write-benchmark.rs b/examples/tape-write-benchmark.rs new file mode 100644 index 000000000..d5686e65a --- /dev/null +++ b/examples/tape-write-benchmark.rs @@ -0,0 +1,91 @@ +use std::{ + fs::File, + io::Read, + time::{Duration, SystemTime}, +}; + +use anyhow::{format_err, Error}; +use pbs_tape::TapeWrite; +use proxmox_backup::tape::drive::{LtoTapeHandle, TapeDriver}; + +const URANDOM_PATH: &str = "/dev/urandom"; +const CHUNK_SIZE: usize = 4 * 1024 * 1024; // 4 MiB +const LOG_LIMIT: usize = 4 * 1024 * 1024 * 1024; // 4 GiB + +fn write_chunks<'a>( + mut writer: Box, + blob_size: usize, + max_size: usize, + max_time: Duration, +) -> Result<(), Error> { + // prepare chunks in memory + + let mut blob: Vec = vec![0u8; blob_size]; + + let mut file = File::open(URANDOM_PATH)?; + file.read_exact(&mut blob[..])?; + + let start_time = SystemTime::now(); + loop { + let iteration_time = SystemTime::now(); + let mut count = 0; + let mut bytes_written = 0; + let mut idx = 0; + let mut incr_count = 0; + loop { + if writer.write_all(&blob)? 
{ + eprintln!("LEOM reached"); + break; + } + + // modifying chunks a bit to mitigate compression/deduplication + blob[idx] = blob[idx].wrapping_add(1); + incr_count += 1; + if incr_count >= 256 { + incr_count = 0; + idx += 1; + } + count += 1; + bytes_written += blob_size; + + if bytes_written > max_size { + break; + } + } + + let elapsed = iteration_time.elapsed()?.as_secs_f64(); + let elapsed_total = start_time.elapsed()?; + eprintln!( + "{:.2}s: wrote {} chunks ({:.2} MB at {:.2} MB/s, average: {:.2} MB/s)", + elapsed_total.as_secs_f64(), + count, + bytes_written as f64 / 1_000_000.0, + (bytes_written as f64) / (1_000_000.0 * elapsed), + (writer.bytes_written() as f64) / (1_000_000.0 * elapsed_total.as_secs_f64()), + ); + + if elapsed_total > max_time { + break; + } + } + + Ok(()) +} +fn main() -> Result<(), Error> { + let mut args = std::env::args_os(); + args.next(); // binary name + let path = args.next().expect("no path to tape device given"); + let file = File::open(path).map_err(|err| format_err!("could not open tape device: {err}"))?; + let mut drive = LtoTapeHandle::new(file) + .map_err(|err| format_err!("error creating drive handle: {err}"))?; + write_chunks( + drive + .write_file() + .map_err(|err| format_err!("error starting file write: {err}"))?, + CHUNK_SIZE, + LOG_LIMIT, + Duration::new(60 * 20, 0), + ) + .map_err(|err| format_err!("error writing data to tape: {err}"))?; + Ok(()) +} diff --git a/examples/test_chunk_size.rs b/examples/test_chunk_size.rs index a01a5e640..2ebc22f64 100644 --- a/examples/test_chunk_size.rs +++ b/examples/test_chunk_size.rs @@ -5,10 +5,10 @@ extern crate proxmox_backup; use anyhow::Error; use std::io::{Read, Write}; -use pbs_datastore::Chunker; +use pbs_datastore::{Chunker, ChunkerImpl}; struct ChunkWriter { - chunker: Chunker, + chunker: ChunkerImpl, last_chunk: usize, chunk_offset: usize, @@ -23,7 +23,7 @@ struct ChunkWriter { impl ChunkWriter { fn new(chunk_size: usize) -> Self { ChunkWriter { - chunker: Chunker::new(chunk_size), + chunker: ChunkerImpl::new(chunk_size), last_chunk: 0, chunk_offset: 0, chunk_count: 0, @@ -69,7 +69,8 @@ impl Write for ChunkWriter { fn write(&mut self, data: &[u8]) -> std::result::Result { let chunker = &mut self.chunker; - let pos = chunker.scan(data); + let ctx = pbs_datastore::chunker::Context::default(); + let pos = chunker.scan(data, &ctx); if pos > 0 { self.chunk_offset += pos; diff --git a/examples/test_chunk_speed.rs b/examples/test_chunk_speed.rs index 37e13e0de..2d79604ab 100644 --- a/examples/test_chunk_speed.rs +++ b/examples/test_chunk_speed.rs @@ -1,6 +1,6 @@ extern crate proxmox_backup; -use pbs_datastore::Chunker; +use pbs_datastore::{Chunker, ChunkerImpl}; fn main() { let mut buffer = Vec::new(); @@ -12,7 +12,7 @@ fn main() { buffer.push(byte); } } - let mut chunker = Chunker::new(64 * 1024); + let mut chunker = ChunkerImpl::new(64 * 1024); let count = 5; @@ -23,8 +23,9 @@ fn main() { for _i in 0..count { let mut pos = 0; let mut _last = 0; + let ctx = pbs_datastore::chunker::Context::default(); while pos < buffer.len() { - let k = chunker.scan(&buffer[pos..]); + let k = chunker.scan(&buffer[pos..], &ctx); if k == 0 { //println!("LAST {}", pos); break; diff --git a/examples/test_chunk_speed2.rs b/examples/test_chunk_speed2.rs index 3f69b436d..ee349a53c 100644 --- a/examples/test_chunk_speed2.rs +++ b/examples/test_chunk_speed2.rs @@ -1,9 +1,10 @@ +use std::str::FromStr; + use anyhow::Error; use futures::*; -extern crate proxmox_backup; - use pbs_client::ChunkStream; +use 
proxmox_human_byte::HumanByte; // Test Chunker with real data read from a file. // @@ -21,12 +22,22 @@ fn main() { async fn run() -> Result<(), Error> { let file = tokio::fs::File::open("random-test.dat").await?; - let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new()) - .map_ok(|bytes| bytes.to_vec()) - .map_err(Error::from); + let mut args = std::env::args(); + args.next(); + + let buffer_size = args.next().unwrap_or("8k".to_string()); + let buffer_size = HumanByte::from_str(&buffer_size)?; + println!("Using buffer size {buffer_size}"); + + let stream = tokio_util::codec::FramedRead::with_capacity( + file, + tokio_util::codec::BytesCodec::new(), + buffer_size.as_u64() as usize, + ) + .map_err(Error::from); //let chunk_stream = FixedChunkStream::new(stream, 4*1024*1024); - let mut chunk_stream = ChunkStream::new(stream, None); + let mut chunk_stream = ChunkStream::new(stream, None, None, None); let start_time = std::time::Instant::now(); @@ -40,7 +51,7 @@ async fn run() -> Result<(), Error> { repeat += 1; stream_len += chunk.len(); - println!("Got chunk {}", chunk.len()); + //println!("Got chunk {}", chunk.len()); } let speed = diff --git a/examples/upload-speed.rs b/examples/upload-speed.rs index f9fc52a85..e4b570ec5 100644 --- a/examples/upload-speed.rs +++ b/examples/upload-speed.rs @@ -18,7 +18,7 @@ async fn upload_speed() -> Result { let backup_time = proxmox_time::epoch_i64(); let client = BackupWriter::start( - client, + &client, None, datastore, &BackupNamespace::root(), diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml index 94ab583b1..17c946fe3 100644 --- a/pbs-api-types/Cargo.toml +++ b/pbs-api-types/Cargo.toml @@ -9,13 +9,13 @@ description = "general API type helpers for PBS" anyhow.workspace = true const_format.workspace = true hex.workspace = true -lazy_static.workspace = true percent-encoding.workspace = true regex.workspace = true serde.workspace = true serde_plain.workspace = true proxmox-auth-api = { workspace = true, features = [ "api-types" ] } +proxmox-apt-api-types.workspace = true proxmox-human-byte.workspace = true proxmox-lang.workspace=true proxmox-schema = { workspace = true, features = [ "api-macro" ] } diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index ef6398629..a8ae57a9d 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -223,7 +223,7 @@ pub enum Role { RemoteAudit = ROLE_REMOTE_AUDIT, /// Remote Administrator RemoteAdmin = ROLE_REMOTE_ADMIN, - /// Syncronisation Opertator + /// Synchronization Operator RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR, /// Tape Auditor TapeAudit = ROLE_TAPE_AUDIT, diff --git a/pbs-api-types/src/ad.rs b/pbs-api-types/src/ad.rs new file mode 100644 index 000000000..910571a03 --- /dev/null +++ b/pbs-api-types/src/ad.rs @@ -0,0 +1,98 @@ +use serde::{Deserialize, Serialize}; + +use proxmox_schema::{api, Updater}; + +use super::{ + LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, + SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA, +}; + +#[api( + properties: { + "realm": { + schema: REALM_ID_SCHEMA, + }, + "comment": { + optional: true, + schema: SINGLE_LINE_COMMENT_SCHEMA, + }, + "verify": { + optional: true, + default: false, + }, + "sync-defaults-options": { + schema: SYNC_DEFAULTS_STRING_SCHEMA, + optional: true, + }, + "sync-attributes": { + schema: SYNC_ATTRIBUTES_SCHEMA, + optional: true, + }, + "user-classes" : { + optional: true, + schema: USER_CLASSES_SCHEMA, + }, + "base-dn" : { + 
schema: LDAP_DOMAIN_SCHEMA, + optional: true, + }, + "bind-dn" : { + schema: LDAP_DOMAIN_SCHEMA, + optional: true, + } + }, +)] +#[derive(Serialize, Deserialize, Updater, Clone)] +#[serde(rename_all = "kebab-case")] +/// AD realm configuration properties. +pub struct AdRealmConfig { + #[updater(skip)] + pub realm: String, + /// AD server address + pub server1: String, + /// Fallback AD server address + #[serde(skip_serializing_if = "Option::is_none")] + pub server2: Option, + /// AD server Port + #[serde(skip_serializing_if = "Option::is_none")] + pub port: Option, + /// Base domain name. Users are searched under this domain using a `subtree search`. + /// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be + /// overridden if the need arises. + #[serde(skip_serializing_if = "Option::is_none")] + pub base_dn: Option, + /// Comment + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, + /// Connection security + #[serde(skip_serializing_if = "Option::is_none")] + pub mode: Option, + /// Verify server certificate + #[serde(skip_serializing_if = "Option::is_none")] + pub verify: Option, + /// CA certificate to use for the server. The path can point to + /// either a file, or a directory. If it points to a file, + /// the PEM-formatted X.509 certificate stored at the path + /// will be added as a trusted certificate. + /// If the path points to a directory, + /// the directory replaces the system's default certificate + /// store at `/etc/ssl/certs` - Every file in the directory + /// will be loaded as a trusted certificate. + #[serde(skip_serializing_if = "Option::is_none")] + pub capath: Option, + /// Bind domain to use for looking up users + #[serde(skip_serializing_if = "Option::is_none")] + pub bind_dn: Option, + /// Custom LDAP search filter for user sync + #[serde(skip_serializing_if = "Option::is_none")] + pub filter: Option, + /// Default options for AD sync + #[serde(skip_serializing_if = "Option::is_none")] + pub sync_defaults_options: Option, + /// List of LDAP attributes to sync from AD to user config + #[serde(skip_serializing_if = "Option::is_none")] + pub sync_attributes: Option, + /// User ``objectClass`` classes to sync + #[serde(skip_serializing_if = "Option::is_none")] + pub user_classes: Option, +} diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 5e13c157e..31767417a 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -11,8 +11,8 @@ use proxmox_schema::{ }; use crate::{ - Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid, BACKUP_ID_RE, - BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA, + Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, MaintenanceType, Userid, + BACKUP_ID_RE, BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA, GC_SCHEDULE_SCHEMA, GROUP_OR_SNAPSHOT_PATH_REGEX_STR, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX_STR, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, SNAPSHOT_PATH_REGEX_STR, UPID, @@ -309,6 +309,10 @@ pub struct DataStoreConfig { #[serde(skip_serializing_if = "Option::is_none")] pub notify: Option, + /// Opt in to the new notification system + #[serde(skip_serializing_if = "Option::is_none")] + pub notification_mode: Option, + /// Datastore tuning options #[serde(skip_serializing_if = "Option::is_none")] pub tuning: Option, @@ -318,6 +322,23 @@ pub struct DataStoreConfig { pub maintenance_mode: Option, } +#[api] 
+#[derive(Serialize, Deserialize, Updater, Clone, PartialEq, Default)] +#[serde(rename_all = "kebab-case")] +/// Configure how notifications for this datastore should be sent. +/// `legacy-sendmail` sends email notifications to the user configured +/// in `notify-user` via the system's `sendmail` executable. +/// `notification-system` emits matchable notification events to the +/// notification system. +pub enum NotificationMode { + /// Send notifications via the system's sendmail command to the user + /// configured in `notify-user` + #[default] + LegacySendmail, + /// Emit notification events to the notification system + NotificationSystem, +} + impl DataStoreConfig { pub fn new(name: String, path: String) -> Self { Self { @@ -330,16 +351,51 @@ impl DataStoreConfig { verify_new: None, notify_user: None, notify: None, + notification_mode: None, tuning: None, maintenance_mode: None, } } pub fn get_maintenance_mode(&self) -> Option { - self.maintenance_mode - .as_ref() - .and_then(|str| MaintenanceMode::API_SCHEMA.parse_property_string(str).ok()) - .and_then(|value| MaintenanceMode::deserialize(value).ok()) + self.maintenance_mode.as_ref().and_then(|str| { + MaintenanceMode::deserialize(proxmox_schema::de::SchemaDeserializer::new( + str, + &MaintenanceMode::API_SCHEMA, + )) + .ok() + }) + } + + pub fn set_maintenance_mode(&mut self, new_mode: Option) -> Result<(), Error> { + let current_type = self.get_maintenance_mode().map(|mode| mode.ty); + let new_type = new_mode.as_ref().map(|mode| mode.ty); + + match current_type { + Some(MaintenanceType::ReadOnly) => { /* always OK */ } + Some(MaintenanceType::Offline) => { /* always OK */ } + Some(MaintenanceType::Delete) => { + match new_type { + Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ } + _ => { + bail!("datastore is being deleted") + } + } + } + None => { /* always OK */ } + } + + let new_mode = match new_mode { + Some(new_mode) => Some( + proxmox_schema::property_string::PropertyString::new(new_mode) + .to_property_string()?, + ), + None => None, + }; + + self.maintenance_mode = new_mode; + + Ok(()) } } @@ -1246,7 +1302,7 @@ pub struct TypeCounts { }, }, )] -#[derive(Clone, Default, Serialize, Deserialize, PartialEq)] +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] #[serde(rename_all = "kebab-case")] /// Garbage collection status. 
pub struct GarbageCollectionStatus { @@ -1273,6 +1329,38 @@ pub struct GarbageCollectionStatus { pub still_bad: usize, } +#[api( + properties: { + "status": { + type: GarbageCollectionStatus, + }, + } +)] +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "kebab-case")] +/// Garbage Collection general info +pub struct GarbageCollectionJobStatus { + /// Datastore + pub store: String, + #[serde(flatten)] + pub status: GarbageCollectionStatus, + /// Schedule of the gc job + #[serde(skip_serializing_if = "Option::is_none")] + pub schedule: Option, + /// Time of the next gc run + #[serde(skip_serializing_if = "Option::is_none")] + pub next_run: Option, + /// Endtime of the last gc run + #[serde(skip_serializing_if = "Option::is_none")] + pub last_run_endtime: Option, + /// State of the last gc run + #[serde(skip_serializing_if = "Option::is_none")] + pub last_run_state: Option, + /// Duration of last gc run + #[serde(skip_serializing_if = "Option::is_none")] + pub duration: Option, +} + #[api( properties: { "gc-status": { diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 6fb9b187d..868702bc0 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -8,9 +8,9 @@ use serde::{Deserialize, Serialize}; use proxmox_schema::*; use crate::{ - Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, - BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, - MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, + Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid, + BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA, + DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA, }; @@ -325,6 +325,8 @@ pub struct TapeBackupJobSetup { #[serde(skip_serializing_if = "Option::is_none")] pub notify_user: Option, #[serde(skip_serializing_if = "Option::is_none")] + pub notification_mode: Option, + #[serde(skip_serializing_if = "Option::is_none")] pub group_filter: Option>, #[serde(skip_serializing_if = "Option::is_none", default)] pub ns: Option, diff --git a/pbs-api-types/src/ldap.rs b/pbs-api-types/src/ldap.rs index f3df90a09..a3e0407b5 100644 --- a/pbs-api-types/src/ldap.rs +++ b/pbs-api-types/src/ldap.rs @@ -149,7 +149,7 @@ pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults .schema(); const REMOVE_VANISHED_DESCRIPTION: &str = - "A semicolon-seperated list of things to remove when they or the user \ + "A semicolon-separated list of things to remove when they or the user \ vanishes during user synchronization. The following values are possible: ``entry`` removes the \ user when not returned from the sync; ``properties`` removes any \ properties on existing user that do not appear in the source. 
\ diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 7eb836ed8..635292a54 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -52,6 +52,13 @@ pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA}; use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR}; +// re-export APT API types +pub use proxmox_apt_api_types::{ + APTChangeRepositoryOptions, APTGetChangelogOptions, APTRepositoriesResult, APTRepositoryFile, + APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo, APTStandardRepository, + APTUpdateInfo, APTUpdateOptions, +}; + #[rustfmt::skip] pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*"; @@ -130,6 +137,9 @@ pub use openid::*; mod ldap; pub use ldap::*; +mod ad; +pub use ad::*; + mod remote; pub use remote::*; @@ -246,34 +256,6 @@ pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.") .max_length(64) .schema(); -#[api()] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "PascalCase")] -/// Describes a package for which an update is available. -pub struct APTUpdateInfo { - /// Package name - pub package: String, - /// Package title - pub title: String, - /// Package architecture - pub arch: String, - /// Human readable package description - pub description: String, - /// New version to be updated to - pub version: String, - /// Old version currently installed - pub old_version: String, - /// Package origin - pub origin: String, - /// Package priority in human-readable form - pub priority: String, - /// Package section - pub section: String, - /// Custom extra field for additional package information - #[serde(skip_serializing_if = "Option::is_none")] - pub extra_info: Option, -} - #[api()] #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -335,36 +317,6 @@ pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType { schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(), }; -#[api()] -#[derive(Copy, Clone, Serialize, Deserialize)] -#[serde(rename_all = "UPPERCASE")] -/// RRD consolidation mode -pub enum RRDMode { - /// Maximum - Max, - /// Average - Average, -} - -#[api()] -#[derive(Copy, Clone, Serialize, Deserialize)] -#[serde(rename_all = "lowercase")] -/// RRD time frame -pub enum RRDTimeFrame { - /// Hour - Hour, - /// Day - Day, - /// Week - Week, - /// Month - Month, - /// Year - Year, - /// Decade (10 years) - Decade, -} - #[api] #[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)] #[serde(rename_all = "lowercase")] @@ -378,8 +330,13 @@ pub enum RealmType { OpenId, /// An LDAP realm Ldap, + /// An Active Directory (AD) realm + Ad, } +serde_plain::derive_display_from_serialize!(RealmType); +serde_plain::derive_fromstr_from_deserialize!(RealmType); + #[api( properties: { realm: { diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index a605cc17a..1e3413dca 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -33,7 +33,7 @@ pub enum Operation { } #[api] -#[derive(Deserialize, Serialize, PartialEq, Eq)] +#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)] #[serde(rename_all = "kebab-case")] /// Maintenance type. pub enum MaintenanceType { @@ -69,11 +69,11 @@ serde_plain::derive_fromstr_from_deserialize!(MaintenanceType); pub struct MaintenanceMode { /// Type of maintenance ("read-only" or "offline"). 
#[serde(rename = "type")] - ty: MaintenanceType, + pub ty: MaintenanceType, /// Reason for maintenance. #[serde(skip_serializing_if = "Option::is_none")] - message: Option, + pub message: Option, } impl MaintenanceMode { diff --git a/pbs-api-types/src/metrics.rs b/pbs-api-types/src/metrics.rs index 6800c23be..262665292 100644 --- a/pbs-api-types/src/metrics.rs +++ b/pbs-api-types/src/metrics.rs @@ -12,14 +12,12 @@ pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID .schema(); pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.") - .format(&PROXMOX_SAFE_ID_FORMAT) .min_length(3) .max_length(32) .default("proxmox") .schema(); pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.") - .format(&PROXMOX_SAFE_ID_FORMAT) .min_length(3) .max_length(32) .default("proxmox") @@ -129,13 +127,14 @@ pub struct InfluxDbHttp { pub enable: bool, /// The base url of the influxdb server pub url: String, - /// The Optional Token #[serde(skip_serializing_if = "Option::is_none")] /// The (optional) API token pub token: Option, #[serde(skip_serializing_if = "Option::is_none")] + /// Named location where time series data is stored pub bucket: Option, #[serde(skip_serializing_if = "Option::is_none")] + /// Workspace for a group of users pub organization: Option, #[serde(skip_serializing_if = "Option::is_none")] /// The (optional) maximum body size @@ -188,3 +187,69 @@ pub struct MetricServerInfo { #[serde(skip_serializing_if = "Option::is_none")] pub comment: Option, } + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[api( + properties: { + data: { + type: Array, + items: { + type: MetricDataPoint, + } + } + } +)] +/// Return type for the metric API endpoint +pub struct Metrics { + /// List of metric data points, sorted by timestamp + pub data: Vec, +} + +#[api( + properties: { + id: { + type: String, + }, + metric: { + type: String, + }, + timestamp: { + type: Integer, + }, + }, +)] +/// Metric data point +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct MetricDataPoint { + /// Unique identifier for this metric object, for instance 'node/' + /// or 'qemu/'. + pub id: String, + + /// Name of the metric. + pub metric: String, + + /// Time at which this metric was observed + pub timestamp: i64, + + #[serde(rename = "type")] + pub ty: MetricDataType, + + /// Metric value. + pub value: f64, +} + +#[api] +/// Type of the metric. +#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)] +#[serde(rename_all = "lowercase")] +pub enum MetricDataType { + /// gauge. + Gauge, + /// counter. + Counter, + /// derive. + Derive, +} + +serde_plain::derive_display_from_serialize!(MetricDataType); +serde_plain::derive_fromstr_from_deserialize!(MetricDataType); diff --git a/pbs-api-types/src/network.rs b/pbs-api-types/src/network.rs index e3a5e4815..fe083dc6c 100644 --- a/pbs-api-types/src/network.rs +++ b/pbs-api-types/src/network.rs @@ -224,6 +224,15 @@ pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema = schema: NETWORK_INTERFACE_ARRAY_SCHEMA, optional: true, }, + "vlan-id": { + description: "VLAN ID.", + type: u16, + optional: true, + }, + "vlan-raw-device": { + schema: NETWORK_INTERFACE_NAME_SCHEMA, + optional: true, + }, bond_mode: { type: LinuxBondMode, optional: true, @@ -287,6 +296,12 @@ pub struct Interface { /// Enable bridge vlan support. 
#[serde(skip_serializing_if = "Option::is_none")] pub bridge_vlan_aware: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "vlan-id")] + pub vlan_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "vlan-raw-device")] + pub vlan_raw_device: Option, #[serde(skip_serializing_if = "Option::is_none")] pub slaves: Option>, @@ -319,6 +334,8 @@ impl Interface { mtu: None, bridge_ports: None, bridge_vlan_aware: None, + vlan_id: None, + vlan_raw_device: None, slaves: None, bond_mode: None, bond_primary: None, diff --git a/pbs-api-types/src/tape/drive.rs b/pbs-api-types/src/tape/drive.rs index 626c5d9c3..2b788bd67 100644 --- a/pbs-api-types/src/tape/drive.rs +++ b/pbs-api-types/src/tape/drive.rs @@ -93,6 +93,9 @@ pub struct DriveListEntry { /// the state of the drive if locked #[serde(skip_serializing_if = "Option::is_none")] pub state: Option, + /// Current device activity + #[serde(skip_serializing_if = "Option::is_none")] + pub activity: Option, } #[api()] @@ -216,6 +219,9 @@ pub struct LtoDriveAndMediaStatus { /// Estimated tape wearout factor (assuming max. 16000 end-to-end passes) #[serde(skip_serializing_if = "Option::is_none")] pub medium_wearout: Option, + /// Current device activity + #[serde(skip_serializing_if = "Option::is_none")] + pub drive_activity: Option, } #[api()] @@ -276,3 +282,68 @@ pub struct Lp17VolumeStatistics { /// Volume serial number pub serial: String, } + +/// The DT Device Activity from DT Device Status LP page +#[api] +#[derive(Copy, Clone, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub enum DeviceActivity { + /// No activity + NoActivity, + /// Cleaning + Cleaning, + /// Loading + Loading, + /// Unloading + Unloading, + /// Other unspecified activity + Other, + /// Reading + Reading, + /// Writing + Writing, + /// Locating + Locating, + /// Rewinding + Rewinding, + /// Erasing + Erasing, + /// Formatting + Formatting, + /// Calibrating + Calibrating, + /// Other (DT) + OtherDT, + /// Updating microcode + MicrocodeUpdate, + /// Reading encrypted data + ReadingEncrypted, + /// Writing encrypted data + WritingEncrypted, +} + +impl TryFrom for DeviceActivity { + type Error = Error; + + fn try_from(value: u8) -> Result { + Ok(match value { + 0x00 => DeviceActivity::NoActivity, + 0x01 => DeviceActivity::Cleaning, + 0x02 => DeviceActivity::Loading, + 0x03 => DeviceActivity::Unloading, + 0x04 => DeviceActivity::Other, + 0x05 => DeviceActivity::Reading, + 0x06 => DeviceActivity::Writing, + 0x07 => DeviceActivity::Locating, + 0x08 => DeviceActivity::Rewinding, + 0x09 => DeviceActivity::Erasing, + 0x0A => DeviceActivity::Formatting, + 0x0B => DeviceActivity::Calibrating, + 0x0C => DeviceActivity::OtherDT, + 0x0D => DeviceActivity::MicrocodeUpdate, + 0x0E => DeviceActivity::ReadingEncrypted, + 0x0F => DeviceActivity::WritingEncrypted, + other => bail!("invalid DT device activity value: {:x}", other), + }) + } +} diff --git a/pbs-api-types/src/tape/media.rs b/pbs-api-types/src/tape/media.rs index 6792cd3c9..6227f4634 100644 --- a/pbs-api-types/src/tape/media.rs +++ b/pbs-api-types/src/tape/media.rs @@ -81,6 +81,9 @@ pub struct MediaListEntry { /// Media Pool #[serde(skip_serializing_if = "Option::is_none")] pub pool: Option, + #[serde(skip_serializing_if = "Option::is_none")] + /// Bytes currently used + pub bytes_used: Option, } #[api( diff --git a/pbs-api-types/src/traffic_control.rs b/pbs-api-types/src/traffic_control.rs index fb264531e..0da327f2c 100644 --- a/pbs-api-types/src/traffic_control.rs 
+++ b/pbs-api-types/src/traffic_control.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use proxmox_human_byte::HumanByte; -use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater}; +use proxmox_schema::{api, ApiType, Schema, StringSchema, Updater}; use crate::{ CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA, @@ -18,16 +18,6 @@ pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.") .max_length(32) .schema(); -pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema = - IntegerSchema::new("Rate limit (for Token bucket filter) in bytes/second.") - .minimum(100_000) - .schema(); - -pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema = - IntegerSchema::new("Size of the token bucket (for Token bucket filter) in bytes.") - .minimum(1000) - .schema(); - #[api( properties: { "rate-in": { @@ -71,6 +61,45 @@ impl RateLimitConfig { burst_out: burst, } } + + /// Create a [RateLimitConfig] from a [ClientRateLimitConfig] + pub fn from_client_config(limit: ClientRateLimitConfig) -> Self { + Self::with_same_inout(limit.rate, limit.burst) + } +} + +const CLIENT_RATE_LIMIT_SCHEMA: Schema = StringSchema { + description: "Rate limit (for Token bucket filter) in bytes/s with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).", + ..*HumanByte::API_SCHEMA.unwrap_string_schema() +} +.schema(); + +const CLIENT_BURST_SCHEMA: Schema = StringSchema { + description: "Size of the token bucket (for Token bucket filter) in bytes with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).", + ..*HumanByte::API_SCHEMA.unwrap_string_schema() +} +.schema(); + +#[api( + properties: { + rate: { + schema: CLIENT_RATE_LIMIT_SCHEMA, + optional: true, + }, + burst: { + schema: CLIENT_BURST_SCHEMA, + optional: true, + }, + }, +)] +#[derive(Serialize, Deserialize, Default, Clone)] +#[serde(rename_all = "kebab-case")] +/// Client Rate Limit Configuration +pub struct ClientRateLimitConfig { + #[serde(skip_serializing_if = "Option::is_none")] + rate: Option, + #[serde(skip_serializing_if = "Option::is_none")] + burst: Option, } #[api( diff --git a/pbs-buildcfg/src/lib.rs b/pbs-buildcfg/src/lib.rs index ede3b1d13..b77c8fc6d 100644 --- a/pbs-buildcfg/src/lib.rs +++ b/pbs-buildcfg/src/lib.rs @@ -1,5 +1,11 @@ //! Exports configuration data from the build system +pub const PROXMOX_BACKUP_CRATE_VERSION: &str = env!("CARGO_PKG_VERSION"); + +// TODO: clean-up, drop the RELEASE one, should not be required on its own and if it would be just +// the X.Y part, also add the Debian package revision (extracted through build.rs) in an existing +// or new constant. + pub const PROXMOX_PKG_VERSION: &str = concat!( env!("CARGO_PKG_VERSION_MAJOR"), ".", @@ -90,6 +96,8 @@ pub const PROXMOX_BACKUP_KERNEL_FN: &str = pub const PROXMOX_BACKUP_SUBSCRIPTION_FN: &str = configdir!("/subscription"); +pub const APT_PKG_STATE_FN: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/pkg-state.json"); + /// Prepend configuration directory to a file name /// /// This is a simply way to get the full path for configuration files. 
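As a side note on the ``ClientRateLimitConfig`` change above: rate and burst are
now accepted as human-readable size strings. The following is a rough sketch of
what parsing such a value looks like with ``proxmox_human_byte::HumanByte``,
using the ``from_str``/``as_u64`` interface this patch also uses in
``examples/test_chunk_speed2.rs`` (the concrete values are made up):

.. code-block:: rust

   use std::str::FromStr;

   use anyhow::Error;
   use proxmox_human_byte::HumanByte;

   fn main() -> Result<(), Error> {
       // "10MiB" is base-2, "10MB" is base-10; both are valid rate limit strings.
       for limit in ["10MiB", "10MB"] {
           let rate = HumanByte::from_str(limit)?;
           println!("{limit} -> {} bytes/s", rate.as_u64());
       }
       Ok(())
   }

``RateLimitConfig::from_client_config`` then applies the parsed rate and burst
to both directions via ``with_same_inout``.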
diff --git a/pbs-client/Cargo.toml b/pbs-client/Cargo.toml index ed7d651de..00c18b83a 100644 --- a/pbs-client/Cargo.toml +++ b/pbs-client/Cargo.toml @@ -14,7 +14,6 @@ h2.workspace = true hex.workspace = true http.workspace = true hyper.workspace = true -lazy_static.workspace = true libc.workspace = true log.workspace = true nix.workspace = true @@ -39,7 +38,6 @@ proxmox-compression.workspace = true proxmox-http = { workspace = true, features = [ "rate-limiter" ] } proxmox-human-byte.workspace = true proxmox-io = { workspace = true, features = [ "tokio" ] } -proxmox-lang.workspace = true proxmox-router = { workspace = true, features = [ "cli", "server" ] } proxmox-schema.workspace = true proxmox-sys.workspace = true @@ -48,6 +46,5 @@ proxmox-time.workspace = true pxar.workspace = true pbs-api-types.workspace = true -pbs-buildcfg.workspace = true pbs-datastore.workspace = true pbs-tools.workspace = true diff --git a/pbs-client/src/backup_reader.rs b/pbs-client/src/backup_reader.rs index 36d8ebcf3..4706abc78 100644 --- a/pbs-client/src/backup_reader.rs +++ b/pbs-client/src/backup_reader.rs @@ -1,7 +1,6 @@ use anyhow::{format_err, Error}; use std::fs::File; use std::io::{Seek, SeekFrom, Write}; -use std::os::unix::fs::OpenOptionsExt; use std::sync::Arc; use futures::future::AbortHandle; @@ -141,18 +140,14 @@ impl BackupReader { /// Download a .blob file /// - /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using - /// the provided manifest. + /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for + /// details). The data is verified using the provided manifest. pub async fn download_blob( &self, manifest: &BackupManifest, name: &str, ) -> Result, Error> { - let mut tmpfile = std::fs::OpenOptions::new() - .write(true) - .read(true) - .custom_flags(libc::O_TMPFILE) - .open("/tmp")?; + let mut tmpfile = crate::tools::create_tmp_file()?; self.download(name, &mut tmpfile).await?; @@ -167,18 +162,14 @@ impl BackupReader { /// Download dynamic index file /// - /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using - /// the provided manifest. + /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for + /// details). The index is verified using the provided manifest. pub async fn download_dynamic_index( &self, manifest: &BackupManifest, name: &str, ) -> Result { - let mut tmpfile = std::fs::OpenOptions::new() - .write(true) - .read(true) - .custom_flags(libc::O_TMPFILE) - .open("/tmp")?; + let mut tmpfile = crate::tools::create_tmp_file()?; self.download(name, &mut tmpfile).await?; @@ -194,18 +185,14 @@ impl BackupReader { /// Download fixed index file /// - /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using - /// the provided manifest. + /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for + /// details). The index is verified using the provided manifest. 
pub async fn download_fixed_index( &self, manifest: &BackupManifest, name: &str, ) -> Result { - let mut tmpfile = std::fs::OpenOptions::new() - .write(true) - .read(true) - .custom_flags(libc::O_TMPFILE) - .open("/tmp")?; + let mut tmpfile = crate::tools::create_tmp_file()?; self.download(name, &mut tmpfile).await?; diff --git a/pbs-client/src/backup_specification.rs b/pbs-client/src/backup_specification.rs index 619a3a9da..cd9e34243 100644 --- a/pbs-client/src/backup_specification.rs +++ b/pbs-client/src/backup_specification.rs @@ -1,4 +1,5 @@ use anyhow::{bail, Error}; +use serde::{Deserialize, Serialize}; use proxmox_schema::*; @@ -45,3 +46,28 @@ pub fn parse_backup_specification(value: &str) -> Result bool { + matches!(self, Self::Data) + } + /// Selected mode is metadata based file change detection + pub fn is_metadata(&self) -> bool { + matches!(self, Self::Metadata) + } +} diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs index 8a03d8ea6..4d2e8a801 100644 --- a/pbs-client/src/backup_writer.rs +++ b/pbs-client/src/backup_writer.rs @@ -1,6 +1,5 @@ use std::collections::HashSet; use std::future::Future; -use std::os::unix::fs::OpenOptionsExt; use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; @@ -22,7 +21,9 @@ use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1}; use pbs_tools::crypt_config::CryptConfig; use proxmox_human_byte::HumanByte; +use proxmox_time::TimeSpan; +use super::inject_reused_chunks::{InjectChunks, InjectReusedChunks, InjectedChunksInfo}; use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo}; use super::{H2Client, HttpClient}; @@ -56,14 +57,21 @@ pub struct UploadOptions { struct UploadStats { chunk_count: usize, chunk_reused: usize, + chunk_injected: usize, size: usize, size_reused: usize, + size_injected: usize, size_compressed: usize, duration: std::time::Duration, csum: [u8; 32], } -type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option)>; +struct ChunkUploadResponse { + future: h2::client::ResponseFuture, + size: usize, +} + +type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option)>; type UploadResultReceiver = oneshot::Receiver>; impl BackupWriter { @@ -78,7 +86,7 @@ impl BackupWriter { // FIXME: extract into (flattened) parameter struct? 
#[allow(clippy::too_many_arguments)] pub async fn start( - client: HttpClient, + client: &HttpClient, crypt_config: Option>, datastore: &str, ns: &BackupNamespace, @@ -265,6 +273,7 @@ impl BackupWriter { archive_name: &str, stream: impl Stream>, options: UploadOptions, + injections: Option>, ) -> Result { let known_chunks = Arc::new(Mutex::new(HashSet::new())); @@ -329,6 +338,12 @@ impl BackupWriter { .as_u64() .unwrap(); + let archive = if log::log_enabled!(log::Level::Debug) { + archive_name + } else { + pbs_tools::format::strip_server_file_extension(archive_name) + }; + let upload_stats = Self::upload_chunk_info_stream( self.h2.clone(), wid, @@ -341,16 +356,21 @@ impl BackupWriter { None }, options.compress, + injections, + archive, ) .await?; let size_dirty = upload_stats.size - upload_stats.size_reused; let size: HumanByte = upload_stats.size.into(); - let archive = if log::log_enabled!(log::Level::Debug) { - archive_name - } else { - pbs_tools::format::strip_server_file_extension(archive_name) - }; + + if upload_stats.chunk_injected > 0 { + log::info!( + "{archive}: reused {} from previous snapshot for unchanged files ({} chunks)", + HumanByte::from(upload_stats.size_injected), + upload_stats.chunk_injected, + ); + } if archive_name != CATALOG_NAME { let speed: HumanByte = @@ -358,14 +378,9 @@ impl BackupWriter { let size_dirty: HumanByte = size_dirty.into(); let size_compressed: HumanByte = upload_stats.size_compressed.into(); log::info!( - "{}: had to backup {} of {} (compressed {}) in {:.2}s", - archive, - size_dirty, - size, - size_compressed, + "{archive}: had to backup {size_dirty} of {size} (compressed {size_compressed}) in {:.2} s (average {speed}/s)", upload_stats.duration.as_secs_f64() ); - log::info!("{}: average backup speed: {}/s", archive, speed); } else { log::info!("Uploaded backup catalog ({})", size); } @@ -455,6 +470,7 @@ impl BackupWriter { h2: H2Client, wid: u64, path: String, + uploaded: Arc, ) -> (UploadQueueSender, UploadResultReceiver) { let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64); let (verify_result_tx, verify_result_rx) = oneshot::channel(); @@ -463,15 +479,21 @@ impl BackupWriter { tokio::spawn( ReceiverStream::new(verify_queue_rx) .map(Ok::<_, Error>) - .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option)| { + .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option)| { match (response, merged_chunk_info) { (Some(response), MergedChunkInfo::Known(list)) => { Either::Left( response + .future .map_err(Error::from) .and_then(H2Client::h2api_response) - .and_then(move |_result| { - future::ok(MergedChunkInfo::Known(list)) + .and_then({ + let uploaded = uploaded.clone(); + move |_result| { + // account for uploaded bytes for progress output + uploaded.fetch_add(response.size, Ordering::SeqCst); + future::ok(MergedChunkInfo::Known(list)) + } }) ) } @@ -523,11 +545,7 @@ impl BackupWriter { manifest: &BackupManifest, known_chunks: Arc>>, ) -> Result { - let mut tmpfile = std::fs::OpenOptions::new() - .write(true) - .read(true) - .custom_flags(libc::O_TMPFILE) - .open("/tmp")?; + let mut tmpfile = crate::tools::create_tmp_file()?; let param = json!({ "archive-name": archive_name }); self.h2 @@ -562,20 +580,15 @@ impl BackupWriter { manifest: &BackupManifest, known_chunks: Arc>>, ) -> Result { - let mut tmpfile = std::fs::OpenOptions::new() - .write(true) - .read(true) - .custom_flags(libc::O_TMPFILE) - .open("/tmp")?; + let mut tmpfile = crate::tools::create_tmp_file()?; let param = json!({ "archive-name": 
archive_name }); self.h2 .download("previous", Some(param), &mut tmpfile) .await?; - let index = DynamicIndexReader::new(tmpfile).map_err(|err| { - format_err!("unable to read dynmamic index '{}' - {}", archive_name, err) - })?; + let index = DynamicIndexReader::new(tmpfile) + .map_err(|err| format_err!("unable to read dynamic index '{archive_name}' - {err}"))?; // Note: do not use values stored in index (not trusted) - instead, computed them again let (csum, size) = index.compute_csum(); manifest.verify_file(archive_name, &csum, size)?; @@ -637,74 +650,127 @@ impl BackupWriter { known_chunks: Arc>>, crypt_config: Option>, compress: bool, + injections: Option>, + archive: &str, ) -> impl Future> { let total_chunks = Arc::new(AtomicUsize::new(0)); let total_chunks2 = total_chunks.clone(); let known_chunk_count = Arc::new(AtomicUsize::new(0)); let known_chunk_count2 = known_chunk_count.clone(); + let injected_chunk_count = Arc::new(AtomicUsize::new(0)); + let injected_chunk_count2 = injected_chunk_count.clone(); let stream_len = Arc::new(AtomicUsize::new(0)); let stream_len2 = stream_len.clone(); + let stream_len3 = stream_len.clone(); let compressed_stream_len = Arc::new(AtomicU64::new(0)); let compressed_stream_len2 = compressed_stream_len.clone(); let reused_len = Arc::new(AtomicUsize::new(0)); let reused_len2 = reused_len.clone(); + let injected_len = Arc::new(AtomicUsize::new(0)); + let injected_len2 = injected_len.clone(); + let uploaded_len = Arc::new(AtomicUsize::new(0)); let append_chunk_path = format!("{}_index", prefix); let upload_chunk_path = format!("{}_chunk", prefix); let is_fixed_chunk_size = prefix == "fixed"; let (upload_queue, upload_result) = - Self::append_chunk_queue(h2.clone(), wid, append_chunk_path); + Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, uploaded_len.clone()); let start_time = std::time::Instant::now(); let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new()))); let index_csum_2 = index_csum.clone(); + let progress_handle = if archive.ends_with(".img") + || archive.ends_with(".pxar") + || archive.ends_with(".ppxar") + { + Some(tokio::spawn(async move { + loop { + tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; + + let size = HumanByte::from(stream_len3.load(Ordering::SeqCst)); + let size_uploaded = HumanByte::from(uploaded_len.load(Ordering::SeqCst)); + let elapsed = TimeSpan::from(start_time.elapsed()); + + log::info!("processed {size} in {elapsed}, uploaded {size_uploaded}"); + } + })) + } else { + None + }; + stream - .and_then(move |data| { - let chunk_len = data.len(); + .inject_reused_chunks(injections, stream_len.clone()) + .and_then(move |chunk_info| match chunk_info { + InjectedChunksInfo::Known(chunks) => { + // account for injected chunks + let count = chunks.len(); + total_chunks.fetch_add(count, Ordering::SeqCst); + injected_chunk_count.fetch_add(count, Ordering::SeqCst); - total_chunks.fetch_add(1, Ordering::SeqCst); - let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64; - - let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress); - - if let Some(ref crypt_config) = crypt_config { - chunk_builder = chunk_builder.crypt_config(crypt_config); + let mut known = Vec::new(); + let mut guard = index_csum.lock().unwrap(); + let csum = guard.as_mut().unwrap(); + for chunk in chunks { + let offset = + stream_len.fetch_add(chunk.size() as usize, Ordering::SeqCst) as u64; + reused_len.fetch_add(chunk.size() as usize, Ordering::SeqCst); + 
injected_len.fetch_add(chunk.size() as usize, Ordering::SeqCst); + let digest = chunk.digest(); + known.push((offset, digest)); + let end_offset = offset + chunk.size(); + csum.update(&end_offset.to_le_bytes()); + csum.update(&digest); + } + future::ok(MergedChunkInfo::Known(known)) } + InjectedChunksInfo::Raw(data) => { + // account for not injected chunks (new and known) + let chunk_len = data.len(); - let mut known_chunks = known_chunks.lock().unwrap(); - let digest = chunk_builder.digest(); + total_chunks.fetch_add(1, Ordering::SeqCst); + let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64; - let mut guard = index_csum.lock().unwrap(); - let csum = guard.as_mut().unwrap(); + let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress); - let chunk_end = offset + chunk_len as u64; + if let Some(ref crypt_config) = crypt_config { + chunk_builder = chunk_builder.crypt_config(crypt_config); + } - if !is_fixed_chunk_size { - csum.update(&chunk_end.to_le_bytes()); - } - csum.update(digest); + let mut known_chunks = known_chunks.lock().unwrap(); + let digest = chunk_builder.digest(); - let chunk_is_known = known_chunks.contains(digest); - if chunk_is_known { - known_chunk_count.fetch_add(1, Ordering::SeqCst); - reused_len.fetch_add(chunk_len, Ordering::SeqCst); - future::ok(MergedChunkInfo::Known(vec![(offset, *digest)])) - } else { - let compressed_stream_len2 = compressed_stream_len.clone(); - known_chunks.insert(*digest); - future::ready(chunk_builder.build().map(move |(chunk, digest)| { - compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst); - MergedChunkInfo::New(ChunkInfo { - chunk, - digest, - chunk_len: chunk_len as u64, - offset, - }) - })) + let mut guard = index_csum.lock().unwrap(); + let csum = guard.as_mut().unwrap(); + + let chunk_end = offset + chunk_len as u64; + + if !is_fixed_chunk_size { + csum.update(&chunk_end.to_le_bytes()); + } + csum.update(digest); + + let chunk_is_known = known_chunks.contains(digest); + if chunk_is_known { + known_chunk_count.fetch_add(1, Ordering::SeqCst); + reused_len.fetch_add(chunk_len, Ordering::SeqCst); + future::ok(MergedChunkInfo::Known(vec![(offset, *digest)])) + } else { + let compressed_stream_len2 = compressed_stream_len.clone(); + known_chunks.insert(*digest); + future::ready(chunk_builder.build().map(move |(chunk, digest)| { + compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst); + MergedChunkInfo::New(ChunkInfo { + chunk, + digest, + chunk_len: chunk_len as u64, + offset, + }) + })) + } } }) .merge_known_chunks() @@ -747,7 +813,13 @@ impl BackupWriter { Either::Left(h2.send_request(request, upload_data).and_then( move |response| async move { upload_queue - .send((new_info, Some(response))) + .send(( + new_info, + Some(ChunkUploadResponse { + future: response, + size: chunk_info.chunk_len as usize, + }), + )) .await .map_err(|err| { format_err!("failed to send to upload queue: {}", err) @@ -768,18 +840,26 @@ impl BackupWriter { let duration = start_time.elapsed(); let chunk_count = total_chunks2.load(Ordering::SeqCst); let chunk_reused = known_chunk_count2.load(Ordering::SeqCst); + let chunk_injected = injected_chunk_count2.load(Ordering::SeqCst); let size = stream_len2.load(Ordering::SeqCst); let size_reused = reused_len2.load(Ordering::SeqCst); + let size_injected = injected_len2.load(Ordering::SeqCst); let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize; let mut guard = index_csum_2.lock().unwrap(); let csum = 
guard.take().unwrap().finish(); + if let Some(handle) = progress_handle { + handle.abort(); + } + futures::future::ok(UploadStats { chunk_count, chunk_reused, + chunk_injected, size, size_reused, + size_injected, size_compressed, duration, csum, diff --git a/pbs-client/src/chunk_stream.rs b/pbs-client/src/chunk_stream.rs index 895f6eae2..e3f0980c6 100644 --- a/pbs-client/src/chunk_stream.rs +++ b/pbs-client/src/chunk_stream.rs @@ -1,4 +1,5 @@ use std::pin::Pin; +use std::sync::mpsc; use std::task::{Context, Poll}; use anyhow::Error; @@ -6,23 +7,59 @@ use bytes::BytesMut; use futures::ready; use futures::stream::{Stream, TryStream}; -use pbs_datastore::Chunker; +use pbs_datastore::{Chunker, ChunkerImpl, PayloadChunker}; + +use crate::inject_reused_chunks::InjectChunks; + +/// Holds the queues for optional injection of reused dynamic index entries +pub struct InjectionData { + boundaries: mpsc::Receiver, + next_boundary: Option, + injections: mpsc::Sender, +} + +impl InjectionData { + pub fn new( + boundaries: mpsc::Receiver, + injections: mpsc::Sender, + ) -> Self { + Self { + boundaries, + next_boundary: None, + injections, + } + } +} /// Split input stream into dynamic sized chunks pub struct ChunkStream { input: S, - chunker: Chunker, + chunker: Box, buffer: BytesMut, scan_pos: usize, + consumed: u64, + injection_data: Option, } impl ChunkStream { - pub fn new(input: S, chunk_size: Option) -> Self { + pub fn new( + input: S, + chunk_size: Option, + injection_data: Option, + suggested_boundaries: Option>, + ) -> Self { + let chunk_size = chunk_size.unwrap_or(4 * 1024 * 1024); Self { input, - chunker: Chunker::new(chunk_size.unwrap_or(4 * 1024 * 1024)), + chunker: if let Some(suggested) = suggested_boundaries { + Box::new(PayloadChunker::new(chunk_size, suggested)) + } else { + Box::new(ChunkerImpl::new(chunk_size)) + }, buffer: BytesMut::new(), scan_pos: 0, + consumed: 0, + injection_data, } } } @@ -39,19 +76,87 @@ where fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { let this = self.get_mut(); + loop { + let ctx = pbs_datastore::chunker::Context { + base: this.consumed, + total: this.buffer.len() as u64, + }; + + if let Some(InjectionData { + boundaries, + next_boundary, + injections, + }) = this.injection_data.as_mut() + { + if next_boundary.is_none() { + if let Ok(boundary) = boundaries.try_recv() { + *next_boundary = Some(boundary); + } + } + + if let Some(inject) = next_boundary.take() { + // require forced boundary, lookup next regular boundary + let pos = if this.scan_pos < this.buffer.len() { + this.chunker.scan(&this.buffer[this.scan_pos..], &ctx) + } else { + 0 + }; + + let chunk_boundary = if pos == 0 { + this.consumed + this.buffer.len() as u64 + } else { + this.consumed + (this.scan_pos + pos) as u64 + }; + + if inject.boundary <= chunk_boundary { + // forced boundary is before next boundary, force within current buffer + let chunk_size = (inject.boundary - this.consumed) as usize; + let raw_chunk = this.buffer.split_to(chunk_size); + this.chunker.reset(); + this.scan_pos = 0; + + this.consumed += chunk_size as u64; + + // add the size of the injected chunks to consumed, so chunk stream offsets + // are in sync with the rest of the archive. 
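+                    // Example: if the forced split happened at consumed = 100 KiB and the
+                    // injected chunks cover 1 MiB, the data that follows is chunked as if it
+                    // started at 100 KiB + 1 MiB, matching the index entries created for the
+                    // injected chunks.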
+ this.consumed += inject.size as u64; + + injections.send(inject).unwrap(); + + // the chunk can be empty, return nevertheless to allow the caller to + // make progress by consuming from the injection queue + return Poll::Ready(Some(Ok(raw_chunk))); + } else if pos != 0 { + *next_boundary = Some(inject); + // forced boundary is after next boundary, split off chunk from buffer + let chunk_size = this.scan_pos + pos; + let raw_chunk = this.buffer.split_to(chunk_size); + this.consumed += chunk_size as u64; + this.scan_pos = 0; + + return Poll::Ready(Some(Ok(raw_chunk))); + } else { + // forced boundary is after current buffer length, continue reading + *next_boundary = Some(inject); + this.scan_pos = this.buffer.len(); + } + } + } + if this.scan_pos < this.buffer.len() { - let boundary = this.chunker.scan(&this.buffer[this.scan_pos..]); + let boundary = this.chunker.scan(&this.buffer[this.scan_pos..], &ctx); let chunk_size = this.scan_pos + boundary; if boundary == 0 { this.scan_pos = this.buffer.len(); - // continue poll } else if chunk_size <= this.buffer.len() { - let result = this.buffer.split_to(chunk_size); + // found new chunk boundary inside buffer, split off chunk from buffer + let raw_chunk = this.buffer.split_to(chunk_size); + this.consumed += chunk_size as u64; this.scan_pos = 0; - return Poll::Ready(Some(Ok(result))); + return Poll::Ready(Some(Ok(raw_chunk))); } else { panic!("got unexpected chunk boundary from chunker"); } @@ -132,3 +237,120 @@ where } } } + +#[cfg(test)] +mod test { + use futures::stream::StreamExt; + + use super::*; + + struct DummyInput { + data: Vec, + } + + impl DummyInput { + fn new(data: Vec) -> Self { + Self { data } + } + } + + impl Stream for DummyInput { + type Item = Result, Error>; + + fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { + let this = self.get_mut(); + match this.data.len() { + 0 => Poll::Ready(None), + size if size > 10 => Poll::Ready(Some(Ok(this.data.split_off(10)))), + _ => Poll::Ready(Some(Ok(std::mem::take(&mut this.data)))), + } + } + } + + #[test] + fn test_chunk_stream_forced_boundaries() { + let mut data = Vec::new(); + for i in 0..(256 * 1024) { + for j in 0..4 { + let byte = ((i >> (j << 3)) & 0xff) as u8; + data.push(byte); + } + } + + let mut input = DummyInput::new(data); + let input = Pin::new(&mut input); + + let (injections_tx, injections_rx) = mpsc::channel(); + let (boundaries_tx, boundaries_rx) = mpsc::channel(); + let (suggested_tx, suggested_rx) = mpsc::channel(); + let injection_data = InjectionData::new(boundaries_rx, injections_tx); + + let mut chunk_stream = ChunkStream::new( + input, + Some(64 * 1024), + Some(injection_data), + Some(suggested_rx), + ); + let chunks = std::sync::Arc::new(std::sync::Mutex::new(Vec::new())); + let chunks_clone = chunks.clone(); + + // Suggested boundary matching forced boundary + suggested_tx.send(32 * 1024).unwrap(); + // Suggested boundary not matching forced boundary + suggested_tx.send(64 * 1024).unwrap(); + // Force chunk boundary at suggested boundary + boundaries_tx + .send(InjectChunks { + boundary: 32 * 1024, + chunks: Vec::new(), + size: 1024, + }) + .unwrap(); + // Force chunk boundary within regular chunk + boundaries_tx + .send(InjectChunks { + boundary: 128 * 1024, + chunks: Vec::new(), + size: 2048, + }) + .unwrap(); + // Force chunk boundary aligned with regular boundary + boundaries_tx + .send(InjectChunks { + boundary: 657408, + chunks: Vec::new(), + size: 512, + }) + .unwrap(); + // Force chunk boundary within regular chunk, without injecting 
data + boundaries_tx + .send(InjectChunks { + boundary: 657408 + 1024, + chunks: Vec::new(), + size: 0, + }) + .unwrap(); + + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async move { + while let Some(chunk) = chunk_stream.next().await { + let chunk = chunk.unwrap(); + let mut chunks = chunks.lock().unwrap(); + chunks.push(chunk); + } + }); + + let mut total = 0; + let chunks = chunks_clone.lock().unwrap(); + let expected = [32768, 31744, 65536, 262144, 262144, 512, 262144, 131584]; + for (chunk, expected) in chunks.as_slice().iter().zip(expected.iter()) { + assert_eq!(chunk.len(), *expected); + total += chunk.len(); + } + while let Ok(injection) = injections_rx.recv() { + total += injection.size; + } + + assert_eq!(total, 4 * 256 * 1024 + 1024 + 2048 + 512); + } +} diff --git a/pbs-client/src/http_client.rs b/pbs-client/src/http_client.rs index ab67bba29..8ae5edaa0 100644 --- a/pbs-client/src/http_client.rs +++ b/pbs-client/src/http_client.rs @@ -332,6 +332,7 @@ impl HttpClient { let interactive = options.interactive; let fingerprint_cache = options.fingerprint_cache; let prefix = options.prefix.clone(); + let trust_openssl_valid = Arc::new(Mutex::new(true)); ssl_connector_builder.set_verify_callback( openssl::ssl::SslVerifyMode::PEER, move |valid, ctx| match Self::verify_callback( @@ -339,6 +340,7 @@ impl HttpClient { ctx, expected_fingerprint.as_ref(), interactive, + Arc::clone(&trust_openssl_valid), ) { Ok(None) => true, Ok(Some(fingerprint)) => { @@ -467,7 +469,6 @@ impl HttpClient { } Err(err) => { log::error!("re-authentication failed: {}", err); - return; } } } @@ -561,8 +562,12 @@ impl HttpClient { ctx: &mut X509StoreContextRef, expected_fingerprint: Option<&String>, interactive: bool, + trust_openssl: Arc>, ) -> Result, Error> { - if openssl_valid { + let mut trust_openssl_valid = trust_openssl.lock().unwrap(); + + // we can only rely on openssl's prevalidation if we haven't forced it earlier + if openssl_valid && *trust_openssl_valid { return Ok(None); } @@ -571,11 +576,13 @@ impl HttpClient { None => bail!("context lacks current certificate."), }; - let depth = ctx.error_depth(); - if depth != 0 { - bail!("context depth != 0") + // force trust in case of a chain, but set flag to no longer trust prevalidation by openssl + if ctx.error_depth() > 0 { + *trust_openssl_valid = false; + return Ok(None); } + // leaf certificate - if we end up here, we have to verify the fingerprint! let fp = match cert.digest(openssl::hash::MessageDigest::sha256()) { Ok(fp) => fp, Err(err) => bail!("failed to calculate certificate FP - {}", err), // should not happen diff --git a/pbs-client/src/inject_reused_chunks.rs b/pbs-client/src/inject_reused_chunks.rs new file mode 100644 index 000000000..4b2922012 --- /dev/null +++ b/pbs-client/src/inject_reused_chunks.rs @@ -0,0 +1,127 @@ +use std::cmp; +use std::pin::Pin; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::{mpsc, Arc}; +use std::task::{Context, Poll}; + +use anyhow::{anyhow, Error}; +use futures::{ready, Stream}; +use pin_project_lite::pin_project; + +use crate::pxar::create::ReusableDynamicEntry; + +pin_project! { + pub struct InjectReusedChunksQueue { + #[pin] + input: S, + next_injection: Option, + injections: Option>, + stream_len: Arc, + } +} + +type StreamOffset = u64; +#[derive(Debug)] +/// Holds a list of chunks to inject at the given boundary by forcing a chunk boundary. 
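+///
+/// The boundary is an absolute offset into the chunk stream: once the stream has produced
+/// that many bytes, a chunk boundary is forced and the listed chunks are passed along as
+/// already known chunks instead of being re-chunked and re-uploaded.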
+pub struct InjectChunks { + /// Offset at which to force the boundary + pub boundary: StreamOffset, + /// List of chunks to inject + pub chunks: Vec, + /// Cumulative size of the chunks in the list + pub size: usize, +} + +/// Variants for stream consumer to distinguish between raw data chunks and injected ones. +pub enum InjectedChunksInfo { + Known(Vec), + Raw(bytes::BytesMut), +} + +pub trait InjectReusedChunks: Sized { + fn inject_reused_chunks( + self, + injections: Option>, + stream_len: Arc, + ) -> InjectReusedChunksQueue; +} + +impl InjectReusedChunks for S +where + S: Stream>, +{ + fn inject_reused_chunks( + self, + injections: Option>, + stream_len: Arc, + ) -> InjectReusedChunksQueue { + InjectReusedChunksQueue { + input: self, + next_injection: None, + injections, + stream_len, + } + } +} + +impl Stream for InjectReusedChunksQueue +where + S: Stream>, +{ + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + + // loop to skip over possible empty chunks + loop { + if this.next_injection.is_none() { + if let Some(injections) = this.injections.as_mut() { + if let Ok(injection) = injections.try_recv() { + *this.next_injection = Some(injection); + } + } + } + + if let Some(inject) = this.next_injection.take() { + // got reusable dynamic entries to inject + let offset = this.stream_len.load(Ordering::SeqCst) as u64; + + match inject.boundary.cmp(&offset) { + // inject now + cmp::Ordering::Equal => { + let chunk_info = InjectedChunksInfo::Known(inject.chunks); + return Poll::Ready(Some(Ok(chunk_info))); + } + // inject later + cmp::Ordering::Greater => *this.next_injection = Some(inject), + // incoming new chunks and injections didn't line up? + cmp::Ordering::Less => { + return Poll::Ready(Some(Err(anyhow!("invalid injection boundary")))) + } + } + } + + // nothing to inject now, await further input + match ready!(this.input.as_mut().poll_next(cx)) { + None => { + if let Some(injections) = this.injections.as_mut() { + if this.next_injection.is_some() || injections.try_recv().is_ok() { + // stream finished, but remaining dynamic entries to inject + return Poll::Ready(Some(Err(anyhow!( + "injection queue not fully consumed" + )))); + } + } + // stream finished and all dynamic entries already injected + return Poll::Ready(None); + } + Some(Err(err)) => return Poll::Ready(Some(Err(err))), + // ignore empty chunks, injected chunks from queue at forced boundary, but boundary + // did not require splitting of the raw stream buffer to force the boundary + Some(Ok(raw)) if raw.is_empty() => continue, + Some(Ok(raw)) => return Poll::Ready(Some(Ok(InjectedChunksInfo::Raw(raw)))), + } + } + } +} diff --git a/pbs-client/src/lib.rs b/pbs-client/src/lib.rs index 21cf8556b..3d2da27b9 100644 --- a/pbs-client/src/lib.rs +++ b/pbs-client/src/lib.rs @@ -7,6 +7,7 @@ pub mod catalog_shell; pub mod pxar; pub mod tools; +mod inject_reused_chunks; mod merge_known_chunks; pub mod pipe_to_stream; @@ -38,6 +39,6 @@ mod backup_specification; pub use backup_specification::*; mod chunk_stream; -pub use chunk_stream::{ChunkStream, FixedChunkStream}; +pub use chunk_stream::{ChunkStream, FixedChunkStream, InjectionData}; pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120; diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index 60efb0ce5..c48524c4c 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -1,11 +1,13 @@ use std::collections::{HashMap, HashSet}; use std::ffi::{CStr, CString, 
OsStr}; -use std::fmt; +use std::fmt::Display; use std::io::{self, Read}; +use std::mem::size_of; +use std::ops::Range; use std::os::unix::ffi::OsStrExt; use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd}; use std::path::{Path, PathBuf}; -use std::sync::{Arc, Mutex}; +use std::sync::{mpsc, Arc, Mutex}; use anyhow::{bail, Context, Error}; use futures::future::BoxFuture; @@ -14,24 +16,33 @@ use nix::dir::Dir; use nix::errno::Errno; use nix::fcntl::OFlag; use nix::sys::stat::{FileStat, Mode}; +use serde::{Deserialize, Serialize}; use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag}; use proxmox_sys::error::SysError; -use pxar::encoder::{LinkOffset, SeqWrite}; -use pxar::Metadata; +use pxar::accessor::aio::{Accessor, Directory}; +use pxar::accessor::ReadAt; +use pxar::encoder::{LinkOffset, PayloadOffset, SeqWrite}; +use pxar::{EntryKind, Metadata, PxarVariant}; +use proxmox_human_byte::HumanByte; use proxmox_io::vec; -use proxmox_lang::c_str; use proxmox_sys::fs::{self, acl, xattr}; use pbs_datastore::catalog::BackupCatalogWriter; +use pbs_datastore::dynamic_index::DynamicIndexReader; +use pbs_datastore::index::IndexFile; +use crate::inject_reused_chunks::InjectChunks; +use crate::pxar::look_ahead_cache::{CacheEntry, CacheEntryData, PxarLookaheadCache}; use crate::pxar::metadata::errno_is_unsupported; use crate::pxar::tools::assert_single_path_component; use crate::pxar::Flags; +const CHUNK_PADDING_THRESHOLD: f64 = 0.1; + /// Pxar options for creating a pxar archive/stream -#[derive(Default, Clone)] +#[derive(Default)] pub struct PxarCreateOptions { /// Device/mountpoint st_dev numbers that should be included. None for no limitation. pub device_set: Option>, @@ -43,6 +54,22 @@ pub struct PxarCreateOptions { pub skip_lost_and_found: bool, /// Skip xattrs of files that return E2BIG error pub skip_e2big_xattr: bool, + /// Reference state for partial backups + pub previous_ref: Option, + /// Maximum number of lookahead cache entries + pub max_cache_size: Option, +} + +pub type MetadataArchiveReader = Arc; + +/// Stateful information of previous backups snapshots for partial backups +pub struct PxarPrevRef { + /// Reference accessor for metadata comparison + pub accessor: Accessor, + /// Reference index for reusing payload chunks + pub payload_index: DynamicIndexReader, + /// Reference archive name for partial backups + pub archive_name: String, } fn detect_fs_type(fd: RawFd) -> Result { @@ -89,32 +116,57 @@ pub fn is_virtual_file_system(magic: i64) -> bool { SYSFS_MAGIC) } -#[derive(Debug)] -struct ArchiveError { - path: PathBuf, - error: Error, +trait UniqueContext { + fn unique_context(self, context: S) -> Result + where + S: Display + Send + Sync + 'static; } -impl ArchiveError { - fn new(path: PathBuf, error: Error) -> Self { - Self { path, error } - } -} - -impl std::error::Error for ArchiveError {} - -impl fmt::Display for ArchiveError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "error at {:?}: {}", self.path, self.error) +impl UniqueContext for Result { + fn unique_context(self, context: S) -> Result + where + S: Display + Send + Sync + 'static, + { + match self { + Ok(ok) => Ok(ok), + Err(err) => { + let last_error = err.chain().next(); + if let Some(e) = last_error { + if e.to_string() == context.to_string() { + return Err(err); + } + } + Err(err.context(context)) + } + } } } #[derive(Eq, PartialEq, Hash)] -struct HardLinkInfo { +pub(crate) struct HardLinkInfo { st_dev: u64, st_ino: u64, } +#[derive(Default)] +struct 
ReuseStats { + files_reused_count: u64, + files_hardlink_count: u64, + files_reencoded_count: u64, + total_injected_count: u64, + partial_chunks_count: u64, + total_injected_size: u64, + total_reused_payload_size: u64, + total_reencoded_size: u64, +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub(crate) struct PbsClientPrelude { + #[serde(skip_serializing_if = "Option::is_none")] + exclude_patterns: Option, +} + struct Archiver { feature_flags: Flags, fs_feature_flags: Flags, @@ -131,17 +183,38 @@ struct Archiver { hardlinks: HashMap, file_copy_buffer: Vec, skip_e2big_xattr: bool, + forced_boundaries: Option>, + suggested_boundaries: Option>, + previous_payload_index: Option, + cache: PxarLookaheadCache, + reuse_stats: ReuseStats, + split_archive: bool, } type Encoder<'a, T> = pxar::encoder::aio::Encoder<'a, T>; +pub struct PxarWriters { + archive: PxarVariant, + catalog: Option>>, +} + +impl PxarWriters { + pub fn new( + archive: PxarVariant, + catalog: Option>>, + ) -> Self { + Self { archive, catalog } + } +} + pub async fn create_archive( source_dir: Dir, - mut writer: T, + writers: PxarWriters, feature_flags: Flags, callback: F, - catalog: Option>>, options: PxarCreateOptions, + forced_boundaries: Option>, + suggested_boundaries: Option>, ) -> Result<(), Error> where T: SeqWrite + Send, @@ -170,8 +243,6 @@ where set.insert(stat.st_dev); } - let mut encoder = Encoder::new(&mut writer, &metadata).await?; - let mut patterns = options.patterns; if options.skip_lost_and_found { @@ -182,13 +253,38 @@ where )?); } + let split_archive = writers.archive.payload().is_some(); + let prelude = if split_archive && !patterns.is_empty() { + let prelude = PbsClientPrelude { + exclude_patterns: Some(String::from_utf8(generate_pxar_excludes_cli( + &patterns[..], + ))?), + }; + Some(serde_json::to_vec(&prelude)?) 
+ } else { + None + }; + + let metadata_mode = options.previous_ref.is_some() && split_archive; + let (previous_payload_index, previous_metadata_accessor) = + if let Some(refs) = options.previous_ref { + ( + Some(refs.payload_index), + refs.accessor.open_root().await.ok(), + ) + } else { + (None, None) + }; + + let mut encoder = Encoder::new(writers.archive, &metadata, prelude.as_deref()).await?; + let mut archiver = Archiver { feature_flags, fs_feature_flags, fs_magic, callback: Box::new(callback), patterns, - catalog, + catalog: writers.catalog, path: PathBuf::new(), entry_counter: 0, entry_limit: options.entries_max, @@ -197,12 +293,55 @@ where hardlinks: HashMap::new(), file_copy_buffer: vec::undefined(4 * 1024 * 1024), skip_e2big_xattr: options.skip_e2big_xattr, + forced_boundaries, + suggested_boundaries, + previous_payload_index, + cache: PxarLookaheadCache::new(options.max_cache_size), + reuse_stats: ReuseStats::default(), + split_archive, }; archiver - .archive_dir_contents(&mut encoder, source_dir, true) + .archive_dir_contents(&mut encoder, previous_metadata_accessor, source_dir, true) .await?; + + if metadata_mode { + archiver + .flush_cached_reusing_if_below_threshold(&mut encoder, false) + .await?; + } + encoder.finish().await?; + encoder.close().await?; + + if metadata_mode { + log::info!("Change detection summary:"); + log::info!( + " - {} total files ({} hardlinks)", + archiver.reuse_stats.files_reused_count + + archiver.reuse_stats.files_reencoded_count + + archiver.reuse_stats.files_hardlink_count, + archiver.reuse_stats.files_hardlink_count, + ); + log::info!( + " - {} unchanged, reusable files with {} data", + archiver.reuse_stats.files_reused_count, + HumanByte::from(archiver.reuse_stats.total_reused_payload_size), + ); + log::info!( + " - {} changed or non-reusable files with {} data", + archiver.reuse_stats.files_reencoded_count, + HumanByte::from(archiver.reuse_stats.total_reencoded_size), + ); + log::info!( + " - {} padding in {} partially reused chunks", + HumanByte::from( + archiver.reuse_stats.total_injected_size + - archiver.reuse_stats.total_reused_payload_size + ), + archiver.reuse_stats.partial_chunks_count, + ); + } Ok(()) } @@ -219,17 +358,10 @@ impl Archiver { self.feature_flags & self.fs_feature_flags } - fn wrap_err(&self, err: Error) -> Error { - if err.downcast_ref::().is_some() { - err - } else { - ArchiveError::new(self.path.clone(), err).into() - } - } - fn archive_dir_contents<'a, T: SeqWrite + Send>( &'a mut self, encoder: &'a mut Encoder<'_, T>, + mut previous_metadata_accessor: Option>, mut dir: Dir, is_root: bool, ) -> BoxFuture<'a, Result<(), Error>> { @@ -241,7 +373,7 @@ impl Archiver { let mut file_list = self.generate_directory_file_list(&mut dir, is_root)?; - if is_root && old_patterns_count > 0 { + if is_root && old_patterns_count > 0 && previous_metadata_accessor.is_none() { file_list.push(FileListEntry { name: CString::new(".pxarexclude-cli").unwrap(), path: PathBuf::new(), @@ -257,16 +389,24 @@ impl Archiver { let file_name = file_entry.name.to_bytes(); if is_root && file_name == b".pxarexclude-cli" { - self.encode_pxarexclude_cli(encoder, &file_entry.name, old_patterns_count) - .await?; + if !self.split_archive { + self.encode_pxarexclude_cli(encoder, &file_entry.name, old_patterns_count) + .await?; + } continue; } (self.callback)(&file_entry.path)?; self.path = file_entry.path; - self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat) - .await - .map_err(|err| self.wrap_err(err))?; + self.add_entry( + encoder, + &mut 
previous_metadata_accessor, + dir_fd, + &file_entry.name, + &file_entry.stat, + ) + .await + .unique_context(format!("error at {:?}", self.path))?; } self.path = old_path; self.entry_counter = entry_counter; @@ -277,6 +417,40 @@ impl Archiver { .boxed() } + async fn is_reusable_entry( + &mut self, + previous_metadata_accessor: &Option>, + file_name: &Path, + metadata: &Metadata, + ) -> Result>, Error> { + if let Some(previous_metadata_accessor) = previous_metadata_accessor { + if let Some(file_entry) = previous_metadata_accessor.lookup(file_name).await? { + if metadata == file_entry.metadata() { + if let EntryKind::File { + payload_offset: Some(offset), + size, + .. + } = file_entry.entry().kind() + { + let range = + *offset..*offset + size + size_of::() as u64; + log::debug!( + "reusable: {file_name:?} at range {range:?} has unchanged metadata." + ); + return Ok(Some(range)); + } + log::debug!("reencode: {file_name:?} not a regular file."); + return Ok(None); + } + log::debug!("reencode: {file_name:?} metadata did not match."); + return Ok(None); + } + log::debug!("reencode: {file_name:?} not found in previous archive."); + } + + Ok(None) + } + /// openat() wrapper which allows but logs `EACCES` and turns `ENOENT` into `None`. /// /// The `existed` flag is set when iterating through a directory to note that we know the file @@ -321,7 +495,7 @@ impl Archiver { } fn read_pxar_excludes(&mut self, parent: RawFd) -> Result<(), Error> { - let fd = match self.open_file(parent, c_str!(".pxarexclude"), OFlag::O_RDONLY, false)? { + let fd = match self.open_file(parent, c".pxarexclude", OFlag::O_RDONLY, false)? { Some(fd) => fd, None => return Ok(()), }; @@ -404,6 +578,10 @@ impl Archiver { let mut metadata = Metadata::default(); metadata.stat.mode = pxar::format::mode::IFREG | 0o600; + // use uid/gid of client process so the backup snapshot might be restored by the same + // potentially non-root user + metadata.stat.uid = unsafe { libc::getuid() }; + metadata.stat.gid = unsafe { libc::getgid() }; let mut file = encoder .create_file(&metadata, ".pxarexclude-cli", content.len() as u64) @@ -514,12 +692,11 @@ impl Archiver { async fn add_entry( &mut self, encoder: &mut Encoder<'_, T>, + previous_metadata: &mut Option>, parent: RawFd, c_file_name: &CStr, stat: &FileStat, ) -> Result<(), Error> { - use pxar::format::mode; - let file_mode = stat.st_mode & libc::S_IFMT; let open_mode = if file_mode == libc::S_IFREG || file_mode == libc::S_IFDIR { OFlag::empty() @@ -557,6 +734,126 @@ impl Archiver { self.skip_e2big_xattr, )?; + if self.previous_payload_index.is_none() { + return self + .add_entry_to_archive(encoder, &mut None, c_file_name, stat, fd, &metadata, None) + .await; + } + + // Avoid having to many open file handles in cached entries + if self.cache.is_full() { + log::debug!("Max cache size reached, reuse cached entries"); + self.flush_cached_reusing_if_below_threshold(encoder, true) + .await?; + } + + if metadata.is_regular_file() { + if stat.st_nlink > 1 { + let link_info = HardLinkInfo { + st_dev: stat.st_dev, + st_ino: stat.st_ino, + }; + if self.cache.contains_hardlink(&link_info) { + // This hardlink has been seen by the lookahead cache already, put it on the cache + // with a dummy offset and continue without lookup and chunk injection. + // On flushing or re-encoding, the logic there will store the actual hardlink with + // offset. 
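+                    // (the PayloadOffset::default() passed below is that dummy offset; the
+                    // actual hardlink offset is only determined when the cached entries are
+                    // flushed or re-encoded)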
+ self.cache.insert( + fd, + c_file_name.into(), + *stat, + metadata.clone(), + PayloadOffset::default(), + self.path.clone(), + ); + return Ok(()); + } else { + // mark this hardlink as seen by the lookahead cache + self.cache.insert_hardlink(link_info); + } + } + + let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref(); + if let Some(payload_range) = self + .is_reusable_entry(previous_metadata, file_name, &metadata) + .await? + { + if !self.cache.try_extend_range(payload_range.clone()) { + log::debug!("Cache range has hole, new range: {payload_range:?}"); + self.flush_cached_reusing_if_below_threshold(encoder, true) + .await?; + // range has to be set after flushing of cached entries, which resets the range + self.cache.update_range(payload_range.clone()); + } + + // offset relative to start of current range, does not include possible padding of + // actual chunks, which needs to be added before encoding the payload reference + let offset = + PayloadOffset::default().add(payload_range.start - self.cache.range().start); + log::debug!("Offset relative to range start: {offset:?}"); + + self.cache.insert( + fd, + c_file_name.into(), + *stat, + metadata.clone(), + offset, + self.path.clone(), + ); + return Ok(()); + } else { + self.flush_cached_reusing_if_below_threshold(encoder, false) + .await?; + } + } else if self.cache.caching_enabled() { + self.cache.insert( + fd.try_clone()?, + c_file_name.into(), + *stat, + metadata.clone(), + PayloadOffset::default(), + self.path.clone(), + ); + + if metadata.is_dir() { + self.add_directory( + encoder, + previous_metadata, + Dir::from_fd(fd.into_raw_fd())?, + c_file_name, + &metadata, + stat, + ) + .await?; + } + return Ok(()); + } + + self.encode_entries_to_archive(encoder, None).await?; + self.add_entry_to_archive( + encoder, + previous_metadata, + c_file_name, + stat, + fd, + &metadata, + None, + ) + .await + } + + async fn add_entry_to_archive( + &mut self, + encoder: &mut Encoder<'_, T>, + previous_metadata: &mut Option>, + c_file_name: &CStr, + stat: &FileStat, + fd: OwnedFd, + metadata: &Metadata, + payload_offset: Option, + ) -> Result<(), Error> { + use pxar::format::mode; + let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref(); match metadata.file_type() { mode::IFREG => { @@ -585,11 +882,30 @@ impl Archiver { .add_file(c_file_name, file_size, stat.st_mtime)?; } - let offset: LinkOffset = self - .add_regular_file(encoder, fd, file_name, &metadata, file_size) - .await?; + if let Some(sender) = self.suggested_boundaries.as_mut() { + let offset = encoder.payload_position()?.raw(); + sender.send(offset)?; + } + + let offset: LinkOffset = if let Some(payload_offset) = payload_offset { + self.reuse_stats.total_reused_payload_size += + file_size + size_of::() as u64; + self.reuse_stats.files_reused_count += 1; + + encoder + .add_payload_ref(metadata, file_name, file_size, payload_offset) + .await? + } else { + self.reuse_stats.total_reencoded_size += + file_size + size_of::() as u64; + self.reuse_stats.files_reencoded_count += 1; + + self.add_regular_file(encoder, fd, file_name, metadata, file_size) + .await? 
+ }; if stat.st_nlink > 1 { + self.reuse_stats.files_hardlink_count += 1; self.hardlinks .insert(link_info, (self.path.clone(), offset)); } @@ -598,52 +914,43 @@ impl Archiver { } mode::IFDIR => { let dir = Dir::from_fd(fd.into_raw_fd())?; - - if let Some(ref catalog) = self.catalog { - catalog.lock().unwrap().start_directory(c_file_name)?; - } - let result = self - .add_directory(encoder, dir, c_file_name, &metadata, stat) - .await; - if let Some(ref catalog) = self.catalog { - catalog.lock().unwrap().end_directory()?; - } - result + self.add_directory(encoder, previous_metadata, dir, c_file_name, metadata, stat) + .await } mode::IFSOCK => { if let Some(ref catalog) = self.catalog { catalog.lock().unwrap().add_socket(c_file_name)?; } - Ok(encoder.add_socket(&metadata, file_name).await?) + Ok(encoder.add_socket(metadata, file_name).await?) } mode::IFIFO => { if let Some(ref catalog) = self.catalog { catalog.lock().unwrap().add_fifo(c_file_name)?; } - Ok(encoder.add_fifo(&metadata, file_name).await?) + Ok(encoder.add_fifo(metadata, file_name).await?) } mode::IFLNK => { if let Some(ref catalog) = self.catalog { catalog.lock().unwrap().add_symlink(c_file_name)?; } - self.add_symlink(encoder, fd, file_name, &metadata).await + self.add_symlink(encoder, fd, file_name, metadata).await } mode::IFBLK => { if let Some(ref catalog) = self.catalog { catalog.lock().unwrap().add_block_device(c_file_name)?; } - self.add_device(encoder, file_name, &metadata, stat).await + self.add_device(encoder, file_name, metadata, stat).await } mode::IFCHR => { if let Some(ref catalog) = self.catalog { catalog.lock().unwrap().add_char_device(c_file_name)?; } - self.add_device(encoder, file_name, &metadata, stat).await + self.add_device(encoder, file_name, metadata, stat).await } other => bail!( "encountered unknown file type: 0x{:x} (0o{:o})", @@ -653,17 +960,221 @@ impl Archiver { } } + async fn flush_cached_reusing_if_below_threshold( + &mut self, + encoder: &mut Encoder<'_, T>, + keep_last_chunk: bool, + ) -> Result<(), Error> { + if self.cache.range().is_empty() { + // only non regular file entries (e.g. 
directories) in cache, allows to do regular encoding + self.encode_entries_to_archive(encoder, None).await?; + return Ok(()); + } + + if let Some(ref ref_payload_index) = self.previous_payload_index { + // Take ownership of previous last chunk, only update where it must be injected + let prev_last_chunk = self.cache.take_last_chunk(); + let range = self.cache.range(); + let (mut indices, start_padding, end_padding) = + lookup_dynamic_entries(ref_payload_index, range)?; + let mut padding = start_padding + end_padding; + let total_size = (range.end - range.start) + padding; + + // take into account used bytes of kept back chunk for padding + if let (Some(first), Some(last)) = (indices.first(), prev_last_chunk.as_ref()) { + if last.digest() == first.digest() { + // Update padding used for threshold calculation only + let used = last.size() - last.padding; + padding -= used; + } + } + + let ratio = padding as f64 / total_size as f64; + + // do not reuse chunks if introduced padding higher than threshold + // opt for re-encoding in that case + if ratio > CHUNK_PADDING_THRESHOLD { + log::debug!( + "Padding ratio: {ratio} > {CHUNK_PADDING_THRESHOLD}, padding: {}, total {}, chunks: {}", + HumanByte::from(padding), + HumanByte::from(total_size), + indices.len(), + ); + self.cache.update_last_chunk(prev_last_chunk); + self.encode_entries_to_archive(encoder, None).await?; + } else { + log::debug!( + "Padding ratio: {ratio} < {CHUNK_PADDING_THRESHOLD}, padding: {}, total {}, chunks: {}", + HumanByte::from(padding), + HumanByte::from(total_size), + indices.len(), + ); + + // check for cases where kept back last is not equal first chunk because the range + // end aligned with a chunk boundary, and the chunks therefore needs to be injected + if let (Some(first), Some(last)) = (indices.first_mut(), prev_last_chunk) { + if last.digest() != first.digest() { + // make sure to inject previous last chunk before encoding entries + self.inject_chunks_at_current_payload_position(encoder, vec![last])?; + } else { + let used = last.size() - last.padding; + first.padding -= used; + } + } + + let base_offset = Some(encoder.payload_position()?.add(start_padding)); + self.encode_entries_to_archive(encoder, base_offset).await?; + + if keep_last_chunk { + self.cache.update_last_chunk(indices.pop()); + } + + self.inject_chunks_at_current_payload_position(encoder, indices)?; + } + + Ok(()) + } else { + bail!("cannot reuse chunks without previous index reader"); + } + } + + // Take ownership of cached entries and encode them to the archive + // Encode with reused payload chunks when base offset is some, reencode otherwise + async fn encode_entries_to_archive( + &mut self, + encoder: &mut Encoder<'_, T>, + base_offset: Option, + ) -> Result<(), Error> { + if let Some(prev) = self.cache.take_last_chunk() { + // make sure to inject previous last chunk before encoding entries + self.inject_chunks_at_current_payload_position(encoder, vec![prev])?; + } + + // take ownership of cached entries and reset caching state + let (entries, start_path) = self.cache.take_and_reset(); + let old_path = self.path.clone(); + self.path = start_path; + log::debug!( + "Got {} cache entries to encode: reuse is {}", + entries.len(), + base_offset.is_some() + ); + + for entry in entries { + match entry { + CacheEntry::RegEntry(CacheEntryData { + fd, + c_file_name, + stat, + metadata, + payload_offset, + }) => { + let file_name = OsStr::from_bytes(c_file_name.to_bytes()); + self.path.push(file_name); + self.add_entry_to_archive( + encoder, + &mut 
None, + &c_file_name, + &stat, + fd, + &metadata, + base_offset.map(|base_offset| payload_offset.add(base_offset.raw())), + ) + .await?; + self.path.pop(); + } + CacheEntry::DirEntry(CacheEntryData { + c_file_name, + metadata, + .. + }) => { + let file_name = OsStr::from_bytes(c_file_name.to_bytes()); + self.path.push(file_name); + if let Some(ref catalog) = self.catalog { + catalog.lock().unwrap().start_directory(&c_file_name)?; + } + let dir_name = OsStr::from_bytes(c_file_name.to_bytes()); + encoder.create_directory(dir_name, &metadata).await?; + } + CacheEntry::DirEnd => { + encoder.finish().await?; + if let Some(ref catalog) = self.catalog { + catalog.lock().unwrap().end_directory()?; + } + self.path.pop(); + } + } + } + + self.path = old_path; + + Ok(()) + } + + fn inject_chunks_at_current_payload_position( + &mut self, + encoder: &mut Encoder<'_, T>, + reused_chunks: Vec, + ) -> Result<(), Error> { + let mut injection_boundary = encoder.payload_position()?; + + for chunks in reused_chunks.chunks(128) { + let chunks = chunks.to_vec(); + let mut size = PayloadOffset::default(); + + for chunk in chunks.iter() { + log::debug!( + "Injecting chunk with {} padding (chunk size {})", + HumanByte::from(chunk.padding), + HumanByte::from(chunk.size()), + ); + self.reuse_stats.total_injected_size += chunk.size(); + self.reuse_stats.total_injected_count += 1; + + if chunk.padding > 0 { + self.reuse_stats.partial_chunks_count += 1; + } + + size = size.add(chunk.size()); + } + + let inject_chunks = InjectChunks { + boundary: injection_boundary.raw(), + chunks, + size: size.raw() as usize, + }; + + if let Some(sender) = self.forced_boundaries.as_mut() { + sender.send(inject_chunks)?; + } else { + bail!("missing injection queue"); + }; + + injection_boundary = injection_boundary.add(size.raw()); + log::debug!("Advance payload position by: {size:?}"); + encoder.advance(size)?; + } + + Ok(()) + } + async fn add_directory( &mut self, encoder: &mut Encoder<'_, T>, + previous_metadata_accessor: &mut Option>, dir: Dir, - dir_name: &CStr, + c_dir_name: &CStr, metadata: &Metadata, stat: &FileStat, ) -> Result<(), Error> { - let dir_name = OsStr::from_bytes(dir_name.to_bytes()); + let dir_name = OsStr::from_bytes(c_dir_name.to_bytes()); - let mut encoder = encoder.create_directory(dir_name, metadata).await?; + if !self.cache.caching_enabled() { + if let Some(ref catalog) = self.catalog { + catalog.lock().unwrap().start_directory(c_dir_name)?; + } + encoder.create_directory(dir_name, metadata).await?; + } let old_fs_magic = self.fs_magic; let old_fs_feature_flags = self.fs_feature_flags; @@ -686,14 +1197,32 @@ impl Archiver { log::info!("skipping mount point: {:?}", self.path); Ok(()) } else { - self.archive_dir_contents(&mut encoder, dir, false).await + let mut dir_accessor = None; + if let Some(accessor) = previous_metadata_accessor.as_mut() { + if let Some(file_entry) = accessor.lookup(dir_name).await? 
{ + if file_entry.entry().is_dir() { + let dir = file_entry.enter_directory().await?; + dir_accessor = Some(dir); + } + } + } + self.archive_dir_contents(encoder, dir_accessor, dir, false) + .await }; self.fs_magic = old_fs_magic; self.fs_feature_flags = old_fs_feature_flags; self.current_st_dev = old_st_dev; - encoder.finish().await?; + if !self.cache.caching_enabled() { + encoder.finish().await?; + if let Some(ref catalog) = self.catalog { + catalog.lock().unwrap().end_directory()?; + } + } else { + self.cache.insert_dir_end(); + } + result } @@ -765,6 +1294,73 @@ impl Archiver { } } +/// Dynamic entry reusable by payload references +#[derive(Clone, Debug)] +#[repr(C)] +pub struct ReusableDynamicEntry { + size: u64, + padding: u64, + digest: [u8; 32], +} + +impl ReusableDynamicEntry { + #[inline] + pub fn size(&self) -> u64 { + self.size + } + + #[inline] + pub fn digest(&self) -> [u8; 32] { + self.digest + } +} + +/// List of dynamic entries containing the data given by an offset range +fn lookup_dynamic_entries( + index: &DynamicIndexReader, + range: &Range, +) -> Result<(Vec, u64, u64), Error> { + let end_idx = index.index_count() - 1; + let chunk_end = index.chunk_end(end_idx); + let start = index.binary_search(0, 0, end_idx, chunk_end, range.start)?; + + let mut prev_end = if start == 0 { + 0 + } else { + index.chunk_end(start - 1) + }; + let padding_start = range.start - prev_end; + let mut padding_end = 0; + + let mut indices = Vec::new(); + for dynamic_entry in &index.index()[start..] { + let end = dynamic_entry.end(); + + let reusable_dynamic_entry = ReusableDynamicEntry { + size: (end - prev_end), + padding: 0, + digest: dynamic_entry.digest(), + }; + indices.push(reusable_dynamic_entry); + + if range.end < end { + padding_end = end - range.end; + break; + } + prev_end = end; + } + + if let Some(first) = indices.first_mut() { + first.padding += padding_start; + } + + if let Some(last) = indices.last_mut() { + last.padding += padding_end; + } + + Ok((indices, padding_start, padding_end)) +} + fn get_metadata( fd: RawFd, stat: &FileStat, @@ -811,7 +1407,7 @@ fn get_fcaps( return Ok(()); } - match xattr::fgetxattr(fd, xattr::xattr_name_fcaps()) { + match xattr::fgetxattr(fd, xattr::XATTR_NAME_FCAPS) { Ok(data) => { meta.fcaps = Some(pxar::format::FCaps { data }); Ok(()) @@ -1136,3 +1732,251 @@ fn generate_pxar_excludes_cli(patterns: &[MatchEntry]) -> Vec { content } + +#[cfg(test)] +mod tests { + use std::ffi::OsString; + use std::fs::File; + use std::fs::OpenOptions; + use std::io::{self, BufReader, Seek, SeekFrom, Write}; + use std::pin::Pin; + use std::process::Command; + use std::sync::mpsc; + use std::task::{Context, Poll}; + + use pbs_datastore::dynamic_index::DynamicIndexReader; + use pxar::accessor::sync::FileReader; + use pxar::encoder::SeqWrite; + + use crate::pxar::extract::Extractor; + use crate::pxar::OverwriteFlags; + + use super::*; + + struct DummyWriter { + file: Option, + } + + impl DummyWriter { + fn new>(path: Option

) -> Result { + let file = if let Some(path) = path { + Some( + OpenOptions::new() + .read(true) + .write(true) + .truncate(true) + .create(true) + .open(path)?, + ) + } else { + None + }; + Ok(Self { file }) + } + } + + impl Write for DummyWriter { + fn write(&mut self, data: &[u8]) -> io::Result { + if let Some(file) = self.file.as_mut() { + file.write_all(data)?; + } + Ok(data.len()) + } + + fn flush(&mut self) -> io::Result<()> { + if let Some(file) = self.file.as_mut() { + file.flush()?; + } + Ok(()) + } + } + + impl SeqWrite for DummyWriter { + fn poll_seq_write( + mut self: Pin<&mut Self>, + _cx: &mut Context, + buf: &[u8], + ) -> Poll> { + Poll::Ready(self.as_mut().write(buf)) + } + + fn poll_flush(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { + Poll::Ready(self.as_mut().flush()) + } + } + + fn prepare>(dir_path: P) -> Result<(), Error> { + let dir = nix::dir::Dir::open(dir_path.as_ref(), OFlag::O_DIRECTORY, Mode::empty())?; + + let fs_magic = detect_fs_type(dir.as_raw_fd()).unwrap(); + let stat = nix::sys::stat::fstat(dir.as_raw_fd()).unwrap(); + let mut fs_feature_flags = Flags::from_magic(fs_magic); + let metadata = get_metadata( + dir.as_raw_fd(), + &stat, + fs_feature_flags, + fs_magic, + &mut fs_feature_flags, + false, + )?; + + let mut extractor = Extractor::new( + dir, + metadata.clone(), + true, + OverwriteFlags::empty(), + fs_feature_flags, + ); + + let dir_metadata = Metadata { + stat: pxar::Stat::default().mode(0o777u64).set_dir(), + ..Default::default() + }; + + let file_metadata = Metadata { + stat: pxar::Stat::default().mode(0o777u64).set_regular_file(), + ..Default::default() + }; + + extractor.enter_directory( + OsString::from("testdir".to_string()), + dir_metadata.clone(), + true, + )?; + + let size = 1024 * 1024; + let mut cursor = BufReader::new(std::io::Cursor::new(vec![0u8; size])); + for i in 0..10 { + extractor.enter_directory( + OsString::from(format!("folder_{i}")), + dir_metadata.clone(), + true, + )?; + for j in 0..10 { + cursor.seek(SeekFrom::Start(0))?; + extractor.extract_file( + CString::new(format!("file_{j}").as_str())?.as_c_str(), + &file_metadata, + size as u64, + &mut cursor, + true, + )?; + } + extractor.leave_directory()?; + } + + extractor.leave_directory()?; + + Ok(()) + } + + #[test] + fn test_create_archive_with_reference() -> Result<(), Error> { + let euid = unsafe { libc::geteuid() }; + let egid = unsafe { libc::getegid() }; + + if euid != 1000 || egid != 1000 { + // skip test, cannot create test folder structure with correct ownership + return Ok(()); + } + + let mut testdir = PathBuf::from("./target/testout"); + testdir.push(std::module_path!()); + + let _ = std::fs::remove_dir_all(&testdir); + let _ = std::fs::create_dir_all(&testdir); + + prepare(testdir.as_path())?; + + let previous_payload_index = Some(DynamicIndexReader::new(File::open( + "../tests/pxar/backup-client-pxar-data.ppxar.didx", + )?)?); + let metadata_archive = File::open("../tests/pxar/backup-client-pxar-data.mpxar").unwrap(); + let metadata_size = metadata_archive.metadata()?.len(); + let reader: MetadataArchiveReader = Arc::new(FileReader::new(metadata_archive)); + + let rt = tokio::runtime::Runtime::new().unwrap(); + let (suggested_boundaries, _rx) = mpsc::channel(); + let (forced_boundaries, _rx) = mpsc::channel(); + + rt.block_on(async move { + testdir.push("testdir"); + let source_dir = + nix::dir::Dir::open(testdir.as_path(), OFlag::O_DIRECTORY, Mode::empty()).unwrap(); + + let fs_magic = detect_fs_type(source_dir.as_raw_fd()).unwrap(); + let stat = 
nix::sys::stat::fstat(source_dir.as_raw_fd()).unwrap(); + let mut fs_feature_flags = Flags::from_magic(fs_magic); + + let metadata = get_metadata( + source_dir.as_raw_fd(), + &stat, + fs_feature_flags, + fs_magic, + &mut fs_feature_flags, + false, + )?; + + let writer = DummyWriter::new(Some("./target/backup-client-pxar-run.mpxar")).unwrap(); + let payload_writer = DummyWriter::new::(None).unwrap(); + + let mut encoder = Encoder::new( + pxar::PxarVariant::Split(writer, payload_writer), + &metadata, + Some(&[]), + ) + .await?; + + let mut archiver = Archiver { + feature_flags: Flags::from_magic(fs_magic), + fs_feature_flags: Flags::from_magic(fs_magic), + fs_magic, + callback: Box::new(|_| Ok(())), + patterns: Vec::new(), + catalog: None, + path: PathBuf::new(), + entry_counter: 0, + entry_limit: 1024, + current_st_dev: stat.st_dev, + device_set: None, + hardlinks: HashMap::new(), + file_copy_buffer: vec::undefined(4 * 1024 * 1024), + skip_e2big_xattr: false, + forced_boundaries: Some(forced_boundaries), + previous_payload_index, + suggested_boundaries: Some(suggested_boundaries), + cache: PxarLookaheadCache::new(None), + reuse_stats: ReuseStats::default(), + split_archive: true, + }; + + let accessor = Accessor::new(pxar::PxarVariant::Unified(reader), metadata_size) + .await + .unwrap(); + let root = accessor.open_root().await.ok(); + archiver + .archive_dir_contents(&mut encoder, root, source_dir, true) + .await + .unwrap(); + + archiver + .flush_cached_reusing_if_below_threshold(&mut encoder, false) + .await + .unwrap(); + + encoder.finish().await.unwrap(); + encoder.close().await.unwrap(); + + let status = Command::new("diff") + .args([ + "../tests/pxar/backup-client-pxar-expected.mpxar", + "./target/backup-client-pxar-run.mpxar", + ]) + .status() + .expect("failed to execute diff"); + assert!(status.success()); + + Ok::<(), Error>(()) + }) + } +} diff --git a/pbs-client/src/pxar/extract.rs b/pbs-client/src/pxar/extract.rs index 5f5ac6188..b1245c5fc 100644 --- a/pbs-client/src/pxar/extract.rs +++ b/pbs-client/src/pxar/extract.rs @@ -2,7 +2,8 @@ use std::collections::HashMap; use std::ffi::{CStr, CString, OsStr, OsString}; -use std::io; +use std::fs::OpenOptions; +use std::io::{self, Write}; use std::os::unix::ffi::OsStrExt; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; use std::path::{Path, PathBuf}; @@ -29,6 +30,7 @@ use proxmox_compression::zip::{ZipEncoder, ZipEntry}; use crate::pxar::dir_stack::PxarDirStack; use crate::pxar::metadata; use crate::pxar::Flags; +use crate::tools::handle_root_with_optional_format_version_prelude; pub struct PxarExtractOptions<'a> { pub match_list: &'a [MatchEntry], @@ -36,10 +38,11 @@ pub struct PxarExtractOptions<'a> { pub allow_existing_dirs: bool, pub overwrite_flags: OverwriteFlags, pub on_error: Option, + pub prelude_path: Option, } bitflags! { - #[derive(Default)] + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub struct OverwriteFlags: u8 { /// Overwrite existing entries file content const FILE = 0x1; @@ -124,11 +127,26 @@ where // we use this to keep track of our directory-traversal decoder.enable_goodbye_entries(true); - let root = decoder - .next() - .context("found empty pxar archive")? 
+ let (root, prelude) = handle_root_with_optional_format_version_prelude(&mut decoder) .context("error reading pxar archive")?; + if let Some(ref path) = options.prelude_path { + if let Some(entry) = prelude { + let mut prelude_file = OpenOptions::new() + .create(true) + .write(true) + .open(path) + .with_context(|| format!("error creating prelude file '{path:?}'"))?; + if let pxar::EntryKind::Prelude(ref prelude) = entry.kind() { + prelude_file.write_all(prelude.as_ref())?; + } else { + log::info!("unexpected entry kind for prelude"); + } + } else { + log::info!("No prelude entry found, skip prelude restore."); + } + } + if !root.is_dir() { bail!("pxar archive does not start with a directory entry!"); } @@ -267,6 +285,8 @@ where }; let extract_res = match (did_match, entry.kind()) { + (_, EntryKind::Version(_version)) => Ok(()), + (_, EntryKind::Prelude(_prelude)) => Ok(()), (_, EntryKind::Directory) => { self.callback(entry.path()); @@ -353,26 +373,22 @@ where Ok(()) } } - (true, EntryKind::File { size, .. }) => { - let contents = self.decoder.contents(); - - if let Some(mut contents) = contents { - self.extractor.extract_file( - &file_name, - metadata, - *size, - &mut contents, - self.extractor - .overwrite_flags - .contains(OverwriteFlags::FILE), - ) - } else { - Err(format_err!( - "found regular file entry without contents in archive" - )) - } - .context(PxarExtractContext::ExtractFile) + (true, EntryKind::File { size, .. }) => match self.decoder.contents() { + Ok(Some(mut contents)) => self.extractor.extract_file( + &file_name, + metadata, + *size, + &mut contents, + self.extractor + .overwrite_flags + .contains(OverwriteFlags::FILE), + ), + Ok(None) => Err(format_err!( + "found regular file entry without contents in archive" + )), + Err(err) => Err(err.into()), } + .context(PxarExtractContext::ExtractFile), (false, _) => Ok(()), // skip this }; @@ -852,7 +868,8 @@ where match entry.kind() { EntryKind::File { .. } => { let size = decoder.content_size().unwrap_or(0); - tar_add_file(&mut tarencoder, decoder.contents(), size, metadata, path).await? + let contents = decoder.contents().await?; + tar_add_file(&mut tarencoder, contents, size, metadata, path).await? } EntryKind::Hardlink(link) => { if !link.data.is_empty() { @@ -874,14 +891,9 @@ where path } else { let size = decoder.content_size().unwrap_or(0); - tar_add_file( - &mut tarencoder, - decoder.contents(), - size, - metadata, - path, - ) - .await?; + let contents = decoder.contents().await?; + tar_add_file(&mut tarencoder, contents, size, metadata, path) + .await?; hardlinks.insert(realpath.to_owned(), path.to_owned()); continue; } @@ -1018,7 +1030,8 @@ where metadata.stat.mode as u16, true, ); - zip.add_entry(entry, decoder.contents()) + let contents = decoder.contents().await?; + zip.add_entry(entry, contents) .await .context("could not send file entry")?; } @@ -1036,7 +1049,8 @@ where metadata.stat.mode as u16, true, ); - zip.add_entry(entry, decoder.contents()) + let contents = decoder.contents().await?; + zip.add_entry(entry, contents) .await .context("could not send file entry")?; } @@ -1259,14 +1273,16 @@ where .with_context(|| format!("error at entry {file_name_os:?}"))?; } EntryKind::File { size, .. } => { + let mut contents = decoder + .contents() + .await? 
+ .context("found regular file entry without contents in archive")?; extractor .async_extract_file( &file_name, metadata, *size, - &mut decoder - .contents() - .context("found regular file entry without contents in archive")?, + &mut contents, extractor.overwrite_flags.contains(OverwriteFlags::FILE), ) .await? diff --git a/pbs-client/src/pxar/flags.rs b/pbs-client/src/pxar/flags.rs index b3280de7a..8e9aec04f 100644 --- a/pbs-client/src/pxar/flags.rs +++ b/pbs-client/src/pxar/flags.rs @@ -8,6 +8,7 @@ use libc::c_long; use bitflags::bitflags; bitflags! { + #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct Flags: u64 { /// FAT-style 2s time granularity const WITH_2SEC_TIME = 0x40; diff --git a/pbs-client/src/pxar/look_ahead_cache.rs b/pbs-client/src/pxar/look_ahead_cache.rs new file mode 100644 index 000000000..37c07a9bc --- /dev/null +++ b/pbs-client/src/pxar/look_ahead_cache.rs @@ -0,0 +1,162 @@ +use std::collections::HashSet; +use std::ffi::CString; +use std::ops::Range; +use std::os::unix::io::OwnedFd; +use std::path::PathBuf; + +use nix::sys::stat::FileStat; + +use pxar::encoder::PayloadOffset; +use pxar::Metadata; + +use super::create::*; + +const DEFAULT_CACHE_SIZE: usize = 512; + +pub(crate) struct CacheEntryData { + pub(crate) fd: OwnedFd, + pub(crate) c_file_name: CString, + pub(crate) stat: FileStat, + pub(crate) metadata: Metadata, + pub(crate) payload_offset: PayloadOffset, +} + +pub(crate) enum CacheEntry { + RegEntry(CacheEntryData), + DirEntry(CacheEntryData), + DirEnd, +} + +pub(crate) struct PxarLookaheadCache { + // Current state of the cache + enabled: bool, + // Cached entries + entries: Vec, + // Entries encountered having more than one link given by stat + hardlinks: HashSet, + // Payload range covered by the currently cached entries + range: Range, + // Possible held back last chunk from last flush, used for possible chunk continuation + last_chunk: Option, + // Path when started caching + start_path: PathBuf, + // Number of entries with file descriptors + fd_entries: usize, + // Max number of entries with file descriptors + cache_size: usize, +} + +impl PxarLookaheadCache { + pub(crate) fn new(size: Option) -> Self { + Self { + enabled: false, + entries: Vec::new(), + hardlinks: HashSet::new(), + range: 0..0, + last_chunk: None, + start_path: PathBuf::new(), + fd_entries: 0, + cache_size: size.unwrap_or(DEFAULT_CACHE_SIZE), + } + } + + pub(crate) fn is_full(&self) -> bool { + self.fd_entries >= self.cache_size + } + + pub(crate) fn caching_enabled(&self) -> bool { + self.enabled + } + + pub(crate) fn insert( + &mut self, + fd: OwnedFd, + c_file_name: CString, + stat: FileStat, + metadata: Metadata, + payload_offset: PayloadOffset, + path: PathBuf, + ) { + if !self.enabled { + self.start_path = path; + if !metadata.is_dir() { + self.start_path.pop(); + } + } + self.enabled = true; + self.fd_entries += 1; + if metadata.is_dir() { + self.entries.push(CacheEntry::DirEntry(CacheEntryData { + fd, + c_file_name, + stat, + metadata, + payload_offset, + })) + } else { + self.entries.push(CacheEntry::RegEntry(CacheEntryData { + fd, + c_file_name, + stat, + metadata, + payload_offset, + })) + } + } + + pub(crate) fn insert_dir_end(&mut self) { + self.entries.push(CacheEntry::DirEnd); + } + + pub(crate) fn take_and_reset(&mut self) -> (Vec, PathBuf) { + self.fd_entries = 0; + self.enabled = false; + // keep end for possible continuation if cache has been cleared because + // it was full, but further caching would be fine + self.range = self.range.end..self.range.end; + ( + 
std::mem::take(&mut self.entries), + std::mem::take(&mut self.start_path), + ) + } + + pub(crate) fn contains_hardlink(&self, info: &HardLinkInfo) -> bool { + self.hardlinks.contains(info) + } + + pub(crate) fn insert_hardlink(&mut self, info: HardLinkInfo) -> bool { + self.hardlinks.insert(info) + } + + pub(crate) fn range(&self) -> &Range { + &self.range + } + + pub(crate) fn update_range(&mut self, range: Range) { + self.range = range; + } + + pub(crate) fn try_extend_range(&mut self, range: Range) -> bool { + if self.range.end == 0 { + // initialize first range to start and end with start of new range + self.range.start = range.start; + self.range.end = range.start; + } + + // range continued, update end + if self.range.end == range.start { + self.range.end = range.end; + return true; + } + + false + } + + pub(crate) fn take_last_chunk(&mut self) -> Option { + self.last_chunk.take() + } + + pub(crate) fn update_last_chunk(&mut self, chunk: Option) { + self.last_chunk = chunk; + } +} diff --git a/pbs-client/src/pxar/metadata.rs b/pbs-client/src/pxar/metadata.rs index 745785bf0..8e7a14312 100644 --- a/pbs-client/src/pxar/metadata.rs +++ b/pbs-client/src/pxar/metadata.rs @@ -188,7 +188,7 @@ fn add_fcaps( c_result!(unsafe { libc::setxattr( c_proc_path, - xattr::xattr_name_fcaps().as_ptr(), + xattr::XATTR_NAME_FCAPS.as_ptr(), fcaps.data.as_ptr() as *const libc::c_void, fcaps.data.len(), 0, diff --git a/pbs-client/src/pxar/mod.rs b/pbs-client/src/pxar/mod.rs index 14674b9b9..334759df6 100644 --- a/pbs-client/src/pxar/mod.rs +++ b/pbs-client/src/pxar/mod.rs @@ -50,13 +50,16 @@ pub(crate) mod create; pub(crate) mod dir_stack; pub(crate) mod extract; +pub(crate) mod look_ahead_cache; pub(crate) mod metadata; pub(crate) mod tools; mod flags; pub use flags::Flags; -pub use create::{create_archive, PxarCreateOptions}; +pub use create::{ + create_archive, MetadataArchiveReader, PxarCreateOptions, PxarPrevRef, PxarWriters, +}; pub use extract::{ create_tar, create_zip, extract_archive, extract_sub_dir, extract_sub_dir_seq, ErrorHandler, OverwriteFlags, PxarExtractContext, PxarExtractOptions, diff --git a/pbs-client/src/pxar/tools.rs b/pbs-client/src/pxar/tools.rs index 0cfbaf5b9..27e5185a3 100644 --- a/pbs-client/src/pxar/tools.rs +++ b/pbs-client/src/pxar/tools.rs @@ -128,25 +128,42 @@ pub fn format_single_line_entry(entry: &Entry) -> String { let meta = entry.metadata(); - let (size, link) = match entry.kind() { - EntryKind::File { size, .. } => (format!("{}", *size), String::new()), - EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())), - EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())), - EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()), - _ => ("0".to_string(), String::new()), + let (size, link, payload_offset) = match entry.kind() { + EntryKind::File { + size, + payload_offset, + .. 
+ } => (format!("{}", *size), String::new(), *payload_offset), + EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str()), None), + EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str()), None), + EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new(), None), + _ => ("0".to_string(), String::new(), None), }; let owner_string = format!("{}/{}", meta.stat.uid, meta.stat.gid); - format!( - "{} {:<13} {} {:>8} {:?}{}", - mode_string, - owner_string, - format_mtime(&meta.stat.mtime), - size, - entry.path(), - link, - ) + if let Some(offset) = payload_offset { + format!( + "{} {:<13} {} {:>8} {:?}{} {}", + mode_string, + owner_string, + format_mtime(&meta.stat.mtime), + size, + entry.path(), + link, + offset, + ) + } else { + format!( + "{} {:<13} {} {:>8} {:?}{}", + mode_string, + owner_string, + format_mtime(&meta.stat.mtime), + size, + entry.path(), + link, + ) + } } pub fn format_multi_line_entry(entry: &Entry) -> String { @@ -154,17 +171,30 @@ pub fn format_multi_line_entry(entry: &Entry) -> String { let meta = entry.metadata(); - let (size, link, type_name) = match entry.kind() { - EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"), + let (size, link, type_name, payload_offset) = match entry.kind() { + EntryKind::Version(version) => (format!("{version:?}"), String::new(), "version", None), + EntryKind::Prelude(prelude) => ( + "0".to_string(), + format!("raw data: {:?} bytes", prelude.data.len()), + "prelude", + None, + ), + EntryKind::File { + size, + payload_offset, + .. + } => (format!("{}", *size), String::new(), "file", *payload_offset), EntryKind::Symlink(link) => ( "0".to_string(), format!(" -> {:?}", link.as_os_str()), "symlink", + None, ), EntryKind::Hardlink(link) => ( "0".to_string(), format!(" -> {:?}", link.as_os_str()), "symlink", + None, ), EntryKind::Device(dev) => ( format!("{},{}", dev.major, dev.minor), @@ -176,11 +206,12 @@ pub fn format_multi_line_entry(entry: &Entry) -> String { } else { "device" }, + None, ), - EntryKind::Socket => ("0".to_string(), String::new(), "socket"), - EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"), - EntryKind::Directory => ("0".to_string(), String::new(), "directory"), - EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"), + EntryKind::Socket => ("0".to_string(), String::new(), "socket", None), + EntryKind::Fifo => ("0".to_string(), String::new(), "fifo", None), + EntryKind::Directory => ("0".to_string(), String::new(), "directory", None), + EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry", None), }; let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) { @@ -188,19 +219,39 @@ pub fn format_multi_line_entry(entry: &Entry) -> String { Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())), }; - format!( - " File: {}{}\n \ - Size: {:<13} Type: {}\n\ - Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\ - Modify: {}\n", - file_name, - link, - size, - type_name, - meta.file_mode(), - mode_string, - meta.stat.uid, - meta.stat.gid, - format_mtime(&meta.stat.mtime), - ) + if let Some(offset) = payload_offset { + format!( + " File: {}{}\n \ + Size: {:<13} Type: {}\n\ + Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\ + Modify: {}\n + PayloadOffset: {}\n", + file_name, + link, + size, + type_name, + meta.file_mode(), + mode_string, + meta.stat.uid, + meta.stat.gid, + format_mtime(&meta.stat.mtime), + offset, + ) + } else { + format!( + " File: {}{}\n \ + Size: {:<13} 
Type: {}\n\ + Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\ + Modify: {}\n", + file_name, + link, + size, + type_name, + meta.file_mode(), + mode_string, + meta.stat.uid, + meta.stat.gid, + format_mtime(&meta.stat.mtime), + ) + } } diff --git a/pbs-client/src/pxar_backup_stream.rs b/pbs-client/src/pxar_backup_stream.rs index 22a6ffdc2..4370da6cc 100644 --- a/pbs-client/src/pxar_backup_stream.rs +++ b/pbs-client/src/pxar_backup_stream.rs @@ -2,10 +2,10 @@ use std::io::Write; //use std::os::unix::io::FromRawFd; use std::path::Path; use std::pin::Pin; -use std::sync::{Arc, Mutex}; +use std::sync::{mpsc, Arc, Mutex}; use std::task::{Context, Poll}; -use anyhow::{format_err, Error}; +use anyhow::Error; use futures::future::{AbortHandle, Abortable}; use futures::stream::Stream; use nix::dir::Dir; @@ -15,7 +15,10 @@ use nix::sys::stat::Mode; use proxmox_async::blocking::TokioWriterAdapter; use proxmox_io::StdChannelWriter; -use pbs_datastore::catalog::CatalogWriter; +use pbs_datastore::catalog::{BackupCatalogWriter, CatalogWriter}; + +use crate::inject_reused_chunks::InjectChunks; +use crate::pxar::create::PxarWriters; /// Stream implementation to encode and upload .pxar archives. /// @@ -24,8 +27,9 @@ use pbs_datastore::catalog::CatalogWriter; /// consumer. pub struct PxarBackupStream { rx: Option, Error>>>, + pub suggested_boundaries: Option>, handle: Option, - error: Arc>>, + error: Arc>>, } impl Drop for PxarBackupStream { @@ -38,37 +42,63 @@ impl Drop for PxarBackupStream { impl PxarBackupStream { pub fn new( dir: Dir, - catalog: Arc>>, + catalog: Option>>>, options: crate::pxar::PxarCreateOptions, - ) -> Result { - let (tx, rx) = std::sync::mpsc::sync_channel(10); - + boundaries: Option>, + separate_payload_stream: bool, + ) -> Result<(Self, Option), Error> { let buffer_size = 256 * 1024; + let (tx, rx) = std::sync::mpsc::sync_channel(10); + let writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity( + buffer_size, + StdChannelWriter::new(tx), + )); + let writer = pxar::encoder::sync::StandardWriter::new(writer); + + let (writer, payload_rx, suggested_boundaries_tx, suggested_boundaries_rx) = + if separate_payload_stream { + let (tx, rx) = std::sync::mpsc::sync_channel(10); + let (suggested_boundaries_tx, suggested_boundaries_rx) = std::sync::mpsc::channel(); + let payload_writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity( + buffer_size, + StdChannelWriter::new(tx), + )); + ( + pxar::PxarVariant::Split( + writer, + pxar::encoder::sync::StandardWriter::new(payload_writer), + ), + Some(rx), + Some(suggested_boundaries_tx), + Some(suggested_boundaries_rx), + ) + } else { + (pxar::PxarVariant::Unified(writer), None, None, None) + }; + let error = Arc::new(Mutex::new(None)); let error2 = Arc::clone(&error); let handler = async move { - let writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity( - buffer_size, - StdChannelWriter::new(tx), - )); - - let writer = pxar::encoder::sync::StandardWriter::new(writer); if let Err(err) = crate::pxar::create_archive( dir, - writer, + PxarWriters::new( + writer, + catalog.map(|c| c as Arc>), + ), crate::pxar::Flags::DEFAULT, move |path| { log::debug!("{:?}", path); Ok(()) }, - Some(catalog), options, + boundaries, + suggested_boundaries_tx, ) .await { let mut error = error2.lock().unwrap(); - *error = Some(err.to_string()); + *error = Some(err); } }; @@ -76,21 +106,33 @@ impl PxarBackupStream { let future = Abortable::new(handler, registration); tokio::spawn(future); - Ok(Self { + let backup_stream = Self { rx: Some(rx), 
+ suggested_boundaries: None, + handle: Some(handle.clone()), + error: Arc::clone(&error), + }; + + let backup_payload_stream = payload_rx.map(|rx| Self { + rx: Some(rx), + suggested_boundaries: suggested_boundaries_rx, handle: Some(handle), error, - }) + }); + + Ok((backup_stream, backup_payload_stream)) } pub fn open( dirname: &Path, - catalog: Arc>>, + catalog: Option>>>, options: crate::pxar::PxarCreateOptions, - ) -> Result { + boundaries: Option>, + separate_payload_stream: bool, + ) -> Result<(Self, Option), Error> { let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?; - Self::new(dir, catalog, options) + Self::new(dir, catalog, options, boundaries, separate_payload_stream) } } @@ -100,18 +142,18 @@ impl Stream for PxarBackupStream { fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { { // limit lock scope - let error = self.error.lock().unwrap(); - if let Some(ref msg) = *error { - return Poll::Ready(Some(Err(format_err!("{}", msg)))); + let mut error = self.error.lock().unwrap(); + if let Some(err) = error.take() { + return Poll::Ready(Some(Err(err))); } } match proxmox_async::runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) { Ok(data) => Poll::Ready(Some(data)), Err(_) => { - let error = self.error.lock().unwrap(); - if let Some(ref msg) = *error { - return Poll::Ready(Some(Err(format_err!("{}", msg)))); + let mut error = self.error.lock().unwrap(); + if let Some(err) = error.take() { + return Poll::Ready(Some(Err(err))); } Poll::Ready(None) // channel closed, no error } diff --git a/pbs-client/src/tools/key_source.rs b/pbs-client/src/tools/key_source.rs index 2c15423fe..c039de26f 100644 --- a/pbs-client/src/tools/key_source.rs +++ b/pbs-client/src/tools/key_source.rs @@ -302,45 +302,43 @@ pub(crate) fn read_optional_default_master_pubkey() -> Result>, Error> = Ok(None); +static TEST_DEFAULT_ENCRYPTION_KEY: std::sync::Mutex>, Error>> = + std::sync::Mutex::new(Ok(None)); #[cfg(test)] pub(crate) fn read_optional_default_encryption_key() -> Result, Error> { // not safe when multiple concurrent test cases end up here! - unsafe { - match &TEST_DEFAULT_ENCRYPTION_KEY { - Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))), - Ok(None) => Ok(None), - Err(_) => bail!("test error"), - } + match &*TEST_DEFAULT_ENCRYPTION_KEY.lock().unwrap() { + Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))), + Ok(None) => Ok(None), + Err(_) => bail!("test error"), } } #[cfg(test)] // not safe when multiple concurrent test cases end up here! pub(crate) unsafe fn set_test_encryption_key(value: Result>, Error>) { - TEST_DEFAULT_ENCRYPTION_KEY = value; + *TEST_DEFAULT_ENCRYPTION_KEY.lock().unwrap() = value; } #[cfg(test)] -static mut TEST_DEFAULT_MASTER_PUBKEY: Result>, Error> = Ok(None); +static TEST_DEFAULT_MASTER_PUBKEY: std::sync::Mutex>, Error>> = + std::sync::Mutex::new(Ok(None)); #[cfg(test)] pub(crate) fn read_optional_default_master_pubkey() -> Result, Error> { // not safe when multiple concurrent test cases end up here! - unsafe { - match &TEST_DEFAULT_MASTER_PUBKEY { - Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))), - Ok(None) => Ok(None), - Err(_) => bail!("test error"), - } + match &*TEST_DEFAULT_MASTER_PUBKEY.lock().unwrap() { + Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))), + Ok(None) => Ok(None), + Err(_) => bail!("test error"), } } #[cfg(test)] // not safe when multiple concurrent test cases end up here! 
pub(crate) unsafe fn set_test_default_master_pubkey(value: Result>, Error>) { - TEST_DEFAULT_MASTER_PUBKEY = value; + *TEST_DEFAULT_MASTER_PUBKEY.lock().unwrap() = value; } pub fn get_encryption_key_password() -> Result, Error> { diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs index 1b0123a39..772cc1263 100644 --- a/pbs-client/src/tools/mod.rs +++ b/pbs-client/src/tools/mod.rs @@ -1,10 +1,15 @@ //! Shared tools useful for common CLI clients. use std::collections::HashMap; use std::env::VarError::{NotPresent, NotUnicode}; +use std::ffi::OsStr; use std::fs::File; use std::io::{BufRead, BufReader}; +use std::os::unix::ffi::OsStrExt; +use std::os::unix::fs::OpenOptionsExt; use std::os::unix::io::FromRawFd; +use std::path::PathBuf; use std::process::Command; +use std::sync::OnceLock; use anyhow::{bail, format_err, Context, Error}; use serde_json::{json, Value}; @@ -16,6 +21,12 @@ use proxmox_schema::*; use proxmox_sys::fs::file_get_json; use pbs_api_types::{Authid, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL}; +use pbs_datastore::catalog::{ArchiveEntry, DirEntryAttribute}; +use pbs_datastore::BackupManifest; +use pxar::accessor::aio::Accessor; +use pxar::accessor::ReadAt; +use pxar::format::SignedDuration; +use pxar::{mode, EntryKind}; use crate::{BackupRepository, HttpClient, HttpClientOptions}; @@ -117,6 +128,23 @@ pub fn get_default_repository() -> Option { std::env::var("PBS_REPOSITORY").ok() } +pub fn remove_repository_from_value(param: &mut Value) -> Result { + if let Some(url) = param + .as_object_mut() + .ok_or_else(|| format_err!("unable to get repository (parameter is not an object)"))? + .remove("repository") + { + return url + .as_str() + .ok_or_else(|| format_err!("invalid repository value (must be a string)"))? + .parse(); + } + + get_default_repository() + .ok_or_else(|| format_err!("unable to get default repository"))? 
+ .parse() +} + pub fn extract_repository_from_value(param: &Value) -> Result { let repo_url = param["repository"] .as_str() @@ -337,7 +365,7 @@ pub fn complete_pxar_archive_name(arg: &str, param: &HashMap) -> complete_server_file_name(arg, param) .iter() .filter_map(|name| { - if name.ends_with(".pxar.didx") { + if has_pxar_filename_extension(name, true) { Some(pbs_tools::format::strip_server_file_extension(name).to_owned()) } else { None @@ -526,3 +554,211 @@ pub fn place_xdg_file( .and_then(|base| base.place_config_file(file_name).map_err(Error::from)) .with_context(|| format!("failed to place {} in xdg home", description)) } + +pub fn get_pxar_archive_names( + archive_name: &str, + manifest: &BackupManifest, +) -> Result<(String, Option), Error> { + let (filename, ext) = match archive_name.strip_suffix(".didx") { + Some(filename) => (filename, ".didx"), + None => (archive_name, ""), + }; + + // Check if archive with given extension is present + if manifest + .files() + .iter() + .any(|fileinfo| fileinfo.filename == format!("{filename}.didx")) + { + // check if already given as one of split archive name variants + if let Some(base) = filename + .strip_suffix(".mpxar") + .or_else(|| filename.strip_suffix(".ppxar")) + { + return Ok(( + format!("{base}.mpxar{ext}"), + Some(format!("{base}.ppxar{ext}")), + )); + } + return Ok((archive_name.to_owned(), None)); + } + + // if not, try fallback from regular to split archive + if let Some(base) = filename.strip_suffix(".pxar") { + return get_pxar_archive_names(&format!("{base}.mpxar{ext}"), manifest); + } + + bail!("archive not found in manifest"); +} + +/// Check if the given filename has a valid pxar filename extension variant +/// +/// If `with_didx_extension` is `true`, check the additional `.didx` ending. +pub fn has_pxar_filename_extension(name: &str, with_didx_extension: bool) -> bool { + if with_didx_extension { + name.ends_with(".pxar.didx") + || name.ends_with(".mpxar.didx") + || name.ends_with(".ppxar.didx") + } else { + name.ends_with(".pxar") || name.ends_with(".mpxar") || name.ends_with(".ppxar") + } +} + +/// Decode possible format version and prelude entries before getting the root directory +/// entry. 
+/// +/// Returns the root directory entry and, if present, the prelude entry +pub fn handle_root_with_optional_format_version_prelude( + decoder: &mut pxar::decoder::sync::Decoder, +) -> Result<(pxar::Entry, Option), Error> { + let first = decoder + .next() + .ok_or_else(|| format_err!("missing root entry"))??; + match first.kind() { + pxar::EntryKind::Directory => { + let version = pxar::format::FormatVersion::Version1; + log::debug!("pxar format version '{version:?}'"); + Ok((first, None)) + } + pxar::EntryKind::Version(version) => { + log::debug!("pxar format version '{version:?}'"); + let second = decoder + .next() + .ok_or_else(|| format_err!("missing root entry"))??; + match second.kind() { + pxar::EntryKind::Directory => Ok((second, None)), + pxar::EntryKind::Prelude(_prelude) => { + let third = decoder + .next() + .ok_or_else(|| format_err!("missing root entry"))??; + Ok((third, Some(second))) + } + _ => bail!("unexpected entry kind {:?}", second.kind()), + } + } + _ => bail!("unexpected entry kind {:?}", first.kind()), + } +} + +/// Raise the soft limit for open file handles to the hard limit +/// +/// Returns the values set before raising the limit as libc::rlimit64 +pub fn raise_nofile_limit() -> Result { + let mut old = libc::rlimit64 { + rlim_cur: 0, + rlim_max: 0, + }; + if 0 != unsafe { libc::getrlimit64(libc::RLIMIT_NOFILE, &mut old as *mut libc::rlimit64) } { + bail!("Failed to get nofile rlimit"); + } + + let mut new = libc::rlimit64 { + rlim_cur: old.rlim_max, + rlim_max: old.rlim_max, + }; + if 0 != unsafe { libc::setrlimit64(libc::RLIMIT_NOFILE, &mut new as *mut libc::rlimit64) } { + bail!("Failed to set nofile rlimit"); + } + + Ok(old) +} + +/// Look up the directory entries of the given directory `path` in a pxar archive via it's given +/// `accessor` and return the entries formatted as [`ArchiveEntry`]'s, compatible with reading +/// entries from the catalog. +/// +/// If the optional `path_prefix` is given, all returned entry paths will be prefixed with it. +pub async fn pxar_metadata_catalog_lookup( + accessor: Accessor, + path: &OsStr, + path_prefix: Option<&str>, +) -> Result, Error> { + let root = accessor.open_root().await?; + let dir_entry = root + .lookup(&path) + .await + .map_err(|err| format_err!("lookup failed - {err}"))? + .ok_or_else(|| format_err!("lookup failed - error opening '{path:?}'"))?; + + let mut entries = Vec::new(); + if let EntryKind::Directory = dir_entry.kind() { + let dir_entry = dir_entry + .enter_directory() + .await + .map_err(|err| format_err!("failed to enter directory - {err}"))?; + + let mut entries_iter = dir_entry.read_dir(); + while let Some(entry) = entries_iter.next().await { + let entry = entry?.decode_entry().await?; + + let entry_attr = match entry.kind() { + EntryKind::Version(_) | EntryKind::Prelude(_) | EntryKind::GoodbyeTable => continue, + EntryKind::Directory => DirEntryAttribute::Directory { + start: entry.entry_range_info().entry_range.start, + }, + EntryKind::File { size, .. 
} => { + let mtime = match entry.metadata().mtime_as_duration() { + SignedDuration::Positive(val) => i64::try_from(val.as_secs())?, + SignedDuration::Negative(val) => -i64::try_from(val.as_secs())?, + }; + DirEntryAttribute::File { size: *size, mtime } + } + EntryKind::Device(_) => match entry.metadata().file_type() { + mode::IFBLK => DirEntryAttribute::BlockDevice, + mode::IFCHR => DirEntryAttribute::CharDevice, + _ => bail!("encountered unknown device type"), + }, + EntryKind::Symlink(_) => DirEntryAttribute::Symlink, + EntryKind::Hardlink(_) => DirEntryAttribute::Hardlink, + EntryKind::Fifo => DirEntryAttribute::Fifo, + EntryKind::Socket => DirEntryAttribute::Socket, + }; + + let entry_path = if let Some(prefix) = path_prefix { + let mut entry_path = PathBuf::from(prefix); + match entry.path().strip_prefix("/") { + Ok(path) => entry_path.push(path), + Err(_) => entry_path.push(entry.path()), + } + entry_path + } else { + PathBuf::from(entry.path()) + }; + entries.push(ArchiveEntry::new( + entry_path.as_os_str().as_bytes(), + Some(&entry_attr), + )); + } + } else { + bail!(format!( + "expected directory entry, got entry kind '{:?}'", + dir_entry.kind() + )); + } + + Ok(entries) +} + +/// Creates a temporary file (with `O_TMPFILE`) in `XDG_CACHE_HOME`. If we +/// cannot create the file there it will be created in `/tmp` instead. +pub fn create_tmp_file() -> std::io::Result { + static TMP_PATH: OnceLock = OnceLock::new(); + let tmp_path = TMP_PATH.get_or_init(|| { + xdg::BaseDirectories::new() + .map(|base| base.get_cache_home()) + .unwrap_or_else(|_| std::path::PathBuf::from("/tmp")) + }); + + let mut open_opts_binding = std::fs::OpenOptions::new(); + let builder = open_opts_binding + .write(true) + .read(true) + .custom_flags(libc::O_TMPFILE); + builder.open(tmp_path).or_else(|err| { + if tmp_path != std::path::Path::new("/tmp") { + builder.open("/tmp") + } else { + Err(err) + } + }) +} diff --git a/pbs-config/Cargo.toml b/pbs-config/Cargo.toml index d11cd41ee..12d0eb3da 100644 --- a/pbs-config/Cargo.toml +++ b/pbs-config/Cargo.toml @@ -4,11 +4,11 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Configuration file management for PBS" +rust-version.workspace = true [dependencies] anyhow.workspace = true const_format.workspace = true -lazy_static.workspace = true libc.workspace = true nix.workspace = true once_cell.workspace = true @@ -17,6 +17,7 @@ regex.workspace = true serde.workspace = true serde_json.workspace = true +proxmox-notify.workspace = true proxmox-router = { workspace = true, default-features = false } proxmox-schema.workspace = true proxmox-section-config.workspace = true diff --git a/pbs-config/src/acl.rs b/pbs-config/src/acl.rs index a0354a053..4ce4c13c0 100644 --- a/pbs-config/src/acl.rs +++ b/pbs-config/src/acl.rs @@ -2,37 +2,36 @@ use std::collections::{BTreeMap, BTreeSet, HashMap}; use std::io::Write; use std::path::{Path, PathBuf}; use std::str::FromStr; -use std::sync::{Arc, RwLock}; +use std::sync::{Arc, LazyLock, RwLock}; use anyhow::{bail, Error}; -use lazy_static::lazy_static; - use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema}; use pbs_api_types::{Authid, Role, Userid, ROLE_NAME_NO_ACCESS}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; -lazy_static! { - /// Map of pre-defined [Roles](Role) to their associated [privileges](PRIVILEGES) combination - /// and description. 
- pub static ref ROLE_NAMES: HashMap<&'static str, (u64, &'static str)> = { - let mut map = HashMap::new(); +/// Map of pre-defined [Roles](Role) to their associated [privileges](PRIVILEGES) combination +/// and description. +pub static ROLE_NAMES: LazyLock<HashMap<&'static str, (u64, &'static str)>> = LazyLock::new(|| { + let mut map = HashMap::new(); - let list = match Role::API_SCHEMA { - Schema::String(StringSchema { format: Some(ApiStringFormat::Enum(list)), .. }) => list, - _ => unreachable!(), - }; - - for entry in list.iter() { - let privs: u64 = Role::from_str(entry.value).unwrap() as u64; - map.insert(entry.value, (privs, entry.description)); - } - - map + let list = match Role::API_SCHEMA { + Schema::String(StringSchema { + format: Some(ApiStringFormat::Enum(list)), + .. + }) => list, + _ => unreachable!(), }; -} + + for entry in list.iter() { + let privs: u64 = Role::from_str(entry.value).unwrap() as u64; + map.insert(entry.value, (privs, entry.description)); + } + + map +}); pub fn split_acl_path(path: &str) -> Vec<&str> { let items = path.split('/'); @@ -100,7 +99,8 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> { return Ok(()); } match components[1] { - "certificates" | "disks" | "log" | "status" | "tasks" | "time" => { + "certificates" | "disks" | "log" | "notifications" | "status" | "tasks" + | "time" => { if components_len == 2 { return Ok(()); } @@ -721,13 +721,13 @@ pub fn cached_config() -> Result<Arc<AclTree>, Error> { last_mtime_nsec: i64, } - lazy_static! { - static ref CACHED_CONFIG: RwLock<ConfigCache> = RwLock::new(ConfigCache { + static CACHED_CONFIG: LazyLock<RwLock<ConfigCache>> = LazyLock::new(|| { + RwLock::new(ConfigCache { data: None, last_mtime: 0, - last_mtime_nsec: 0 - }); - } + last_mtime_nsec: 0, + }) + }); let stat = match nix::sys::stat::stat(ACL_CFG_FILENAME) { Ok(stat) => Some(stat), @@ -1046,14 +1046,14 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup let node = tree.find_node(path); assert!(node.is_some()); if let Some(node) = node { - assert!(node.users.get(&user1).is_none()); + assert!(!node.users.contains_key(&user1)); } } for path in &user2_paths { let node = tree.find_node(path); assert!(node.is_some()); if let Some(node) = node { - assert!(node.users.get(&user2).is_some()); + assert!(node.users.contains_key(&user2)); } } @@ -1063,7 +1063,7 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup let node = tree.find_node(path); assert!(node.is_some()); if let Some(node) = node { - assert!(node.users.get(&user2).is_none()); + assert!(!node.users.contains_key(&user2)); } } diff --git a/pbs-config/src/cached_user_info.rs b/pbs-config/src/cached_user_info.rs index b9534b802..e1cd2d68a 100644 --- a/pbs-config/src/cached_user_info.rs +++ b/pbs-config/src/cached_user_info.rs @@ -1,9 +1,8 @@ //! Cached user info for fast ACL permission checks -use std::sync::{Arc, RwLock}; +use std::sync::{Arc, LazyLock, RwLock}; use anyhow::{bail, Error}; -use lazy_static::lazy_static; use proxmox_router::UserInformation; use proxmox_section_config::SectionConfigData; @@ -26,13 +25,13 @@ struct ConfigCache { last_user_cache_generation: usize, } -lazy_static! { - static ref CACHED_CONFIG: RwLock<ConfigCache> = RwLock::new(ConfigCache { +static CACHED_CONFIG: LazyLock<RwLock<ConfigCache>> = LazyLock::new(|| { + RwLock::new(ConfigCache { data: None, last_update: 0, - last_user_cache_generation: 0 - }); -} + last_user_cache_generation: 0, + }) +}); impl CachedUserInfo { /// Returns a cached instance (up to 5 seconds old).
@@ -179,7 +178,7 @@ impl CachedUserInfo { (privs, propagated_privs) } - /// Checks whether the `auth_id` has any of the privilegs `privs` on any object below `path`. + /// Checks whether the `auth_id` has any of the privileges `privs` on any object below `path`. pub fn any_privs_below( &self, auth_id: &Authid, diff --git a/pbs-config/src/datastore.rs b/pbs-config/src/datastore.rs index 5844a174e..dc5bb3da9 100644 --- a/pbs-config/src/datastore.rs +++ b/pbs-config/src/datastore.rs @@ -1,6 +1,7 @@ -use anyhow::Error; -use lazy_static::lazy_static; use std::collections::HashMap; +use std::sync::LazyLock; + +use anyhow::Error; use proxmox_schema::{AllOfSchema, ApiType}; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -9,9 +10,7 @@ use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard, ConfigVersionCache}; -lazy_static! { - pub static ref CONFIG: SectionConfig = init(); -} +pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init); fn init() -> SectionConfig { const OBJ_SCHEMA: &AllOfSchema = DataStoreConfig::API_SCHEMA.unwrap_all_of_schema(); diff --git a/pbs-config/src/domains.rs b/pbs-config/src/domains.rs index 35aa11d53..32bd967a1 100644 --- a/pbs-config/src/domains.rs +++ b/pbs-config/src/domains.rs @@ -1,20 +1,19 @@ use std::collections::HashMap; +use std::sync::LazyLock; use anyhow::Error; -use lazy_static::lazy_static; use pbs_buildcfg::configdir; use proxmox_schema::{ApiType, ObjectSchema}; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; -use pbs_api_types::{LdapRealmConfig, OpenIdRealmConfig, REALM_ID_SCHEMA}; +use pbs_api_types::{AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, REALM_ID_SCHEMA}; -lazy_static! { - pub static ref CONFIG: SectionConfig = init(); -} +pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init); fn init() -> SectionConfig { + const AD_SCHEMA: &ObjectSchema = AdRealmConfig::API_SCHEMA.unwrap_object_schema(); const LDAP_SCHEMA: &ObjectSchema = LdapRealmConfig::API_SCHEMA.unwrap_object_schema(); const OPENID_SCHEMA: &ObjectSchema = OpenIdRealmConfig::API_SCHEMA.unwrap_object_schema(); @@ -33,6 +32,10 @@ fn init() -> SectionConfig { config.register_plugin(plugin); + let plugin = SectionConfigPlugin::new("ad".to_string(), Some(String::from("realm")), AD_SCHEMA); + + config.register_plugin(plugin); + config } @@ -60,7 +63,7 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> { /// Check if a realm with the given name exists pub fn exists(domains: &SectionConfigData, realm: &str) -> bool { - realm == "pbs" || realm == "pam" || domains.sections.get(realm).is_some() + realm == "pbs" || realm == "pam" || domains.sections.contains_key(realm) } // shell completion helper @@ -95,3 +98,7 @@ pub fn complete_openid_realm_name(_arg: &str, _param: &HashMap<String, String>) pub fn complete_ldap_realm_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> { complete_realm_of_type("ldap") } + +pub fn complete_ad_realm_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> { + complete_realm_of_type("ad") +} diff --git a/pbs-config/src/drive.rs b/pbs-config/src/drive.rs index 67ffc5541..4e2befd2d 100644 --- a/pbs-config/src/drive.rs +++ b/pbs-config/src/drive.rs @@ -12,9 +12,9 @@ //!
[SectionConfig]: proxmox::api::section_config::SectionConfig use std::collections::HashMap; +use std::sync::LazyLock; use anyhow::{bail, Error}; -use lazy_static::lazy_static; use proxmox_schema::*; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -23,10 +23,8 @@ use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; use pbs_api_types::{LtoTapeDrive, ScsiTapeChanger, VirtualTapeDrive, DRIVE_NAME_SCHEMA}; -lazy_static! { - /// Static [`SectionConfig`] to access parser/writer functions. - pub static ref CONFIG: SectionConfig = init(); -} +/// Static [`SectionConfig`] to access parser/writer functions. +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { let mut config = SectionConfig::new(&DRIVE_NAME_SCHEMA); diff --git a/pbs-config/src/lib.rs b/pbs-config/src/lib.rs index 3cfdb2a11..20a8238da 100644 --- a/pbs-config/src/lib.rs +++ b/pbs-config/src/lib.rs @@ -7,6 +7,7 @@ pub mod drive; pub mod media_pool; pub mod metrics; pub mod network; +pub mod notifications; pub mod prune; pub mod remote; pub mod sync; diff --git a/pbs-config/src/media_pool.rs b/pbs-config/src/media_pool.rs index 3b6448c3c..3bf15188a 100644 --- a/pbs-config/src/media_pool.rs +++ b/pbs-config/src/media_pool.rs @@ -7,9 +7,9 @@ //! [SectionConfig]: proxmox_section_config::SectionConfig use std::collections::HashMap; +use std::sync::LazyLock; use anyhow::Error; -use lazy_static::lazy_static; use proxmox_schema::*; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -18,10 +18,8 @@ use pbs_api_types::{MediaPoolConfig, MEDIA_POOL_NAME_SCHEMA}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; -lazy_static! { - /// Static [`SectionConfig`] to access parser/writer functions. - pub static ref CONFIG: SectionConfig = init(); -} +/// Static [`SectionConfig`] to access parser/writer functions. +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { let mut config = SectionConfig::new(&MEDIA_POOL_NAME_SCHEMA); diff --git a/pbs-config/src/metrics.rs b/pbs-config/src/metrics.rs index 78e683e32..1b93f70c9 100644 --- a/pbs-config/src/metrics.rs +++ b/pbs-config/src/metrics.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; +use std::sync::LazyLock; use anyhow::Error; -use lazy_static::lazy_static; use proxmox_schema::*; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -10,9 +10,7 @@ use pbs_api_types::{InfluxDbHttp, InfluxDbUdp, METRIC_SERVER_ID_SCHEMA}; use crate::{open_backup_lockfile, BackupLockGuard}; -lazy_static! { - pub static ref CONFIG: SectionConfig = init(); -} +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { let mut config = SectionConfig::new(&METRIC_SERVER_ID_SCHEMA); diff --git a/pbs-config/src/network/helper.rs b/pbs-config/src/network/helper.rs index 9e195d713..87a0e24f9 100644 --- a/pbs-config/src/network/helper.rs +++ b/pbs-config/src/network/helper.rs @@ -2,10 +2,10 @@ use std::collections::HashMap; use std::os::unix::io::{AsRawFd, FromRawFd, OwnedFd}; use std::path::Path; use std::process::Command; +use std::sync::LazyLock; use anyhow::{bail, format_err, Error}; use const_format::concatcp; -use lazy_static::lazy_static; use nix::ioctl_read_bad; use nix::sys::socket::{socket, AddressFamily, SockFlag, SockType}; use regex::Regex; @@ -48,16 +48,14 @@ pub static IPV4_REVERSE_MASK: &[&str] = &[ "255.255.255.255", ]; -lazy_static! 
{ - pub static ref IPV4_MASK_HASH_LOCALNET: HashMap<&'static str, u8> = { - let mut map = HashMap::new(); - #[allow(clippy::needless_range_loop)] - for i in 0..IPV4_REVERSE_MASK.len() { - map.insert(IPV4_REVERSE_MASK[i], i as u8); - } - map - }; -} +pub static IPV4_MASK_HASH_LOCALNET: LazyLock> = LazyLock::new(|| { + let mut map = HashMap::new(); + #[allow(clippy::needless_range_loop)] + for i in 0..IPV4_REVERSE_MASK.len() { + map.insert(IPV4_REVERSE_MASK[i], i as u8); + } + map +}); pub fn parse_cidr(cidr: &str) -> Result<(String, u8, bool), Error> { let (address, mask, is_v6) = parse_address_or_cidr(cidr)?; @@ -92,12 +90,10 @@ pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> { pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option, bool), Error> { // NOTE: This is NOT the same regex as in proxmox-schema as this one has capture groups for // the addresses vs cidr portions! - lazy_static! { - pub static ref CIDR_V4_REGEX: Regex = - Regex::new(concatcp!(r"^(", IPV4RE_STR, r")(?:/(\d{1,2}))?$")).unwrap(); - pub static ref CIDR_V6_REGEX: Regex = - Regex::new(concatcp!(r"^(", IPV6RE_STR, r")(?:/(\d{1,3}))?$")).unwrap(); - } + pub static CIDR_V4_REGEX: LazyLock = + LazyLock::new(|| Regex::new(concatcp!(r"^(", IPV4RE_STR, r")(?:/(\d{1,2}))?$")).unwrap()); + pub static CIDR_V6_REGEX: LazyLock = + LazyLock::new(|| Regex::new(concatcp!(r"^(", IPV6RE_STR, r")(?:/(\d{1,3}))?$")).unwrap()); if let Some(caps) = CIDR_V4_REGEX.captures(cidr) { let address = &caps[1]; @@ -133,9 +129,9 @@ pub fn get_network_interfaces() -> Result, Error> { ioctl_read_bad!(get_interface_flags, libc::SIOCGIFFLAGS, ifreq); - lazy_static! { - static ref IFACE_LINE_REGEX: Regex = Regex::new(r"^\s*([^:\s]+):").unwrap(); - } + static IFACE_LINE_REGEX: LazyLock = + LazyLock::new(|| Regex::new(r"^\s*([^:\s]+):").unwrap()); + let raw = std::fs::read_to_string(PROC_NET_DEV) .map_err(|err| format_err!("unable to read {} - {}", PROC_NET_DEV, err))?; diff --git a/pbs-config/src/network/lexer.rs b/pbs-config/src/network/lexer.rs index fd23e3d8c..6a20f009a 100644 --- a/pbs-config/src/network/lexer.rs +++ b/pbs-config/src/network/lexer.rs @@ -1,8 +1,7 @@ use std::collections::{HashMap, VecDeque}; use std::io::BufRead; use std::iter::Iterator; - -use lazy_static::lazy_static; +use std::sync::LazyLock; #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum Token { @@ -24,6 +23,8 @@ pub enum Token { MTU, BridgePorts, BridgeVlanAware, + VlanId, + VlanRawDevice, BondSlaves, BondMode, BondPrimary, @@ -31,35 +32,37 @@ pub enum Token { EOF, } -lazy_static! 
{ - static ref KEYWORDS: HashMap<&'static str, Token> = { - let mut map = HashMap::new(); - map.insert("address", Token::Address); - map.insert("auto", Token::Auto); - map.insert("dhcp", Token::DHCP); - map.insert("gateway", Token::Gateway); - map.insert("inet", Token::Inet); - map.insert("inet6", Token::Inet6); - map.insert("iface", Token::Iface); - map.insert("loopback", Token::Loopback); - map.insert("manual", Token::Manual); - map.insert("netmask", Token::Netmask); - map.insert("static", Token::Static); - map.insert("mtu", Token::MTU); - map.insert("bridge-ports", Token::BridgePorts); - map.insert("bridge_ports", Token::BridgePorts); - map.insert("bridge-vlan-aware", Token::BridgeVlanAware); - map.insert("bridge_vlan_aware", Token::BridgeVlanAware); - map.insert("bond-slaves", Token::BondSlaves); - map.insert("bond_slaves", Token::BondSlaves); - map.insert("bond-mode", Token::BondMode); - map.insert("bond-primary", Token::BondPrimary); - map.insert("bond_primary", Token::BondPrimary); - map.insert("bond_xmit_hash_policy", Token::BondXmitHashPolicy); - map.insert("bond-xmit-hash-policy", Token::BondXmitHashPolicy); - map - }; -} +static KEYWORDS: LazyLock> = LazyLock::new(|| { + let mut map = HashMap::new(); + map.insert("address", Token::Address); + map.insert("auto", Token::Auto); + map.insert("dhcp", Token::DHCP); + map.insert("gateway", Token::Gateway); + map.insert("inet", Token::Inet); + map.insert("inet6", Token::Inet6); + map.insert("iface", Token::Iface); + map.insert("loopback", Token::Loopback); + map.insert("manual", Token::Manual); + map.insert("netmask", Token::Netmask); + map.insert("static", Token::Static); + map.insert("mtu", Token::MTU); + map.insert("bridge-ports", Token::BridgePorts); + map.insert("bridge_ports", Token::BridgePorts); + map.insert("bridge-vlan-aware", Token::BridgeVlanAware); + map.insert("bridge_vlan_aware", Token::BridgeVlanAware); + map.insert("vlan-id", Token::VlanId); + map.insert("vlan_id", Token::VlanId); + map.insert("vlan-raw-device", Token::VlanRawDevice); + map.insert("vlan_raw_device", Token::VlanRawDevice); + map.insert("bond-slaves", Token::BondSlaves); + map.insert("bond_slaves", Token::BondSlaves); + map.insert("bond-mode", Token::BondMode); + map.insert("bond-primary", Token::BondPrimary); + map.insert("bond_primary", Token::BondPrimary); + map.insert("bond_xmit_hash_policy", Token::BondXmitHashPolicy); + map.insert("bond-xmit-hash-policy", Token::BondXmitHashPolicy); + map +}); pub struct Lexer { input: R, diff --git a/pbs-config/src/network/mod.rs b/pbs-config/src/network/mod.rs index e2008b7c2..21ad99434 100644 --- a/pbs-config/src/network/mod.rs +++ b/pbs-config/src/network/mod.rs @@ -1,8 +1,8 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::io::Write; +use std::sync::LazyLock; use anyhow::{bail, format_err, Error}; -use lazy_static::lazy_static; use regex::Regex; use serde::de::{value, Deserialize, IntoDeserializer}; @@ -23,9 +23,11 @@ use pbs_api_types::{ use crate::{open_backup_lockfile, BackupLockGuard}; -lazy_static! 
{ - static ref PHYSICAL_NIC_REGEX: Regex = Regex::new(r"^(?:eth\d+|en[^:.]+|ib\d+)$").unwrap(); -} +static PHYSICAL_NIC_REGEX: LazyLock = + LazyLock::new(|| Regex::new(r"^(?:eth\d+|en[^:.]+|ib\d+)$").unwrap()); +static VLAN_INTERFACE_REGEX: LazyLock = LazyLock::new(|| { + Regex::new(r"^(?P\S+)\.(?P\d+)|vlan(?P\d+)$").unwrap() +}); pub fn is_physical_nic(iface: &str) -> bool { PHYSICAL_NIC_REGEX.is_match(iface) @@ -41,6 +43,21 @@ pub fn bond_xmit_hash_policy_from_str(s: &str) -> Result Option { + VLAN_INTERFACE_REGEX.captures(iface_name).and_then(|cap| { + cap.name("vlan_id") + .or(cap.name("vlan_id2")) + .and_then(|id| id.as_str().parse::().ok()) + }) +} + +pub fn parse_vlan_raw_device_from_name(iface_name: &str) -> Option<&str> { + VLAN_INTERFACE_REGEX + .captures(iface_name) + .and_then(|cap| cap.name("vlan_raw_device")) + .map(Into::into) +} + // Write attributes not depending on address family fn write_iface_attributes(iface: &Interface, w: &mut dyn Write) -> Result<(), Error> { static EMPTY_LIST: Vec = Vec::new(); @@ -79,6 +96,14 @@ fn write_iface_attributes(iface: &Interface, w: &mut dyn Write) -> Result<(), Er writeln!(w, "\tbond-slaves {}", slaves.join(" "))?; } } + NetworkInterfaceType::Vlan => { + if let Some(vlan_id) = iface.vlan_id { + writeln!(w, "\tvlan-id {vlan_id}")?; + } + if let Some(vlan_raw_device) = &iface.vlan_raw_device { + writeln!(w, "\tvlan-raw-device {vlan_raw_device}")?; + } + } _ => {} } @@ -243,7 +268,7 @@ impl NetworkConfig { } /// Check if ports are used only once - pub fn check_port_usage(&self) -> Result<(), Error> { + fn check_port_usage(&self) -> Result<(), Error> { let mut used_ports = HashMap::new(); let mut check_port_usage = |iface, ports: &Vec| { for port in ports.iter() { @@ -272,7 +297,7 @@ impl NetworkConfig { } /// Check if child mtu is less or equal than parent mtu - pub fn check_mtu(&self, parent_name: &str, child_name: &str) -> Result<(), Error> { + fn check_mtu(&self, parent_name: &str, child_name: &str) -> Result<(), Error> { let parent = self .interfaces .get(parent_name) @@ -312,7 +337,7 @@ impl NetworkConfig { } /// Check if bond slaves exists - pub fn check_bond_slaves(&self) -> Result<(), Error> { + fn check_bond_slaves(&self) -> Result<(), Error> { for (iface, interface) in self.interfaces.iter() { if let Some(slaves) = &interface.slaves { for slave in slaves.iter() { @@ -340,10 +365,9 @@ impl NetworkConfig { } /// Check if bridge ports exists - pub fn check_bridge_ports(&self) -> Result<(), Error> { - lazy_static! 
{ - static ref VLAN_INTERFACE_REGEX: Regex = Regex::new(r"^(\S+)\.(\d+)$").unwrap(); - } + fn check_bridge_ports(&self) -> Result<(), Error> { + static VLAN_INTERFACE_REGEX: LazyLock = + LazyLock::new(|| Regex::new(r"^(\S+)\.(\d+)$").unwrap()); for (iface, interface) in self.interfaces.iter() { if let Some(ports) = &interface.bridge_ports { @@ -364,7 +388,7 @@ impl NetworkConfig { Ok(()) } - pub fn write_config(&self, w: &mut dyn Write) -> Result<(), Error> { + fn write_config(&self, w: &mut dyn Write) -> Result<(), Error> { self.check_port_usage()?; self.check_bond_slaves()?; self.check_bridge_ports()?; @@ -505,148 +529,159 @@ pub fn complete_port_list(arg: &str, _param: &HashMap) -> Vec Result<(), Error> { - let input = ""; + fn test_write_network_config_manual() { + let iface_name = String::from("enp3s0"); + let mut iface = Interface::new(iface_name.clone()); + iface.interface_type = Eth; + iface.method = Some(Manual); + iface.active = true; - let mut parser = NetworkParser::new(input.as_bytes()); + let nw_config = NetworkConfig { + interfaces: BTreeMap::from([(iface_name.clone(), iface)]), + order: vec![Iface(iface_name.clone())], + }; - let config = parser.parse_interfaces(None)?; - - let output = String::try_from(config)?; - - let expected = "auto lo\niface lo inet loopback\n\n"; - assert_eq!(output, expected); - - // run again using output as input - let mut parser = NetworkParser::new(output.as_bytes()); - - let config = parser.parse_interfaces(None)?; - - let output = String::try_from(config)?; - - assert_eq!(output, expected); - - Ok(()) + assert_eq!( + String::try_from(nw_config).unwrap().trim(), + r#"iface enp3s0 inet manual"# + ); } #[test] - fn test_network_config_create_lo_2() -> Result<(), Error> { - let input = "#c1\n\n#c2\n\niface test inet manual\n"; + fn test_write_network_config_static() { + let iface_name = String::from("enp3s0"); + let mut iface = Interface::new(iface_name.clone()); + iface.interface_type = Eth; + iface.method = Some(Static); + iface.cidr = Some(String::from("10.0.0.100/16")); + iface.active = true; - let mut parser = NetworkParser::new(input.as_bytes()); - - let config = parser.parse_interfaces(None)?; - - let output = String::try_from(config)?; - - // Note: loopback should be added in front of other interfaces - let expected = "#c1\n#c2\n\nauto lo\niface lo inet loopback\n\niface test inet manual\n\n"; - assert_eq!(output, expected); - - Ok(()) + let nw_config = NetworkConfig { + interfaces: BTreeMap::from([(iface_name.clone(), iface)]), + order: vec![Iface(iface_name.clone())], + }; + assert_eq!( + String::try_from(nw_config).unwrap().trim(), + r#" +iface enp3s0 inet static + address 10.0.0.100/16"# + .to_string() + .trim() + ); } #[test] - fn test_network_config_parser_no_blank_1() -> Result<(), Error> { - let input = "auto lo\n\ - iface lo inet loopback\n\ - iface lo inet6 loopback\n\ - auto ens18\n\ - iface ens18 inet static\n\ - \taddress 192.168.20.144/20\n\ - \tgateway 192.168.16.1\n\ - # comment\n\ - iface ens20 inet static\n\ - \taddress 192.168.20.145/20\n\ - iface ens21 inet manual\n\ - iface ens22 inet manual\n"; + fn test_write_network_config_static_with_gateway() { + let iface_name = String::from("enp3s0"); + let mut iface = Interface::new(iface_name.clone()); + iface.interface_type = Eth; + iface.method = Some(Static); + iface.cidr = Some(String::from("10.0.0.100/16")); + iface.gateway = Some(String::from("10.0.0.1")); + iface.active = true; - let mut parser = NetworkParser::new(input.as_bytes()); - - let config = 
parser.parse_interfaces(None)?; - - let output = String::try_from(config)?; - - let expected = "auto lo\n\ - iface lo inet loopback\n\ - \n\ - iface lo inet6 loopback\n\ - \n\ - auto ens18\n\ - iface ens18 inet static\n\ - \taddress 192.168.20.144/20\n\ - \tgateway 192.168.16.1\n\ - #comment\n\ - \n\ - iface ens20 inet static\n\ - \taddress 192.168.20.145/20\n\ - \n\ - iface ens21 inet manual\n\ - \n\ - iface ens22 inet manual\n\ - \n"; - assert_eq!(output, expected); - - Ok(()) + let nw_config = NetworkConfig { + interfaces: BTreeMap::from([(iface_name.clone(), iface)]), + order: vec![Iface(iface_name.clone())], + }; + assert_eq!( + String::try_from(nw_config).unwrap().trim(), + r#" +iface enp3s0 inet static + address 10.0.0.100/16 + gateway 10.0.0.1"# + .to_string() + .trim() + ); } #[test] - fn test_network_config_parser_no_blank_2() -> Result<(), Error> { - // Adapted from bug 2926 - let input = "### Hetzner Online GmbH installimage\n\ - \n\ - source /etc/network/interfaces.d/*\n\ - \n\ - auto lo\n\ - iface lo inet loopback\n\ - iface lo inet6 loopback\n\ - \n\ - auto enp4s0\n\ - iface enp4s0 inet static\n\ - \taddress 10.10.10.10/24\n\ - \tgateway 10.10.10.1\n\ - \t# route 10.10.20.10/24 via 10.10.20.1\n\ - \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\ - \n\ - iface enp4s0 inet6 static\n\ - \taddress fe80::5496:35ff:fe99:5a6a/64\n\ - \tgateway fe80::1\n"; + fn test_write_network_config_vlan_id_in_name() { + let iface_name = String::from("vmbr0.100"); + let mut iface = Interface::new(iface_name.clone()); + iface.interface_type = Vlan; + iface.method = Some(Manual); + iface.active = true; - let mut parser = NetworkParser::new(input.as_bytes()); + let nw_config = NetworkConfig { + interfaces: BTreeMap::from([(iface_name.clone(), iface)]), + order: vec![Iface(iface_name.clone())], + }; + assert_eq!( + String::try_from(nw_config).unwrap().trim(), + "iface vmbr0.100 inet manual" + ); + } - let config = parser.parse_interfaces(None)?; + #[test] + fn test_write_network_config_vlan_with_raw_device() { + let iface_name = String::from("vlan100"); + let mut iface = Interface::new(iface_name.clone()); + iface.interface_type = Vlan; + iface.vlan_raw_device = Some(String::from("vmbr0")); + iface.method = Some(Manual); + iface.active = true; - let output = String::try_from(config)?; + let nw_config = NetworkConfig { + interfaces: BTreeMap::from([(iface_name.clone(), iface)]), + order: vec![Iface(iface_name.clone())], + }; + assert_eq!( + String::try_from(nw_config).unwrap().trim(), + r#" +iface vlan100 inet manual + vlan-raw-device vmbr0"# + .trim() + ); + } - let expected = "### Hetzner Online GmbH installimage\n\ - \n\ - source /etc/network/interfaces.d/*\n\ - \n\ - auto lo\n\ - iface lo inet loopback\n\ - \n\ - iface lo inet6 loopback\n\ - \n\ - auto enp4s0\n\ - iface enp4s0 inet static\n\ - \taddress 10.10.10.10/24\n\ - \tgateway 10.10.10.1\n\ - \t# route 10.10.20.10/24 via 10.10.20.1\n\ - \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\ - \n\ - iface enp4s0 inet6 static\n\ - \taddress fe80::5496:35ff:fe99:5a6a/64\n\ - \tgateway fe80::1\n\ - \n"; - assert_eq!(output, expected); + #[test] + fn test_write_network_config_vlan_with_individual_name() { + let iface_name = String::from("individual_name"); + let mut iface = Interface::new(iface_name.clone()); + iface.interface_type = Vlan; + iface.vlan_raw_device = Some(String::from("vmbr0")); + iface.vlan_id = Some(100); + iface.method = Some(Manual); + iface.active = true; - Ok(()) + 
let nw_config = NetworkConfig { + interfaces: BTreeMap::from([(iface_name.clone(), iface)]), + order: vec![Iface(iface_name.clone())], + }; + assert_eq!( + String::try_from(nw_config).unwrap().trim(), + r#" +iface individual_name inet manual + vlan-id 100 + vlan-raw-device vmbr0"# + .trim() + ); + } + + #[test] + fn test_vlan_parse_vlan_id_from_name() { + assert_eq!(parse_vlan_id_from_name("vlan100"), Some(100)); + assert_eq!(parse_vlan_id_from_name("vlan"), None); + assert_eq!(parse_vlan_id_from_name("arbitrary"), None); + assert_eq!(parse_vlan_id_from_name("vmbr0.100"), Some(100)); + assert_eq!(parse_vlan_id_from_name("vmbr0"), None); + // assert_eq!(parse_vlan_id_from_name("vmbr0.1.400"), Some(400)); // NOTE ifupdown2 does actually support this + } + + #[test] + fn test_vlan_parse_vlan_raw_device_from_name() { + assert_eq!(parse_vlan_raw_device_from_name("vlan100"), None); + assert_eq!(parse_vlan_raw_device_from_name("arbitrary"), None); + assert_eq!(parse_vlan_raw_device_from_name("vmbr0"), None); + assert_eq!(parse_vlan_raw_device_from_name("vmbr0.200"), Some("vmbr0")); } } diff --git a/pbs-config/src/network/parser.rs b/pbs-config/src/network/parser.rs index ec2c64eb9..a5d05c6ea 100644 --- a/pbs-config/src/network/parser.rs +++ b/pbs-config/src/network/parser.rs @@ -1,9 +1,11 @@ +use crate::network::VLAN_INTERFACE_REGEX; + use std::collections::{HashMap, HashSet}; use std::io::BufRead; use std::iter::{Iterator, Peekable}; +use std::sync::LazyLock; use anyhow::{bail, format_err, Error}; -use lazy_static::lazy_static; use regex::Regex; use super::helper::*; @@ -361,6 +363,20 @@ impl NetworkParser { interface.bond_xmit_hash_policy = Some(policy); self.eat(Token::Newline)?; } + Token::VlanId => { + self.eat(Token::VlanId)?; + let vlan_id = self.next_text()?.parse()?; + interface.vlan_id = Some(vlan_id); + set_interface_type(interface, NetworkInterfaceType::Vlan)?; + self.eat(Token::Newline)?; + } + Token::VlanRawDevice => { + self.eat(Token::VlanRawDevice)?; + let vlan_raw_device = self.next_text()?; + interface.vlan_raw_device = Some(vlan_raw_device); + set_interface_type(interface, NetworkInterfaceType::Vlan)?; + self.eat(Token::Newline)?; + } _ => { // parse addon attributes let option = self.parse_to_eol()?; @@ -473,11 +489,11 @@ impl NetworkParser { &mut self, existing_interfaces: Option<&HashMap>, ) -> Result { - self._parse_interfaces(existing_interfaces) + self.do_parse_interfaces(existing_interfaces) .map_err(|err| format_err!("line {}: {}", self.line_nr, err)) } - pub fn _parse_interfaces( + fn do_parse_interfaces( &mut self, existing_interfaces: Option<&HashMap>, ) -> Result { @@ -520,10 +536,8 @@ impl NetworkParser { } } - lazy_static! 
{ - static ref INTERFACE_ALIAS_REGEX: Regex = Regex::new(r"^\S+:\d+$").unwrap(); - static ref VLAN_INTERFACE_REGEX: Regex = Regex::new(r"^\S+\.\d+$").unwrap(); - } + static INTERFACE_ALIAS_REGEX: LazyLock = + LazyLock::new(|| Regex::new(r"^\S+:\d+$").unwrap()); if let Some(existing_interfaces) = existing_interfaces { for (iface, active) in existing_interfaces.iter() { @@ -570,7 +584,7 @@ impl NetworkParser { } } - if config.interfaces.get("lo").is_none() { + if !config.interfaces.contains_key("lo") { let mut interface = Interface::new(String::from("lo")); set_method_v4(&mut interface, NetworkConfigMethod::Loopback)?; interface.interface_type = NetworkInterfaceType::Loopback; @@ -602,3 +616,231 @@ impl NetworkParser { Ok(config) } } + +#[cfg(test)] +mod test { + + use anyhow::Error; + + use super::*; + + #[test] + fn test_network_config_create_lo_1() -> Result<(), Error> { + let input = ""; + + let mut parser = NetworkParser::new(input.as_bytes()); + + let config = parser.parse_interfaces(None)?; + + let output = String::try_from(config)?; + + let expected = "auto lo\niface lo inet loopback\n\n"; + assert_eq!(output, expected); + + // run again using output as input + let mut parser = NetworkParser::new(output.as_bytes()); + + let config = parser.parse_interfaces(None)?; + + let output = String::try_from(config)?; + + assert_eq!(output, expected); + + Ok(()) + } + + #[test] + fn test_network_config_create_lo_2() -> Result<(), Error> { + let input = "#c1\n\n#c2\n\niface test inet manual\n"; + + let mut parser = NetworkParser::new(input.as_bytes()); + + let config = parser.parse_interfaces(None)?; + + let output = String::try_from(config)?; + + // Note: loopback should be added in front of other interfaces + let expected = "#c1\n#c2\n\nauto lo\niface lo inet loopback\n\niface test inet manual\n\n"; + assert_eq!(output, expected); + + Ok(()) + } + + #[test] + fn test_network_config_parser_no_blank_1() -> Result<(), Error> { + let input = "auto lo\n\ + iface lo inet loopback\n\ + iface lo inet6 loopback\n\ + auto ens18\n\ + iface ens18 inet static\n\ + \taddress 192.168.20.144/20\n\ + \tgateway 192.168.16.1\n\ + # comment\n\ + iface ens20 inet static\n\ + \taddress 192.168.20.145/20\n\ + iface ens21 inet manual\n\ + iface ens22 inet manual\n"; + + let mut parser = NetworkParser::new(input.as_bytes()); + + let config = parser.parse_interfaces(None)?; + + let output = String::try_from(config)?; + + let expected = "auto lo\n\ + iface lo inet loopback\n\ + \n\ + iface lo inet6 loopback\n\ + \n\ + auto ens18\n\ + iface ens18 inet static\n\ + \taddress 192.168.20.144/20\n\ + \tgateway 192.168.16.1\n\ + #comment\n\ + \n\ + iface ens20 inet static\n\ + \taddress 192.168.20.145/20\n\ + \n\ + iface ens21 inet manual\n\ + \n\ + iface ens22 inet manual\n\ + \n"; + assert_eq!(output, expected); + + Ok(()) + } + + #[test] + fn test_network_config_parser_no_blank_2() -> Result<(), Error> { + // Adapted from bug 2926 + let input = "### Hetzner Online GmbH installimage\n\ + \n\ + source /etc/network/interfaces.d/*\n\ + \n\ + auto lo\n\ + iface lo inet loopback\n\ + iface lo inet6 loopback\n\ + \n\ + auto enp4s0\n\ + iface enp4s0 inet static\n\ + \taddress 10.10.10.10/24\n\ + \tgateway 10.10.10.1\n\ + \t# route 10.10.20.10/24 via 10.10.20.1\n\ + \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\ + \n\ + iface enp4s0 inet6 static\n\ + \taddress fe80::5496:35ff:fe99:5a6a/64\n\ + \tgateway fe80::1\n"; + + let mut parser = NetworkParser::new(input.as_bytes()); + + let config = 
parser.parse_interfaces(None)?; + + let output = String::try_from(config)?; + + let expected = "### Hetzner Online GmbH installimage\n\ + \n\ + source /etc/network/interfaces.d/*\n\ + \n\ + auto lo\n\ + iface lo inet loopback\n\ + \n\ + iface lo inet6 loopback\n\ + \n\ + auto enp4s0\n\ + iface enp4s0 inet static\n\ + \taddress 10.10.10.10/24\n\ + \tgateway 10.10.10.1\n\ + \t# route 10.10.20.10/24 via 10.10.20.1\n\ + \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\ + \n\ + iface enp4s0 inet6 static\n\ + \taddress fe80::5496:35ff:fe99:5a6a/64\n\ + \tgateway fe80::1\n\ + \n"; + assert_eq!(output, expected); + + Ok(()) + } + + #[test] + fn test_network_config_parser_vlan_id_in_name() { + let input = "iface vmbr0.100 inet static manual"; + let mut parser = NetworkParser::new(input.as_bytes()); + let config = parser.parse_interfaces(None).unwrap(); + + let iface = config.interfaces.get("vmbr0.100").unwrap(); + assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan); + assert_eq!(iface.vlan_raw_device, None); + assert_eq!(iface.vlan_id, None); + } + + #[test] + fn test_network_config_parser_vlan_with_raw_device() { + let input = r#" +iface vlan100 inet manual + vlan-raw-device vmbr0"#; + + let mut parser = NetworkParser::new(input.as_bytes()); + let config = parser.parse_interfaces(None).unwrap(); + + let iface = config.interfaces.get("vlan100").unwrap(); + assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan); + assert_eq!(iface.vlan_raw_device, Some(String::from("vmbr0"))); + assert_eq!(iface.vlan_id, None); + } + + #[test] + fn test_network_config_parser_vlan_with_raw_device_static() { + let input = r#" +iface vlan100 inet static + vlan-raw-device vmbr0 + address 10.0.0.100/16"#; + + let mut parser = NetworkParser::new(input.as_bytes()); + let config = parser.parse_interfaces(None).unwrap(); + + let iface = config.interfaces.get("vlan100").unwrap(); + assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan); + assert_eq!(iface.vlan_raw_device, Some(String::from("vmbr0"))); + assert_eq!(iface.vlan_id, None); + assert_eq!(iface.method, Some(NetworkConfigMethod::Static)); + assert_eq!(iface.cidr, Some(String::from("10.0.0.100/16"))); + } + + #[test] + fn test_network_config_parser_vlan_individual_name() { + let input = r#" +iface individual_name inet manual + vlan-id 100 + vlan-raw-device vmbr0"#; + + let mut parser = NetworkParser::new(input.as_bytes()); + let config = parser.parse_interfaces(None).unwrap(); + + let iface = config.interfaces.get("individual_name").unwrap(); + assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan); + assert_eq!(iface.vlan_raw_device, Some(String::from("vmbr0"))); + assert_eq!(iface.vlan_id, Some(100)); + } + + #[test] + fn test_network_config_parser_vlan_individual_name_static() { + let input = r#" +iface individual_name inet static + vlan-id 100 + vlan-raw-device vmbr0 + address 10.0.0.100/16 +"#; + + let mut parser = NetworkParser::new(input.as_bytes()); + let config = parser.parse_interfaces(None).unwrap(); + + let iface = config.interfaces.get("individual_name").unwrap(); + assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan); + assert_eq!(iface.vlan_raw_device, Some(String::from("vmbr0"))); + assert_eq!(iface.vlan_id, Some(100)); + assert_eq!(iface.method, Some(NetworkConfigMethod::Static)); + assert_eq!(iface.cidr, Some(String::from("10.0.0.100/16"))); + } +} diff --git a/pbs-config/src/notifications.rs b/pbs-config/src/notifications.rs new file mode 100644 index 000000000..df327346e --- 
/dev/null +++ b/pbs-config/src/notifications.rs @@ -0,0 +1,41 @@ +use anyhow::Error; + +use proxmox_notify::Config; + +use pbs_buildcfg::configdir; + +use crate::{open_backup_lockfile, BackupLockGuard}; + +/// Configuration file location for notification targets/matchers. +pub const NOTIFICATION_CONFIG_PATH: &str = configdir!("/notifications.cfg"); + +/// Private configuration file location for secrets - only readable by `root`. +pub const NOTIFICATION_PRIV_CONFIG_PATH: &str = configdir!("/notifications-priv.cfg"); + +/// Lockfile to prevent concurrent write access. +pub const NOTIFICATION_LOCK_FILE: &str = configdir!("/.notifications.lck"); + +/// Get exclusive lock for `notifications.cfg` +pub fn lock_config() -> Result { + open_backup_lockfile(NOTIFICATION_LOCK_FILE, None, true) +} + +/// Load notification config. +pub fn config() -> Result { + let content = + proxmox_sys::fs::file_read_optional_string(NOTIFICATION_CONFIG_PATH)?.unwrap_or_default(); + + let priv_content = proxmox_sys::fs::file_read_optional_string(NOTIFICATION_PRIV_CONFIG_PATH)? + .unwrap_or_default(); + + Ok(Config::new(&content, &priv_content)?) +} + +/// Save notification config. +pub fn save_config(config: Config) -> Result<(), Error> { + let (cfg, priv_cfg) = config.write()?; + crate::replace_backup_config(NOTIFICATION_CONFIG_PATH, cfg.as_bytes())?; + crate::replace_secret_config(NOTIFICATION_PRIV_CONFIG_PATH, priv_cfg.as_bytes())?; + + Ok(()) +} diff --git a/pbs-config/src/prune.rs b/pbs-config/src/prune.rs index 21e52ffc9..14a954047 100644 --- a/pbs-config/src/prune.rs +++ b/pbs-config/src/prune.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use anyhow::Error; -use lazy_static::lazy_static; +use std::sync::LazyLock; use proxmox_schema::*; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -10,9 +10,7 @@ use pbs_api_types::{PruneJobConfig, JOB_ID_SCHEMA}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; -lazy_static! { - pub static ref CONFIG: SectionConfig = init(); -} +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { const OBJ_SCHEMA: &AllOfSchema = PruneJobConfig::API_SCHEMA.unwrap_all_of_schema(); diff --git a/pbs-config/src/remote.rs b/pbs-config/src/remote.rs index 9cbd13213..26b217518 100644 --- a/pbs-config/src/remote.rs +++ b/pbs-config/src/remote.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; +use std::sync::LazyLock; use anyhow::Error; -use lazy_static::lazy_static; use proxmox_schema::*; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -10,9 +10,7 @@ use pbs_api_types::{Remote, REMOTE_ID_SCHEMA}; use crate::{open_backup_lockfile, BackupLockGuard}; -lazy_static! { - pub static ref CONFIG: SectionConfig = init(); -} +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { let obj_schema = match Remote::API_SCHEMA { diff --git a/pbs-config/src/sync.rs b/pbs-config/src/sync.rs index 6d27c123a..45453abb1 100644 --- a/pbs-config/src/sync.rs +++ b/pbs-config/src/sync.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; +use std::sync::LazyLock; use anyhow::Error; -use lazy_static::lazy_static; use proxmox_schema::{ApiType, Schema}; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -10,9 +10,7 @@ use pbs_api_types::{SyncJobConfig, JOB_ID_SCHEMA}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; -lazy_static! 
{ - pub static ref CONFIG: SectionConfig = init(); -} +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { let obj_schema = match SyncJobConfig::API_SCHEMA { diff --git a/pbs-config/src/tape_job.rs b/pbs-config/src/tape_job.rs index 75ace6c7f..66e4a797d 100644 --- a/pbs-config/src/tape_job.rs +++ b/pbs-config/src/tape_job.rs @@ -1,6 +1,6 @@ use anyhow::Error; -use lazy_static::lazy_static; use std::collections::HashMap; +use std::sync::LazyLock; use proxmox_schema::{ApiType, Schema}; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -9,9 +9,7 @@ use pbs_api_types::{TapeBackupJobConfig, JOB_ID_SCHEMA}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; -lazy_static! { - pub static ref CONFIG: SectionConfig = init(); -} +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { let obj_schema = match TapeBackupJobConfig::API_SCHEMA { diff --git a/pbs-config/src/traffic_control.rs b/pbs-config/src/traffic_control.rs index 0826be83e..4ae1000d7 100644 --- a/pbs-config/src/traffic_control.rs +++ b/pbs-config/src/traffic_control.rs @@ -1,8 +1,8 @@ //! Traffic Control Settings (Network rate limits) use std::collections::HashMap; +use std::sync::LazyLock; use anyhow::Error; -use lazy_static::lazy_static; use proxmox_schema::{ApiType, Schema}; @@ -13,10 +13,8 @@ use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlug use crate::ConfigVersionCache; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; -lazy_static! { - /// Static [`SectionConfig`] to access parser/writer functions. - pub static ref CONFIG: SectionConfig = init(); -} +/// Static [`SectionConfig`] to access parser/writer functions. +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { let mut config = SectionConfig::new(&TRAFFIC_CONTROL_ID_SCHEMA); diff --git a/pbs-config/src/user.rs b/pbs-config/src/user.rs index 8e10a778d..08d141e66 100644 --- a/pbs-config/src/user.rs +++ b/pbs-config/src/user.rs @@ -1,8 +1,7 @@ use std::collections::HashMap; -use std::sync::{Arc, RwLock}; +use std::sync::{Arc, LazyLock, RwLock}; use anyhow::{bail, Error}; -use lazy_static::lazy_static; use proxmox_schema::*; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -13,9 +12,7 @@ use crate::ConfigVersionCache; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; -lazy_static! { - pub static ref CONFIG: SectionConfig = init(); -} +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { let mut config = SectionConfig::new(&Authid::API_SCHEMA); @@ -57,7 +54,7 @@ pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> { let digest = openssl::sha::sha256(content.as_bytes()); let mut data = CONFIG.parse(USER_CFG_FILENAME, &content)?; - if data.sections.get("root@pam").is_none() { + if !data.sections.contains_key("root@pam") { let user: User = User { userid: Userid::root_userid().clone(), comment: Some("Superuser".to_string()), @@ -80,13 +77,13 @@ pub fn cached_config() -> Result, Error> { last_mtime_nsec: i64, } - lazy_static! 
{ - static ref CACHED_CONFIG: RwLock = RwLock::new(ConfigCache { + static CACHED_CONFIG: LazyLock> = LazyLock::new(|| { + RwLock::new(ConfigCache { data: None, last_mtime: 0, - last_mtime_nsec: 0 - }); - } + last_mtime_nsec: 0, + }) + }); let stat = match nix::sys::stat::stat(USER_CFG_FILENAME) { Ok(stat) => Some(stat), diff --git a/pbs-config/src/verify.rs b/pbs-config/src/verify.rs index 2631eeef3..93776f8c0 100644 --- a/pbs-config/src/verify.rs +++ b/pbs-config/src/verify.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; +use std::sync::LazyLock; use anyhow::Error; -use lazy_static::lazy_static; use proxmox_schema::*; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; @@ -10,9 +10,7 @@ use pbs_api_types::{VerificationJobConfig, JOB_ID_SCHEMA}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; -lazy_static! { - pub static ref CONFIG: SectionConfig = init(); -} +pub static CONFIG: LazyLock = LazyLock::new(init); fn init() -> SectionConfig { let obj_schema = match VerificationJobConfig::API_SCHEMA { diff --git a/pbs-datastore/Cargo.toml b/pbs-datastore/Cargo.toml index b793dc5ba..4ebc5fdc2 100644 --- a/pbs-datastore/Cargo.toml +++ b/pbs-datastore/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "low level pbs data storage access" +rust-version.workspace = true [dependencies] anyhow.workspace = true @@ -12,7 +13,6 @@ crc32fast.workspace = true endian_trait.workspace = true futures.workspace = true hex = { workspace = true, features = [ "serde" ] } -lazy_static.workspace = true libc.workspace = true log.workspace = true nix.workspace = true @@ -20,21 +20,24 @@ openssl.workspace = true serde.workspace = true serde_json.workspace = true tokio = { workspace = true, features = [] } +tracing.workspace = true walkdir.workspace = true zstd.workspace = true +zstd-safe.workspace = true pathpatterns.workspace = true pxar.workspace = true proxmox-borrow.workspace = true -proxmox-io.workspace = true proxmox-human-byte.workspace = true +proxmox-io.workspace = true proxmox-lang.workspace=true proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-serde = { workspace = true, features = [ "serde_json" ] } +proxmox-sys.workspace = true proxmox-time.workspace = true proxmox-uuid.workspace = true -proxmox-sys.workspace = true +proxmox-worker-task.workspace = true pbs-api-types.workspace = true pbs-buildcfg.workspace = true diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index bdfaeabc1..414ec878d 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -415,7 +415,9 @@ impl BackupDir { /// Returns the absolute path for backup_dir, using the cached formatted time string. 
pub fn full_path(&self) -> PathBuf { - self.store.snapshot_path(&self.ns, &self.dir) + let mut path = self.store.base_path(); + path.push(self.relative_path()); + path } pub fn protected_file(&self) -> PathBuf { diff --git a/pbs-datastore/src/cached_chunk_reader.rs b/pbs-datastore/src/cached_chunk_reader.rs index cdb42bb9b..be7f2a1e2 100644 --- a/pbs-datastore/src/cached_chunk_reader.rs +++ b/pbs-datastore/src/cached_chunk_reader.rs @@ -10,7 +10,6 @@ use anyhow::Error; use futures::ready; use tokio::io::{AsyncRead, AsyncSeek, ReadBuf}; -use proxmox_lang::error::io_err_other; use proxmox_lang::io_format_err; use pbs_tools::async_lru_cache::{AsyncCacher, AsyncLruCache}; @@ -182,7 +181,7 @@ where this.position += read as u64; Ok(()) } - Err(err) => Err(io_err_other(err)), + Err(err) => Err(std::io::Error::other(err)), }; // future completed, drop diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs index 9f6289c9f..dd0061ea5 100644 --- a/pbs-datastore/src/chunk_store.rs +++ b/pbs-datastore/src/chunk_store.rs @@ -3,6 +3,7 @@ use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; use anyhow::{bail, format_err, Error}; +use tracing::info; use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus}; use proxmox_io::ReadExt; @@ -10,8 +11,7 @@ use proxmox_sys::fs::{create_dir, create_path, file_type_from_file_stat, CreateO use proxmox_sys::process_locker::{ ProcessLockExclusiveGuard, ProcessLockSharedGuard, ProcessLocker, }; -use proxmox_sys::task_log; -use proxmox_sys::WorkerTaskContext; +use proxmox_worker_task::WorkerTaskContext; use crate::file_formats::{ COMPRESSED_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0, UNCOMPRESSED_BLOB_MAGIC_1_0, @@ -92,7 +92,6 @@ impl ChunkStore { path: P, uid: nix::unistd::Uid, gid: nix::unistd::Gid, - worker: Option<&dyn WorkerTaskContext>, sync_level: DatastoreFSyncLevel, ) -> Result where @@ -143,9 +142,7 @@ impl ChunkStore { } let percentage = (i * 100) / (64 * 1024); if percentage != last_percentage { - if let Some(worker) = worker { - task_log!(worker, "Chunkstore create: {}%", percentage) - } + info!("Chunkstore create: {percentage}%"); last_percentage = percentage; } } @@ -251,8 +248,9 @@ impl ChunkStore { pub fn get_chunk_iterator( &self, ) -> Result< - impl Iterator, usize, bool)> - + std::iter::FusedIterator, + impl std::iter::FusedIterator< + Item = (Result, usize, bool), + >, Error, > { // unwrap: only `None` in unit tests @@ -374,7 +372,7 @@ impl ChunkStore { for (entry, percentage, bad) in self.get_chunk_iterator()? 
{ if last_percentage != percentage { last_percentage = percentage; - task_log!(worker, "processed {}% ({} chunks)", percentage, chunk_count,); + info!("processed {percentage}% ({chunk_count} chunks)"); } worker.check_abort()?; @@ -578,15 +576,8 @@ fn test_chunk_store1() { let user = nix::unistd::User::from_uid(nix::unistd::Uid::current()) .unwrap() .unwrap(); - let chunk_store = ChunkStore::create( - "test", - &path, - user.uid, - user.gid, - None, - DatastoreFSyncLevel::None, - ) - .unwrap(); + let chunk_store = + ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None).unwrap(); let (chunk, digest) = crate::data_blob::DataChunkBuilder::new(&[0u8, 1u8]) .build() @@ -598,14 +589,8 @@ fn test_chunk_store1() { let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap(); assert!(exists); - let chunk_store = ChunkStore::create( - "test", - &path, - user.uid, - user.gid, - None, - DatastoreFSyncLevel::None, - ); + let chunk_store = + ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None); assert!(chunk_store.is_err()); if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ } diff --git a/pbs-datastore/src/chunker.rs b/pbs-datastore/src/chunker.rs index 712751829..ecdbca296 100644 --- a/pbs-datastore/src/chunker.rs +++ b/pbs-datastore/src/chunker.rs @@ -1,3 +1,5 @@ +use std::sync::mpsc::Receiver; + /// Note: window size 32 or 64, is faster because we can /// speedup modulo operations, but always computes hash 0 /// for constant data streams .. 0,0,0,0,0,0 @@ -5,6 +7,20 @@ /// use hash value 0 to detect a boundary. const CA_CHUNKER_WINDOW_SIZE: usize = 64; +/// Additional context for chunker to find possible boundaries in payload streams +#[derive(Default)] +pub struct Context { + /// Already consumed bytes of the chunk stream consumer + pub base: u64, + /// Total size currently buffered + pub total: u64, +} + +pub trait Chunker { + fn scan(&mut self, data: &[u8], ctx: &Context) -> usize; + fn reset(&mut self); +} + /// Sliding window chunker (Buzhash) /// /// This is a rewrite of *casync* chunker (cachunker.h) in rust. @@ -15,7 +31,7 @@ const CA_CHUNKER_WINDOW_SIZE: usize = 64; /// Hash](https://en.wikipedia.org/wiki/Rolling_hash) article from /// Wikipedia. -pub struct Chunker { +pub struct ChunkerImpl { h: u32, window_size: usize, chunk_size: usize, @@ -32,6 +48,16 @@ pub struct Chunker { window: [u8; CA_CHUNKER_WINDOW_SIZE], } +/// Sliding window chunker (Buzhash) with boundary suggestions +/// +/// Suggest to chunk at a given boundary instead of the regular chunk boundary for better alignment +/// with file payload boundaries. +pub struct PayloadChunker { + chunker: ChunkerImpl, + current_suggested: Option, + suggested_boundaries: Receiver, +} + const BUZHASH_TABLE: [u32; 256] = [ 0x458be752, 0xc10748cc, 0xfbbcdbb8, 0x6ded5b68, 0xb10a82b5, 0x20d75648, 0xdfc5665f, 0xa8428801, 0x7ebf5191, 0x841135c7, 0x65cc53b3, 0x280a597c, 0x16f60255, 0xc78cbc3e, 0x294415f5, 0xb938d494, @@ -67,7 +93,7 @@ const BUZHASH_TABLE: [u32; 256] = [ 0x5eff22f4, 0x6027f4cc, 0x77178b3c, 0xae507131, 0x7bf7cabc, 0xf9c18d66, 0x593ade65, 0xd95ddf11, ]; -impl Chunker { +impl ChunkerImpl { /// Create a new Chunker instance, which produces and average /// chunk size of `chunk_size_avg` (need to be a power of two). 
We /// allow variation from `chunk_size_avg/4` up to a maximum of @@ -105,11 +131,44 @@ impl Chunker { } } + // fast implementation avoiding modulo + // #[inline(always)] + fn shall_break(&self) -> bool { + if self.chunk_size >= self.chunk_size_max { + return true; + } + + if self.chunk_size < self.chunk_size_min { + return false; + } + + //(self.h & 0x1ffff) <= 2 //THIS IS SLOW!!! + + //(self.h & self.break_test_mask) <= 2 // Bad on 0 streams + + (self.h & self.break_test_mask) >= self.break_test_minimum + } + + // This is the original implementation from casync + /* + #[inline(always)] + fn shall_break_orig(&self) -> bool { + + if self.chunk_size >= self.chunk_size_max { return true; } + + if self.chunk_size < self.chunk_size_min { return false; } + + (self.h % self.discriminator) == (self.discriminator - 1) + } + */ +} + +impl Chunker for ChunkerImpl { /// Scans the specified data for a chunk border. Returns 0 if none /// was found (and the function should be called with more data /// later on), or another value indicating the position of a /// border. - pub fn scan(&mut self, data: &[u8]) -> usize { + fn scan(&mut self, data: &[u8], _ctx: &Context) -> usize { let window_len = self.window.len(); let data_len = data.len(); @@ -167,36 +226,89 @@ impl Chunker { 0 } - // fast implementation avoiding modulo - // #[inline(always)] - fn shall_break(&self) -> bool { - if self.chunk_size >= self.chunk_size_max { - return true; + fn reset(&mut self) { + self.h = 0; + self.chunk_size = 0; + self.window_size = 0; + } +} + +impl PayloadChunker { + /// Create a new PayloadChunker instance, which produces an average + /// chunk size of `chunk_size_avg` (needs to be a power of two), if no + /// suggested boundaries are provided. + /// Use suggested boundaries instead, whenever the chunk size is within + /// the min - max range. 
+ pub fn new(chunk_size_avg: usize, suggested_boundaries: Receiver) -> Self { + Self { + chunker: ChunkerImpl::new(chunk_size_avg), + current_suggested: None, + suggested_boundaries, } + } +} - if self.chunk_size < self.chunk_size_min { - return false; +impl Chunker for PayloadChunker { + fn scan(&mut self, data: &[u8], ctx: &Context) -> usize { + assert!(ctx.total >= data.len() as u64); + let pos = ctx.total - data.len() as u64; + + loop { + if let Some(boundary) = self.current_suggested { + if boundary < ctx.base + pos { + log::debug!("Boundary {boundary} in past"); + // ignore passed boundaries + self.current_suggested = None; + continue; + } + + if boundary > ctx.base + ctx.total { + log::debug!("Boundary {boundary} in future"); + // boundary in future, cannot decide yet + return self.chunker.scan(data, ctx); + } + + let chunk_size = (boundary - ctx.base) as usize; + if chunk_size < self.chunker.chunk_size_min { + log::debug!("Chunk size {chunk_size} below minimum chunk size"); + // chunk too small, ignore boundary + self.current_suggested = None; + continue; + } + + if chunk_size <= self.chunker.chunk_size_max { + self.current_suggested = None; + // calculate boundary relative to start of given data buffer + let len = chunk_size - pos as usize; + if len == 0 { + // passed this one, previous scan did not know about boundary just yet + return self.chunker.scan(data, ctx); + } + self.chunker.reset(); + log::debug!( + "Chunk at suggested boundary: {boundary}, chunk size: {chunk_size}" + ); + return len; + } + + log::debug!("Chunk {chunk_size} too big, regular scan"); + // chunk too big, cannot decide yet + // scan for hash based chunk boundary instead + return self.chunker.scan(data, ctx); + } + + if let Ok(boundary) = self.suggested_boundaries.try_recv() { + self.current_suggested = Some(boundary); + } else { + log::debug!("No suggested boundary, regular scan"); + return self.chunker.scan(data, ctx); + } } - - //(self.h & 0x1ffff) <= 2 //THIS IS SLOW!!! 
- - //(self.h & self.break_test_mask) <= 2 // Bad on 0 streams - - (self.h & self.break_test_mask) >= self.break_test_minimum } - // This is the original implementation from casync - /* - #[inline(always)] - fn shall_break_orig(&self) -> bool { - - if self.chunk_size >= self.chunk_size_max { return true; } - - if self.chunk_size < self.chunk_size_min { return false; } - - (self.h % self.discriminator) == (self.discriminator - 1) + fn reset(&mut self) { + self.chunker.reset(); } - */ } #[test] @@ -209,17 +321,18 @@ fn test_chunker1() { buffer.push(byte); } } - let mut chunker = Chunker::new(64 * 1024); + let mut chunker = ChunkerImpl::new(64 * 1024); let mut pos = 0; let mut last = 0; let mut chunks1: Vec<(usize, usize)> = vec![]; let mut chunks2: Vec<(usize, usize)> = vec![]; + let ctx = Context::default(); // test1: feed single bytes while pos < buffer.len() { - let k = chunker.scan(&buffer[pos..pos + 1]); + let k = chunker.scan(&buffer[pos..pos + 1], &ctx); pos += 1; if k != 0 { let prev = last; @@ -229,13 +342,13 @@ fn test_chunker1() { } chunks1.push((last, buffer.len() - last)); - let mut chunker = Chunker::new(64 * 1024); + let mut chunker = ChunkerImpl::new(64 * 1024); let mut pos = 0; // test2: feed with whole buffer while pos < buffer.len() { - let k = chunker.scan(&buffer[pos..]); + let k = chunker.scan(&buffer[pos..], &ctx); if k != 0 { chunks2.push((pos, k)); pos += k; @@ -269,3 +382,97 @@ fn test_chunker1() { panic!("got different chunks"); } } + +#[test] +fn test_suggested_boundary() { + let mut buffer = Vec::new(); + + for i in 0..(256 * 1024) { + for j in 0..4 { + let byte = ((i >> (j << 3)) & 0xff) as u8; + buffer.push(byte); + } + } + let (tx, rx) = std::sync::mpsc::channel(); + let mut chunker = PayloadChunker::new(64 * 1024, rx); + + // Suggest chunk boundary within regular chunk + tx.send(32 * 1024).unwrap(); + // Suggest chunk boundary within regular chunk, resulting chunk being 0 + tx.send(32 * 1024).unwrap(); + // Suggest chunk boundary in the past, must be ignored + tx.send(0).unwrap(); + // Suggest chunk boundary aligned with regular boundary + tx.send(405521).unwrap(); + + let mut pos = 0; + let mut last = 0; + + let mut chunks1: Vec<(usize, usize)> = vec![]; + let mut chunks2: Vec<(usize, usize)> = vec![]; + let mut ctx = Context::default(); + + // test1: feed single bytes with suggested boundary + while pos < buffer.len() { + ctx.total += 1; + let k = chunker.scan(&buffer[pos..pos + 1], &ctx); + pos += 1; + if k != 0 { + let prev = last; + last = pos; + ctx.base += pos as u64; + ctx.total = 0; + chunks1.push((prev, pos - prev)); + } + } + chunks1.push((last, buffer.len() - last)); + + let mut pos = 0; + let mut ctx = Context::default(); + ctx.total = buffer.len() as u64; + chunker.reset(); + // Suggest chunk boundary within regular chunk + tx.send(32 * 1024).unwrap(); + // Suggest chunk boundary within regular chunk, + // resulting chunk being too small and therefore ignored + tx.send(32 * 1024).unwrap(); + // Suggest chunk boundary in the past, must be ignored + tx.send(0).unwrap(); + // Suggest chunk boundary aligned with regular boundary + tx.send(405521).unwrap(); + + while pos < buffer.len() { + let k = chunker.scan(&buffer[pos..], &ctx); + if k != 0 { + chunks2.push((pos, k)); + pos += k; + ctx.base += pos as u64; + ctx.total = (buffer.len() - pos) as u64; + } else { + break; + } + } + + chunks2.push((pos, buffer.len() - pos)); + + if chunks1 != chunks2 { + let mut size1 = 0; + for (_offset, len) in &chunks1 { + size1 += len; + } + println!("Chunks1: 
{size1}\n{chunks1:?}\n"); + + let mut size2 = 0; + for (_offset, len) in &chunks2 { + size2 += len; + } + println!("Chunks2: {size2}\n{chunks2:?}\n"); + + panic!("got different chunks"); + } + + let expected_sizes = [32768, 110609, 229376, 32768, 262144, 262144, 118767]; + for ((_, chunk_size), expected) in chunks1.iter().zip(expected_sizes.iter()) { + assert_eq!(chunk_size, expected); + } +} diff --git a/pbs-datastore/src/data_blob.rs b/pbs-datastore/src/data_blob.rs index 4119c4a4b..0fb4d44c2 100644 --- a/pbs-datastore/src/data_blob.rs +++ b/pbs-datastore/src/data_blob.rs @@ -30,7 +30,7 @@ pub struct ChunkInfo { /// ".didx"). /// pub struct DataBlob { - raw_data: Vec, // tagged, compressed, encryped data + raw_data: Vec, // tagged, compressed, encrypted data } impl DataBlob { @@ -56,13 +56,13 @@ impl DataBlob { /// accessor to crc32 checksum pub fn crc(&self) -> u32 { - let crc_o = proxmox_lang::offsetof!(DataBlobHeader, crc); + let crc_o = std::mem::offset_of!(DataBlobHeader, crc); u32::from_le_bytes(self.raw_data[crc_o..crc_o + 4].try_into().unwrap()) } // set the CRC checksum field pub fn set_crc(&mut self, crc: u32) { - let crc_o = proxmox_lang::offsetof!(DataBlobHeader, crc); + let crc_o = std::mem::offset_of!(DataBlobHeader, crc); self.raw_data[crc_o..crc_o + 4].copy_from_slice(&crc.to_le_bytes()); } @@ -93,28 +93,42 @@ impl DataBlob { bail!("data blob too large ({} bytes).", data.len()); } - let mut blob = if let Some(config) = config { - let compr_data; - let (_compress, data, magic) = if compress { - compr_data = zstd::bulk::compress(data, 1)?; - // Note: We only use compression if result is shorter - if compr_data.len() < data.len() { - (true, &compr_data[..], ENCR_COMPR_BLOB_MAGIC_1_0) - } else { - (false, data, ENCRYPTED_BLOB_MAGIC_1_0) - } - } else { - (false, data, ENCRYPTED_BLOB_MAGIC_1_0) - }; + let header_len = if config.is_some() { + std::mem::size_of::() + } else { + std::mem::size_of::() + }; - let header_len = std::mem::size_of::(); + let mut compressed = false; + let mut data_compressed = vec![0u8; header_len + data.len()]; + if compress { + match zstd_safe::compress(&mut data_compressed[header_len..], data, 1) { + Ok(size) if size <= data.len() => { + data_compressed.truncate(header_len + size); + compressed = true; + } + Err(err) if !zstd_error_is_target_too_small(err) => { + log::warn!("zstd compression error: {err}"); + } + _ => {} + } + } + + let (magic, encryption_source) = match (compressed, config.is_some()) { + (true, true) => (ENCR_COMPR_BLOB_MAGIC_1_0, &data_compressed[header_len..]), + (true, false) => (COMPRESSED_BLOB_MAGIC_1_0, &data_compressed[header_len..]), + (false, true) => (ENCRYPTED_BLOB_MAGIC_1_0, data), + (false, false) => { + (&mut data_compressed[header_len..]).write_all(data)?; + (UNCOMPRESSED_BLOB_MAGIC_1_0, data) + } + }; + + let raw_data = if let Some(config) = config { let mut raw_data = Vec::with_capacity(data.len() + header_len); let dummy_head = EncryptedDataBlobHeader { - head: DataBlobHeader { - magic: [0u8; 8], - crc: [0; 4], - }, + head: DataBlobHeader { magic, crc: [0; 4] }, iv: [0u8; 16], tag: [0u8; 16], }; @@ -122,7 +136,7 @@ impl DataBlob { raw_data.write_le_value(dummy_head)?; } - let (iv, tag) = Self::encrypt_to(config, data, &mut raw_data)?; + let (iv, tag) = Self::encrypt_to(config, encryption_source, &mut raw_data)?; let head = EncryptedDataBlobHeader { head: DataBlobHeader { magic, crc: [0; 4] }, @@ -134,45 +148,17 @@ impl DataBlob { (&mut raw_data[0..header_len]).write_le_value(head)?; } - DataBlob { raw_data } + 
raw_data } else { - let max_data_len = data.len() + std::mem::size_of::(); - if compress { - let mut comp_data = Vec::with_capacity(max_data_len); - - let head = DataBlobHeader { - magic: COMPRESSED_BLOB_MAGIC_1_0, - crc: [0; 4], - }; - unsafe { - comp_data.write_le_value(head)?; - } - - zstd::stream::copy_encode(data, &mut comp_data, 1)?; - - if comp_data.len() < max_data_len { - let mut blob = DataBlob { - raw_data: comp_data, - }; - blob.set_crc(blob.compute_crc()); - return Ok(blob); - } - } - - let mut raw_data = Vec::with_capacity(max_data_len); - - let head = DataBlobHeader { - magic: UNCOMPRESSED_BLOB_MAGIC_1_0, - crc: [0; 4], - }; + let head = DataBlobHeader { magic, crc: [0; 4] }; unsafe { - raw_data.write_le_value(head)?; + (&mut data_compressed[0..header_len]).write_le_value(head)?; } - raw_data.extend_from_slice(data); - DataBlob { raw_data } + data_compressed }; + let mut blob = DataBlob { raw_data }; blob.set_crc(blob.compute_crc()); Ok(blob) @@ -212,7 +198,7 @@ impl DataBlob { let data_start = std::mem::size_of::(); let mut reader = &self.raw_data[data_start..]; let data = zstd::stream::decode_all(&mut reader)?; - // zstd::block::decompress is abou 10% slower + // zstd::block::decompress is about 10% slower // let data = zstd::block::decompress(&self.raw_data[data_start..], MAX_BLOB_SIZE)?; if let Some(digest) = digest { Self::verify_digest(&data, None, digest)?; @@ -472,7 +458,7 @@ impl DataBlob { /// Builder for chunk DataBlobs /// /// Main purpose is to centralize digest computation. Digest -/// computation differ for encryped chunk, and this interface ensures that +/// computation differ for encrypted chunk, and this interface ensures that /// we always compute the correct one. pub struct DataChunkBuilder<'a, 'b> { config: Option<&'b CryptConfig>, @@ -562,3 +548,114 @@ impl<'a, 'b> DataChunkBuilder<'a, 'b> { chunk_builder.build() } } + +/// Check if the error code returned by `zstd_safe::compress`, or anything else that does FFI calls +/// into zstd code, was `70` 'Destination buffer is too small' by subtracting the error code from +/// `0` (with underflow), see `ERR_getErrorCode` in +/// https://github.com/facebook/zstd/blob/dev/lib/common/error_private.h +/// +/// There is a test below to ensure we catch any change in the interface or internal value. 
+fn zstd_error_is_target_too_small(err: usize) -> bool { + // TODO: when zstd_sys >= 2.0.9 is available, we can use + // ZSTD_getErrorCode and the enum to use the public zstd API + let (real_code, _) = 0usize.overflowing_sub(err); + // see ZSTD_ErrorCode in https://github.com/facebook/zstd/blob/dev/lib/zstd_errors.h + real_code == 70 // ZSTD_error_dstSize_tooSmall +} + +#[cfg(test)] +mod test { + use pbs_tools::crypt_config::CryptConfig; + + use super::{zstd_error_is_target_too_small, DataChunkBuilder}; + + const TEST_DATA_LEN: usize = 50; + + fn build_test_data() -> Vec { + let mut data = Vec::with_capacity(TEST_DATA_LEN); + for i in 0..TEST_DATA_LEN / 10 { + for _ in 0..10 { + data.push(i as u8); + } + } + data + } + + #[test] + fn unencrypted_uncompressed() { + let data = build_test_data(); + let (chunk, digest) = DataChunkBuilder::new(&data) + .compress(false) + .build() + .expect("could not create unencrypted, uncompressed chunk"); + + let data_decoded = chunk + .decode(None, Some(&digest)) + .expect("cannot decode unencrypted, uncompressed chunk"); + assert_eq!(data, data_decoded); + } + + #[test] + fn unencrypted_compressed() { + let data = build_test_data(); + let (chunk, digest) = DataChunkBuilder::new(&data) + .compress(true) + .build() + .expect("could not create unencrypted, compressed chunk"); + + let data_decoded = chunk + .decode(None, Some(&digest)) + .expect("cannot decode unencrypted, compressed chunk"); + assert_eq!(data, data_decoded); + } + + #[test] + fn encrypted_uncompressed() { + let data = build_test_data(); + let crypt_config = CryptConfig::new([9; 32]).expect("could not create crypt config"); + let (chunk, digest) = DataChunkBuilder::new(&data) + .compress(false) + .crypt_config(&crypt_config) + .build() + .expect("could not create encrypted, uncompressed chunk"); + + let data_decoded = chunk + .decode(Some(&crypt_config), Some(&digest)) + .expect("cannot decode encrypted, uncompressed chunk"); + assert_eq!(data, data_decoded); + } + + #[test] + fn encrypted_compressed() { + let data = build_test_data(); + let crypt_config = CryptConfig::new([9; 32]).expect("could not create crypt config"); + let (chunk, digest) = DataChunkBuilder::new(&data) + .compress(true) + .crypt_config(&crypt_config) + .build() + .expect("could not create encrypted, compressed chunk"); + + let data_decoded = chunk + .decode(Some(&crypt_config), Some(&digest)) + .expect("cannot decode encrypted, compressed chunk"); + assert_eq!(data, data_decoded); + } + + #[test] + /// test for the error code internal logic of zstd so we catch any interface/value changes on + /// (package) compile time + fn zstd_assert_dst_size_to_small_error_code_abi() { + let data = &build_test_data(); + let mut target = Vec::new(); + match zstd_safe::compress(&mut target, data, 1) { + Ok(_) => panic!("unexpected success with zero-sized buffer"), + Err(err) => { + if !zstd_error_is_target_too_small(err) { + panic!( + "unexpected error code {err}, check test validity and zstd for changes!" 
+ ); + } + } + } + } +} diff --git a/pbs-datastore/src/data_blob_writer.rs b/pbs-datastore/src/data_blob_writer.rs deleted file mode 100644 index 30d9645fc..000000000 --- a/pbs-datastore/src/data_blob_writer.rs +++ /dev/null @@ -1,212 +0,0 @@ -use std::io::{Seek, SeekFrom, Write}; -use std::sync::Arc; - -use anyhow::Error; - -use proxmox_io::WriteExt; - -use pbs_tools::crypt_config::CryptConfig; - -use crate::checksum_writer::ChecksumWriter; -use crate::crypt_writer::CryptWriter; -use crate::file_formats::{self, DataBlobHeader, EncryptedDataBlobHeader}; - -enum BlobWriterState<'writer, W: Write> { - Uncompressed { - csum_writer: ChecksumWriter, - }, - Compressed { - compr: zstd::stream::write::Encoder<'writer, ChecksumWriter>, - }, - Encrypted { - crypt_writer: CryptWriter>, - }, - EncryptedCompressed { - compr: zstd::stream::write::Encoder<'writer, CryptWriter>>, - }, -} - -/// Data blob writer -pub struct DataBlobWriter<'writer, W: Write> { - state: BlobWriterState<'writer, W>, -} - -impl DataBlobWriter<'_, W> { - pub fn new_uncompressed(mut writer: W) -> Result { - writer.seek(SeekFrom::Start(0))?; - let head = DataBlobHeader { - magic: file_formats::UNCOMPRESSED_BLOB_MAGIC_1_0, - crc: [0; 4], - }; - unsafe { - writer.write_le_value(head)?; - } - let csum_writer = ChecksumWriter::new(writer, None); - Ok(Self { - state: BlobWriterState::Uncompressed { csum_writer }, - }) - } - - pub fn new_compressed(mut writer: W) -> Result { - writer.seek(SeekFrom::Start(0))?; - let head = DataBlobHeader { - magic: file_formats::COMPRESSED_BLOB_MAGIC_1_0, - crc: [0; 4], - }; - unsafe { - writer.write_le_value(head)?; - } - let csum_writer = ChecksumWriter::new(writer, None); - let compr = zstd::stream::write::Encoder::new(csum_writer, 1)?; - Ok(Self { - state: BlobWriterState::Compressed { compr }, - }) - } - - pub fn new_encrypted(mut writer: W, config: Arc) -> Result { - writer.seek(SeekFrom::Start(0))?; - let head = EncryptedDataBlobHeader { - head: DataBlobHeader { - magic: file_formats::ENCRYPTED_BLOB_MAGIC_1_0, - crc: [0; 4], - }, - iv: [0u8; 16], - tag: [0u8; 16], - }; - unsafe { - writer.write_le_value(head)?; - } - - let csum_writer = ChecksumWriter::new(writer, None); - let crypt_writer = CryptWriter::new(csum_writer, config)?; - Ok(Self { - state: BlobWriterState::Encrypted { crypt_writer }, - }) - } - - pub fn new_encrypted_compressed( - mut writer: W, - config: Arc, - ) -> Result { - writer.seek(SeekFrom::Start(0))?; - let head = EncryptedDataBlobHeader { - head: DataBlobHeader { - magic: file_formats::ENCR_COMPR_BLOB_MAGIC_1_0, - crc: [0; 4], - }, - iv: [0u8; 16], - tag: [0u8; 16], - }; - unsafe { - writer.write_le_value(head)?; - } - - let csum_writer = ChecksumWriter::new(writer, None); - let crypt_writer = CryptWriter::new(csum_writer, config)?; - let compr = zstd::stream::write::Encoder::new(crypt_writer, 1)?; - Ok(Self { - state: BlobWriterState::EncryptedCompressed { compr }, - }) - } - - pub fn finish(self) -> Result { - match self.state { - BlobWriterState::Uncompressed { csum_writer } => { - // write CRC - let (mut writer, crc, _) = csum_writer.finish()?; - let head = DataBlobHeader { - magic: file_formats::UNCOMPRESSED_BLOB_MAGIC_1_0, - crc: crc.to_le_bytes(), - }; - - writer.seek(SeekFrom::Start(0))?; - unsafe { - writer.write_le_value(head)?; - } - - Ok(writer) - } - BlobWriterState::Compressed { compr } => { - let csum_writer = compr.finish()?; - let (mut writer, crc, _) = csum_writer.finish()?; - - let head = DataBlobHeader { - magic: file_formats::COMPRESSED_BLOB_MAGIC_1_0, 
- crc: crc.to_le_bytes(), - }; - - writer.seek(SeekFrom::Start(0))?; - unsafe { - writer.write_le_value(head)?; - } - - Ok(writer) - } - BlobWriterState::Encrypted { crypt_writer } => { - let (csum_writer, iv, tag) = crypt_writer.finish()?; - let (mut writer, crc, _) = csum_writer.finish()?; - - let head = EncryptedDataBlobHeader { - head: DataBlobHeader { - magic: file_formats::ENCRYPTED_BLOB_MAGIC_1_0, - crc: crc.to_le_bytes(), - }, - iv, - tag, - }; - writer.seek(SeekFrom::Start(0))?; - unsafe { - writer.write_le_value(head)?; - } - Ok(writer) - } - BlobWriterState::EncryptedCompressed { compr } => { - let crypt_writer = compr.finish()?; - let (csum_writer, iv, tag) = crypt_writer.finish()?; - let (mut writer, crc, _) = csum_writer.finish()?; - - let head = EncryptedDataBlobHeader { - head: DataBlobHeader { - magic: file_formats::ENCR_COMPR_BLOB_MAGIC_1_0, - crc: crc.to_le_bytes(), - }, - iv, - tag, - }; - writer.seek(SeekFrom::Start(0))?; - unsafe { - writer.write_le_value(head)?; - } - Ok(writer) - } - } - } -} - -impl Write for DataBlobWriter<'_, W> { - fn write(&mut self, buf: &[u8]) -> Result { - match self.state { - BlobWriterState::Uncompressed { - ref mut csum_writer, - } => csum_writer.write(buf), - BlobWriterState::Compressed { ref mut compr } => compr.write(buf), - BlobWriterState::Encrypted { - ref mut crypt_writer, - } => crypt_writer.write(buf), - BlobWriterState::EncryptedCompressed { ref mut compr } => compr.write(buf), - } - } - - fn flush(&mut self) -> Result<(), std::io::Error> { - match self.state { - BlobWriterState::Uncompressed { - ref mut csum_writer, - } => csum_writer.flush(), - BlobWriterState::Compressed { ref mut compr } => compr.flush(), - BlobWriterState::Encrypted { - ref mut crypt_writer, - } => crypt_writer.flush(), - BlobWriterState::EncryptedCompressed { ref mut compr } => compr.flush(), - } - } -} diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 0685cc845..d0f3c53ac 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -2,11 +2,11 @@ use std::collections::{HashMap, HashSet}; use std::io::{self, Write}; use std::os::unix::io::AsRawFd; use std::path::{Path, PathBuf}; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, LazyLock, Mutex}; use anyhow::{bail, format_err, Error}; -use lazy_static::lazy_static; use nix::unistd::{unlinkat, UnlinkatFlags}; +use tracing::{info, warn}; use proxmox_human_byte::HumanByte; use proxmox_schema::ApiType; @@ -15,12 +15,11 @@ use proxmox_sys::error::SysError; use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions}; use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard}; use proxmox_sys::process_locker::ProcessLockSharedGuard; -use proxmox_sys::WorkerTaskContext; -use proxmox_sys::{task_log, task_warn}; +use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreFSyncLevel, - DatastoreTuning, GarbageCollectionStatus, Operation, UPID, + DatastoreTuning, GarbageCollectionStatus, MaintenanceMode, MaintenanceType, Operation, UPID, }; use crate::backup_info::{BackupDir, BackupGroup, BackupGroupDeleteStats}; @@ -29,14 +28,12 @@ use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter}; use crate::fixed_index::{FixedIndexReader, FixedIndexWriter}; use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive}; use crate::index::IndexFile; -use crate::manifest::{archive_type, ArchiveType}; +use crate::manifest::ArchiveType; use 
crate::task_tracking::{self, update_active_operations}; use crate::DataBlob; -lazy_static! { - static ref DATASTORE_MAP: Mutex>> = - Mutex::new(HashMap::new()); -} +static DATASTORE_MAP: LazyLock>>> = + LazyLock::new(|| Mutex::new(HashMap::new())); /// checks if auth_id is owner, or, if owner is a token, if /// auth_id is the user of the token @@ -145,7 +142,7 @@ impl DataStore { ) -> Result, Error> { // Avoid TOCTOU between checking maintenance mode and updating active operation counter, as // we use it to decide whether it is okay to delete the datastore. - let config_lock = pbs_config::datastore::lock_config()?; + let _config_lock = pbs_config::datastore::lock_config()?; // we could use the ConfigVersionCache's generation for staleness detection, but we load // the config anyway -> just use digest, additional benefit: manual changes get detected @@ -158,13 +155,6 @@ impl DataStore { } } - if let Some(operation) = operation { - update_active_operations(name, operation, 1)?; - } - - // Our operation is registered, unlock the config. - drop(config_lock); - let mut datastore_cache = DATASTORE_MAP.lock().unwrap(); let entry = datastore_cache.get(name); @@ -172,6 +162,9 @@ impl DataStore { let chunk_store = if let Some(datastore) = &entry { let last_digest = datastore.last_digest.as_ref(); if let Some(true) = last_digest.map(|last_digest| last_digest == &digest) { + if let Some(operation) = operation { + update_active_operations(name, operation, 1)?; + } return Ok(Arc::new(Self { inner: Arc::clone(datastore), operation, @@ -195,6 +188,10 @@ impl DataStore { let datastore = Arc::new(datastore); datastore_cache.insert(name.to_string(), datastore.clone()); + if let Some(operation) = operation { + update_active_operations(name, operation, 1)?; + } + Ok(Arc::new(Self { inner: datastore, operation, @@ -377,7 +374,7 @@ impl DataStore { P: AsRef, { let filename = filename.as_ref(); - let out: Box = match archive_type(filename)? { + let out: Box = match ArchiveType::from_path(filename)? { ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?), ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?), _ => bail!("cannot open index file of unknown type: {:?}", filename), @@ -844,7 +841,7 @@ impl DataStore { ListGroupsType::new(Arc::clone(self), ns, ty) } - /// Get a streaming iter over top-level backup groups of a datatstore of a particular type, + /// Get a streaming iter over top-level backup groups of a datastore of a particular type, /// filtered by `Ok` results /// /// The iterated item's result is already unwrapped, if it contained an error it will be @@ -936,7 +933,7 @@ impl DataStore { continue; } }; - if let Ok(archive_type) = archive_type(&path) { + if let Ok(archive_type) = ArchiveType::from_path(&path) { if archive_type == ArchiveType::FixedIndex || archive_type == ArchiveType::DynamicIndex { @@ -965,8 +962,7 @@ impl DataStore { let digest = index.index_digest(pos).unwrap(); if !self.inner.chunk_store.cond_touch_chunk(digest, false)? 
{ let hex = hex::encode(digest); - task_warn!( - worker, + warn!( "warning: unable to access non-existent chunk {hex}, required by {file_name:?}" ); @@ -1012,7 +1008,7 @@ impl DataStore { match std::fs::File::open(&img) { Ok(file) => { - if let Ok(archive_type) = archive_type(&img) { + if let Ok(archive_type) = ArchiveType::from_path(&img) { if archive_type == ArchiveType::FixedIndex { let index = FixedIndexReader::new(file).map_err(|e| { format_err!("can't read index '{}' - {}", img.to_string_lossy(), e) @@ -1032,22 +1028,17 @@ impl DataStore { let percentage = (i + 1) * 100 / image_count; if percentage > last_percentage { - task_log!( - worker, - "marked {}% ({} of {} index files)", - percentage, + info!( + "marked {percentage}% ({} of {image_count} index files)", i + 1, - image_count, ); last_percentage = percentage; } } if strange_paths_count > 0 { - task_log!( - worker, - "found (and marked) {} index files outside of expected directory scheme", - strange_paths_count, + info!( + "found (and marked) {strange_paths_count} index files outside of expected directory scheme" ); } @@ -1085,11 +1076,11 @@ impl DataStore { ..Default::default() }; - task_log!(worker, "Start GC phase1 (mark used chunks)"); + info!("Start GC phase1 (mark used chunks)"); self.mark_used_chunks(&mut gc_status, worker)?; - task_log!(worker, "Start GC phase2 (sweep unused chunks)"); + info!("Start GC phase2 (sweep unused chunks)"); self.inner.chunk_store.sweep_unused_chunks( oldest_writer, phase1_start_time, @@ -1097,30 +1088,27 @@ impl DataStore { worker, )?; - task_log!( - worker, + info!( "Removed garbage: {}", HumanByte::from(gc_status.removed_bytes), ); - task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks); + info!("Removed chunks: {}", gc_status.removed_chunks); if gc_status.pending_bytes > 0 { - task_log!( - worker, + info!( "Pending removals: {} (in {} chunks)", HumanByte::from(gc_status.pending_bytes), gc_status.pending_chunks, ); } if gc_status.removed_bad > 0 { - task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad); + info!("Removed bad chunks: {}", gc_status.removed_bad); } if gc_status.still_bad > 0 { - task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad); + info!("Leftover bad chunks: {}", gc_status.still_bad); } - task_log!( - worker, + info!( "Original data usage: {}", HumanByte::from(gc_status.index_data_bytes), ); @@ -1128,15 +1116,13 @@ impl DataStore { if gc_status.index_data_bytes > 0 { let comp_per = (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64; - task_log!( - worker, - "On-Disk usage: {} ({:.2}%)", - HumanByte::from(gc_status.disk_bytes), - comp_per, + info!( + "On-Disk usage: {} ({comp_per:.2}%)", + HumanByte::from(gc_status.disk_bytes) ); } - task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks); + info!("On-Disk chunks: {}", gc_status.disk_chunks); let deduplication_factor = if gc_status.disk_bytes > 0 { (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64) @@ -1144,11 +1130,11 @@ impl DataStore { 1.0 }; - task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor); + info!("Deduplication factor: {deduplication_factor:.2}"); if gc_status.disk_chunks > 0 { let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64); - task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk)); + info!("Average chunk size: {}", HumanByte::from(avg_chunk)); } if let Ok(serialized) = serde_json::to_string(&gc_status) { @@ -1380,17 +1366,17 @@ impl DataStore { /// Destroy a datastore. 
This requires that there are no active operations on the datastore. /// /// This is a synchronous operation and should be run in a worker-thread. - pub fn destroy( - name: &str, - destroy_data: bool, - worker: &dyn WorkerTaskContext, - ) -> Result<(), Error> { + pub fn destroy(name: &str, destroy_data: bool) -> Result<(), Error> { let config_lock = pbs_config::datastore::lock_config()?; let (mut config, _digest) = pbs_config::datastore::config()?; let mut datastore_config: DataStoreConfig = config.lookup("datastore", name)?; - datastore_config.maintenance_mode = Some("type=delete".to_string()); + datastore_config.set_maintenance_mode(Some(MaintenanceMode { + ty: MaintenanceType::Delete, + message: None, + }))?; + config.set_data(name, "datastore", &datastore_config)?; pbs_config::datastore::save_config(&config)?; drop(config_lock); @@ -1408,13 +1394,13 @@ impl DataStore { let remove = |subdir, ok: &mut bool| { if let Err(err) = std::fs::remove_dir_all(base.join(subdir)) { if err.kind() != io::ErrorKind::NotFound { - task_warn!(worker, "failed to remove {subdir:?} subdirectory: {err}"); + warn!("failed to remove {subdir:?} subdirectory: {err}"); *ok = false; } } }; - task_log!(worker, "Deleting datastore data..."); + info!("Deleting datastore data..."); remove("ns", &mut ok); // ns first remove("ct", &mut ok); remove("vm", &mut ok); @@ -1423,7 +1409,7 @@ impl DataStore { if ok { if let Err(err) = std::fs::remove_file(base.join(".gc-status")) { if err.kind() != io::ErrorKind::NotFound { - task_warn!(worker, "failed to remove .gc-status file: {err}"); + warn!("failed to remove .gc-status file: {err}"); ok = false; } } @@ -1437,7 +1423,7 @@ impl DataStore { // now the config if ok { - task_log!(worker, "Removing datastore from config..."); + info!("Removing datastore from config..."); let _lock = pbs_config::datastore::lock_config()?; let _ = config.sections.remove(name); pbs_config::datastore::save_config(&config)?; @@ -1448,35 +1434,32 @@ impl DataStore { if ok { if let Err(err) = std::fs::remove_file(base.join(".lock")) { if err.kind() != io::ErrorKind::NotFound { - task_warn!(worker, "failed to remove .lock file: {err}"); + warn!("failed to remove .lock file: {err}"); ok = false; } } } if ok { - task_log!(worker, "Finished deleting data."); + info!("Finished deleting data."); match std::fs::remove_dir(base) { - Ok(()) => task_log!(worker, "Removed empty datastore directory."), + Ok(()) => info!("Removed empty datastore directory."), Err(err) if err.kind() == io::ErrorKind::NotFound => { // weird, but ok } Err(err) if err.is_errno(nix::errno::Errno::EBUSY) => { - task_warn!( - worker, - "Cannot delete datastore directory (is it a mount point?)." 
- ) + warn!("Cannot delete datastore directory (is it a mount point?).") } Err(err) if err.is_errno(nix::errno::Errno::ENOTEMPTY) => { - task_warn!(worker, "Datastore directory not empty, not deleting.") + warn!("Datastore directory not empty, not deleting.") } Err(err) => { - task_warn!(worker, "Failed to remove datastore directory: {err}"); + warn!("Failed to remove datastore directory: {err}"); } } } else { - task_log!(worker, "There were errors deleting data."); + info!("There were errors deleting data."); } } diff --git a/pbs-datastore/src/dynamic_index.rs b/pbs-datastore/src/dynamic_index.rs index 71a5082e1..0e99ce585 100644 --- a/pbs-datastore/src/dynamic_index.rs +++ b/pbs-datastore/src/dynamic_index.rs @@ -23,7 +23,7 @@ use crate::data_blob::{DataBlob, DataChunkBuilder}; use crate::file_formats; use crate::index::{ChunkReadInfo, IndexFile}; use crate::read_chunk::ReadChunk; -use crate::Chunker; +use crate::{Chunker, ChunkerImpl}; /// Header format definition for dynamic index files (`.dixd`) #[repr(C)] @@ -72,6 +72,11 @@ impl DynamicEntry { pub fn end(&self) -> u64 { u64::from_le(self.end_le) } + + #[inline] + pub fn digest(&self) -> [u8; 32] { + self.digest + } } pub struct DynamicIndexReader { @@ -348,7 +353,7 @@ impl DynamicIndexWriter { self.writer.flush()?; - let csum_offset = proxmox_lang::offsetof!(DynamicIndexHeader, index_csum); + let csum_offset = std::mem::offset_of!(DynamicIndexHeader, index_csum); self.writer.seek(SeekFrom::Start(csum_offset as u64))?; let csum = self.csum.take().unwrap(); @@ -392,7 +397,7 @@ impl DynamicIndexWriter { pub struct DynamicChunkWriter { index: DynamicIndexWriter, closed: bool, - chunker: Chunker, + chunker: ChunkerImpl, stat: ChunkStat, chunk_offset: usize, last_chunk: usize, @@ -404,7 +409,7 @@ impl DynamicChunkWriter { Self { index, closed: false, - chunker: Chunker::new(chunk_size), + chunker: ChunkerImpl::new(chunk_size), stat: ChunkStat::new(0), chunk_offset: 0, last_chunk: 0, @@ -489,7 +494,8 @@ impl Write for DynamicChunkWriter { fn write(&mut self, data: &[u8]) -> std::result::Result { let chunker = &mut self.chunker; - let pos = chunker.scan(data); + let ctx = crate::chunker::Context::default(); + let pos = chunker.scan(data, &ctx); if pos > 0 { self.chunk_buffer.extend_from_slice(&data[0..pos]); diff --git a/pbs-datastore/src/fixed_index.rs b/pbs-datastore/src/fixed_index.rs index 9ed943fa7..d67c388ec 100644 --- a/pbs-datastore/src/fixed_index.rs +++ b/pbs-datastore/src/fixed_index.rs @@ -349,7 +349,7 @@ impl FixedIndexWriter { self.unmap()?; - let csum_offset = proxmox_lang::offsetof!(FixedIndexHeader, index_csum); + let csum_offset = std::mem::offset_of!(FixedIndexHeader, index_csum); self.file.seek(SeekFrom::Start(csum_offset as u64))?; self.file.write_all(&index_csum)?; self.file.flush()?; diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs index 43050162f..202b09558 100644 --- a/pbs-datastore/src/lib.rs +++ b/pbs-datastore/src/lib.rs @@ -179,7 +179,6 @@ pub mod crypt_reader; pub mod crypt_writer; pub mod data_blob; pub mod data_blob_reader; -pub mod data_blob_writer; pub mod file_formats; pub mod index; pub mod manifest; @@ -196,12 +195,11 @@ pub use backup_info::{BackupDir, BackupGroup, BackupInfo}; pub use checksum_reader::ChecksumReader; pub use checksum_writer::ChecksumWriter; pub use chunk_store::ChunkStore; -pub use chunker::Chunker; +pub use chunker::{Chunker, ChunkerImpl, PayloadChunker}; pub use crypt_reader::CryptReader; pub use crypt_writer::CryptWriter; pub use data_blob::DataBlob; pub use 
data_blob_reader::DataBlobReader; -pub use data_blob_writer::DataBlobWriter; pub use manifest::BackupManifest; pub use store_progress::StoreProgress; diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs index 347af9337..c3df01427 100644 --- a/pbs-datastore/src/manifest.rs +++ b/pbs-datastore/src/manifest.rs @@ -76,11 +76,6 @@ impl ArchiveType { } } -//#[deprecated(note = "use ArchivType::from_path instead")] later... -pub fn archive_type>(archive_name: P) -> Result { - ArchiveType::from_path(archive_name) -} - impl BackupManifest { pub fn new(snapshot: pbs_api_types::BackupDir) -> Self { Self { diff --git a/pbs-datastore/src/prune.rs b/pbs-datastore/src/prune.rs index 96da58267..ad1493bfe 100644 --- a/pbs-datastore/src/prune.rs +++ b/pbs-datastore/src/prune.rs @@ -55,7 +55,7 @@ fn mark_selections Result>( for info in list { let backup_id = info.backup_dir.relative_path(); - if mark.get(&backup_id).is_some() { + if mark.contains_key(&backup_id) { continue; } if info.protected { diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs index ec7a48e54..f9c772079 100644 --- a/pbs-datastore/src/snapshot_reader.rs +++ b/pbs-datastore/src/snapshot_reader.rs @@ -14,7 +14,7 @@ use crate::backup_info::BackupDir; use crate::dynamic_index::DynamicIndexReader; use crate::fixed_index::FixedIndexReader; use crate::index::IndexFile; -use crate::manifest::{archive_type, ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; +use crate::manifest::{ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use crate::DataStore; /// Helper to access the contents of a datastore backup snapshot @@ -138,13 +138,16 @@ impl<'a, F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'a, F> { if self.current_index.is_none() { if let Some(filename) = self.todo_list.pop() { let file = self.snapshot_reader.open_file(&filename)?; - let index: Box = match archive_type(&filename)? { - ArchiveType::FixedIndex => Box::new(FixedIndexReader::new(file)?), - ArchiveType::DynamicIndex => Box::new(DynamicIndexReader::new(file)?), - _ => bail!( - "SnapshotChunkIterator: got unknown file type - internal error" - ), - }; + let index: Box = + match ArchiveType::from_path(&filename)? { + ArchiveType::FixedIndex => Box::new(FixedIndexReader::new(file)?), + ArchiveType::DynamicIndex => { + Box::new(DynamicIndexReader::new(file)?) + } + _ => bail!( + "SnapshotChunkIterator: got unknown file type - internal error" + ), + }; let datastore = DataStore::lookup_datastore( self.snapshot_reader.datastore_name(), @@ -178,7 +181,7 @@ impl<'a, F: Fn(&[u8; 32]) -> bool> SnapshotChunkIterator<'a, F> { let mut todo_list = Vec::new(); for filename in snapshot_reader.file_list() { - match archive_type(filename)? { + match ArchiveType::from_path(filename)? 
{ ArchiveType::FixedIndex | ArchiveType::DynamicIndex => { todo_list.push(filename.to_owned()); } diff --git a/pbs-fuse-loop/Cargo.toml b/pbs-fuse-loop/Cargo.toml index 40a5e744e..188980234 100644 --- a/pbs-fuse-loop/Cargo.toml +++ b/pbs-fuse-loop/Cargo.toml @@ -4,11 +4,11 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "fuse and loop device helpers" +rust-version.workspace = true [dependencies] anyhow.workspace = true futures.workspace = true -lazy_static.workspace = true libc.workspace = true log.workspace = true nix.workspace = true diff --git a/pbs-fuse-loop/src/fuse_loop.rs b/pbs-fuse-loop/src/fuse_loop.rs index 3d0ef1237..d55be3b08 100644 --- a/pbs-fuse-loop/src/fuse_loop.rs +++ b/pbs-fuse-loop/src/fuse_loop.rs @@ -7,6 +7,7 @@ use std::fs::{read_to_string, remove_file, File, OpenOptions}; use std::io::prelude::*; use std::io::SeekFrom; use std::path::{Path, PathBuf}; +use std::sync::LazyLock; use nix::sys::signal::{self, Signal}; use nix::unistd::Pid; @@ -22,9 +23,7 @@ use proxmox_time::epoch_i64; const RUN_DIR: &str = "/run/pbs-loopdev"; -lazy_static::lazy_static! { - static ref LOOPDEV_REGEX: Regex = Regex::new(r"^loop\d+$").unwrap(); -} +static LOOPDEV_REGEX: LazyLock = LazyLock::new(|| Regex::new(r"^loop\d+$").unwrap()); /// Represents an ongoing FUSE-session that has been mapped onto a loop device. /// Create with map_loop, then call 'main' and poll until startup_chan reports diff --git a/pbs-pxar-fuse/src/lib.rs b/pbs-pxar-fuse/src/lib.rs index bf196b6c4..f8531a1ea 100644 --- a/pbs-pxar-fuse/src/lib.rs +++ b/pbs-pxar-fuse/src/lib.rs @@ -5,7 +5,6 @@ use std::ffi::{OsStr, OsString}; use std::future::Future; use std::io; use std::mem; -use std::ops::Range; use std::os::unix::ffi::OsStrExt; use std::path::Path; use std::pin::Pin; @@ -20,7 +19,7 @@ use futures::sink::SinkExt; use futures::stream::{StreamExt, TryStreamExt}; use proxmox_io::vec; -use pxar::accessor::{self, EntryRangeInfo, ReadAt}; +use pxar::accessor::{self, ContentRange, EntryRangeInfo, ReadAt}; use proxmox_fuse::requests::{self, FuseRequest}; use proxmox_fuse::{EntryParam, Fuse, ReplyBufState, Request, ROOT_ID}; @@ -61,12 +60,24 @@ impl Session { options: &OsStr, verbose: bool, mountpoint: &Path, + payload_input_path: Option<&Path>, ) -> Result { // TODO: Add a buffered/caching ReadAt layer? let file = std::fs::File::open(archive_path)?; let file_size = file.metadata()?.len(); let reader: Reader = Arc::new(accessor::sync::FileReader::new(file)); - let accessor = Accessor::new(reader, file_size).await?; + let accessor = if let Some(payload_input) = payload_input_path { + let payload_file = std::fs::File::open(payload_input)?; + let payload_size = payload_file.metadata()?.len(); + let payload_reader: Reader = Arc::new(accessor::sync::FileReader::new(payload_file)); + Accessor::new( + pxar::PxarVariant::Split(reader, (payload_reader, payload_size)), + file_size, + ) + .await? + } else { + Accessor::new(pxar::PxarVariant::Unified(reader), file_size).await? 
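+ // note: PxarVariant::Split reads the archive metadata from the main pxar reader and the file contents from the separate payload reader, while PxarVariant::Unified serves both from a single self-contained archive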
+ }; Self::mount(accessor, options, verbose, mountpoint) } @@ -118,7 +129,7 @@ struct Lookup { inode: u64, parent: u64, entry_range_info: EntryRangeInfo, - content_range: Option>, + content_range: Option, } impl Lookup { @@ -126,7 +137,7 @@ impl Lookup { inode: u64, parent: u64, entry_range_info: EntryRangeInfo, - content_range: Option>, + content_range: Option, ) -> Box { Box::new(Self { refs: AtomicUsize::new(1), @@ -421,13 +432,17 @@ impl SessionImpl { } } - fn open_content(&self, lookup: &LookupRef) -> Result { + async fn open_content<'a>(&'a self, lookup: &'a LookupRef<'a>) -> Result { if is_dir_inode(lookup.inode) { io_return!(libc::EISDIR); } - match lookup.content_range.clone() { - Some(range) => Ok(unsafe { self.accessor.open_contents_at_range(range) }), + match &lookup.content_range { + Some(range) => self + .accessor + .open_contents_at_range(range) + .await + .map_err(|err| err.into()), None => io_return!(libc::EBADF), } } @@ -569,7 +584,7 @@ impl SessionImpl { async fn read(&self, inode: u64, len: usize, offset: u64) -> Result, Error> { let file = self.get_lookup(inode)?; - let content = self.open_content(&file)?; + let content = self.open_content(&file).await?; let mut buf = vec::undefined(len); let mut pos = 0; // fuse' read is different from normal read - no short reads allowed except for EOF! @@ -605,7 +620,7 @@ impl SessionImpl { use pxar::format::XAttr; if let Some(fcaps) = metadata.fcaps { - xattrs.push(XAttr::new(xattr::xattr_name_fcaps().to_bytes(), fcaps.data)); + xattrs.push(XAttr::new(xattr::XATTR_NAME_FCAPS.to_bytes(), fcaps.data)); } // TODO: Special cases: diff --git a/pbs-tape/Cargo.toml b/pbs-tape/Cargo.toml index 970315b7a..4f153feda 100644 --- a/pbs-tape/Cargo.toml +++ b/pbs-tape/Cargo.toml @@ -4,13 +4,13 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "LTO tape support" +rust-version.workspace = true [dependencies] anyhow.workspace = true bitflags.workspace = true endian_trait.workspace = true hex.workspace = true -lazy_static.workspace = true libc.workspace = true log.workspace = true nix.workspace = true @@ -23,6 +23,7 @@ udev.workspace = true proxmox-io.workspace = true proxmox-lang.workspace=true +proxmox-log.workspace=true proxmox-sys.workspace = true proxmox-time.workspace = true proxmox-uuid.workspace = true @@ -34,4 +35,5 @@ proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-router = { workspace = true, features = ["cli", "server"] } pbs-api-types.workspace = true +pbs-buildcfg.workspace = true pbs-config.workspace = true diff --git a/pbs-tape/src/bin/pmt.rs b/pbs-tape/src/bin/pmt.rs index 4a5e08e5e..9e39dbe16 100644 --- a/pbs-tape/src/bin/pmt.rs +++ b/pbs-tape/src/bin/pmt.rs @@ -15,6 +15,7 @@ use anyhow::{bail, Error}; use serde_json::Value; +use proxmox_log::init_cli_logger; use proxmox_router::cli::*; use proxmox_router::RpcEnvironment; use proxmox_schema::{api, ArraySchema, IntegerSchema, Schema, StringSchema}; @@ -799,7 +800,7 @@ fn options( } fn main() -> Result<(), Error> { - init_cli_logger("PBS_LOG", "info"); + init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?; let uid = nix::unistd::Uid::current(); diff --git a/pbs-tape/src/bin/pmtx.rs b/pbs-tape/src/bin/pmtx.rs index 6f34bc448..303353e6b 100644 --- a/pbs-tape/src/bin/pmtx.rs +++ b/pbs-tape/src/bin/pmtx.rs @@ -16,6 +16,7 @@ use std::fs::File; use anyhow::{bail, Error}; use serde_json::Value; +use proxmox_log::init_cli_logger; use proxmox_router::cli::*; use proxmox_router::RpcEnvironment; use proxmox_schema::api; @@ 
-387,7 +388,7 @@ fn scan(param: Value) -> Result<(), Error> { } fn main() -> Result<(), Error> { - init_cli_logger("PBS_LOG", "info"); + init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?; let uid = nix::unistd::Uid::current(); diff --git a/pbs-tape/src/lib.rs b/pbs-tape/src/lib.rs index 8d408b70a..bbbe6c0c5 100644 --- a/pbs-tape/src/lib.rs +++ b/pbs-tape/src/lib.rs @@ -75,6 +75,7 @@ pub struct BlockHeader { bitflags! { /// Header flags (e.g. `END_OF_STREAM` or `INCOMPLETE`) + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub struct BlockHeaderFlags: u8 { /// Marks the last block in a stream. const END_OF_STREAM = 0b00000001; diff --git a/pbs-tape/src/linux_list_drives.rs b/pbs-tape/src/linux_list_drives.rs index 39d2aac79..962276722 100644 --- a/pbs-tape/src/linux_list_drives.rs +++ b/pbs-tape/src/linux_list_drives.rs @@ -3,6 +3,7 @@ use std::fs::{File, OpenOptions}; use std::os::unix::fs::OpenOptionsExt; use std::os::unix::io::AsRawFd; use std::path::{Path, PathBuf}; +use std::sync::LazyLock; use anyhow::{bail, format_err, Error}; use nix::fcntl::{fcntl, FcntlArg, OFlag}; @@ -12,10 +13,8 @@ use proxmox_sys::fs::scan_subdir; use pbs_api_types::{DeviceKind, OptionalDeviceIdentification, TapeDeviceInfo}; -lazy_static::lazy_static! { - static ref SCSI_GENERIC_NAME_REGEX: regex::Regex = - regex::Regex::new(r"^sg\d+$").unwrap(); -} +static SCSI_GENERIC_NAME_REGEX: LazyLock = + LazyLock::new(|| regex::Regex::new(r"^sg\d+$").unwrap()); /// List linux tape changer devices pub fn linux_tape_changer_list() -> Vec { diff --git a/pbs-tape/src/sg_pt_changer.rs b/pbs-tape/src/sg_pt_changer.rs index 3945d18f3..940eed4a6 100644 --- a/pbs-tape/src/sg_pt_changer.rs +++ b/pbs-tape/src/sg_pt_changer.rs @@ -52,7 +52,7 @@ struct AddressAssignmentPage { storage_element_count: u16, first_import_export_element_address: u16, import_export_element_count: u16, - first_tranfer_element_address: u16, + first_transfer_element_address: u16, transfer_element_count: u16, reserved22: u8, reserved23: u8, @@ -412,9 +412,11 @@ pub fn read_element_status(file: &mut F) -> Result for drive in drives.iter_mut() { for drive2 in &page.drives { if drive2.element_address == drive.element_address { - drive.vendor = drive2.vendor.clone(); - drive.model = drive2.model.clone(); - drive.drive_serial_number = drive2.drive_serial_number.clone(); + drive.vendor.clone_from(&drive2.vendor); + drive.model.clone_from(&drive2.model); + drive + .drive_serial_number + .clone_from(&drive2.drive_serial_number); } } } @@ -744,9 +746,18 @@ fn decode_element_status_page( let desc: TransportDescriptor = unsafe { reader.read_be_value()? 
}; let full = (desc.flags1 & 1) != 0; - let volume_tag = subhead.parse_optional_volume_tag(&mut reader, full)?; - subhead.skip_alternate_volume_tag(&mut reader)?; + let volume_tag = match subhead.parse_optional_volume_tag(&mut reader, full) { + Ok(tag) => tag, + Err(err) => { + log::warn!("could not read optional volume tag: {err}"); + None + } + }; + + if let Err(err) = subhead.skip_alternate_volume_tag(&mut reader) { + log::warn!("could not skip alternate volume tag: {err}"); + } result.last_element_address = Some(desc.element_address); diff --git a/pbs-tape/src/sg_tape.rs b/pbs-tape/src/sg_tape.rs index 146d05507..605372036 100644 --- a/pbs-tape/src/sg_tape.rs +++ b/pbs-tape/src/sg_tape.rs @@ -15,6 +15,9 @@ mod volume_statistics; use proxmox_uuid::Uuid; pub use volume_statistics::*; +mod device_status; +pub use device_status::*; + mod tape_alert_flags; pub use tape_alert_flags::*; @@ -28,7 +31,8 @@ use proxmox_io::{ReadExt, WriteExt}; use proxmox_sys::error::SysResult; use pbs_api_types::{ - Lp17VolumeStatistics, LtoDriveAndMediaStatus, LtoTapeDrive, MamAttribute, TapeDensity, + DeviceActivity, Lp17VolumeStatistics, LtoDriveAndMediaStatus, LtoTapeDrive, MamAttribute, + TapeDensity, }; use crate::linux_list_drives::open_lto_tape_device; @@ -78,6 +82,32 @@ impl DataCompressionModePage { } } +#[repr(C, packed)] +#[derive(Endian, Debug, Copy, Clone)] +struct DeviceConfigurationExtensionModePage { + page_code: u8, // 0x10 + sub_page_code: u8, // 0x01 + page_length: u16, // 0x1C + flags4: u8, + modes: u8, + pews: u16, + flags8: u8, + reserved: [u8; 23], +} + +impl DeviceConfigurationExtensionModePage { + /// Sets the Programmable Early Warning Zone to the given amount, rounded up to the next + /// Megabyte (10^6 bytes) up to 2^16 Megabyte (the value will be clamped) + pub fn set_pewz(&mut self, bytes: usize) { + let mbytes = if bytes == 0 { + 0 + } else { + (bytes / 1000 / 1000) + 1 + }; + self.pews = mbytes.clamp(0, u16::MAX as usize) as u16; + } +} + #[repr(C, packed)] #[derive(Endian)] struct MediumConfigurationModePage { @@ -168,6 +198,12 @@ impl SgTape { }) } + /// Read device activity + pub fn device_activity(config: &LtoTapeDrive) -> Result { + let mut file = open_lto_tape_device(&config.path)?; + read_device_activity(&mut file) + } + /// Access to file descriptor - useful for testing pub fn file_mut(&mut self) -> &mut File { &mut self.file @@ -285,6 +321,8 @@ impl SgTape { self.erase_media(fast)? 
} + self.clear_mam_attributes(); + Ok(()) } } @@ -588,8 +626,8 @@ impl SgTape { Err(ScsiError::Sense(SenseInfo { sense_key: 0, asc: 0, - ascq: 2, - })) => { /* LEOM - ignore */ } + ascq: 2 | 7, + })) => { /* LEOM or PEWZ - ignore */ } Err(err) => { proxmox_lang::io_bail!("write filemark failed - {err}"); } @@ -704,9 +742,9 @@ impl SgTape { Err(ScsiError::Sense(SenseInfo { sense_key: 0, asc: 0, - ascq: 2, + ascq: 2 | 7, })) => { - Ok(true) // LEOM + Ok(true) // LEOM or PEWZ } Err(err) => { proxmox_lang::io_bail!("write failed - {err}"); @@ -792,35 +830,17 @@ impl SgTape { Ok(()) } - /// Set important drive options - #[allow(clippy::vec_init_then_push)] - pub fn set_drive_options( + // tries to set the given page with either mode sense 6 or 10 depending on + // the given header size + fn drive_mode_select( &mut self, - compression: Option, - block_length: Option, - buffer_mode: Option, + head: ModeParameterHeader, + block_descriptor: ModeBlockDescriptor, + page: T, ) -> Result<(), Error> { - // Note: Read/Modify/Write - - let (mut head, mut block_descriptor, mut page) = self.read_compression_page()?; - let mut sg_raw = SgRaw::new(&mut self.file, 0)?; sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT); - head.reset_mode_data_len(); // mode_data_len need to be zero - - if let Some(compression) = compression { - page.set_compression(compression); - } - - if let Some(block_length) = block_length { - block_descriptor.set_block_length(block_length)?; - } - - if let Some(buffer_mode) = buffer_mode { - head.set_buffer_mode(buffer_mode); - } - match head { ModeParameterHeader::Long(head) => { let mut data = Vec::new(); @@ -866,6 +886,45 @@ impl SgTape { format_err!("set drive options (mode select(6)) failed - {err}") })?; } + }; + Ok(()) + } + + /// Set important drive options + #[allow(clippy::vec_init_then_push)] + pub fn set_drive_options( + &mut self, + compression: Option, + block_length: Option, + buffer_mode: Option, + ) -> Result<(), Error> { + // Note: Read/Modify/Write + let (mut head, mut block_descriptor, mut page) = self.read_compression_page()?; + + head.reset_mode_data_len(); // mode_data_len need to be zero + + if let Some(compression) = compression { + page.set_compression(compression); + } + + if let Some(block_length) = block_length { + block_descriptor.set_block_length(block_length)?; + } + + if let Some(buffer_mode) = buffer_mode { + head.set_buffer_mode(buffer_mode); + } + + self.drive_mode_select(head, block_descriptor, page)?; + + // LTO-4 does not support this page, but try to write it if we can read it + if let Ok((mut head, block_descriptor, mut page)) = + self.read_device_configuration_extension_page() + { + head.reset_mode_data_len(); // mode_data_len need to be zero + page.set_pewz(0); // disable PEWZ + + let _ = self.drive_mode_select(head, block_descriptor, page); } Ok(()) @@ -933,6 +992,41 @@ impl SgTape { .map_err(|err| format_err!("read_compression_page failed: {err}")) } + fn read_device_configuration_extension_page( + &mut self, + ) -> Result< + ( + ModeParameterHeader, + ModeBlockDescriptor, + DeviceConfigurationExtensionModePage, + ), + Error, + > { + let (head, block_descriptor, page): (_, _, DeviceConfigurationExtensionModePage) = + scsi_mode_sense(&mut self.file, false, 0x10, 0x01)?; + + proxmox_lang::try_block!({ + if (page.page_code & 0b0011_1111) != 0x10 { + bail!("wrong page code {}", page.page_code); + } + if page.sub_page_code != 0x01 { + bail!("wrong sub page code {}", page.sub_page_code); + } + let page_length = page.page_length; + if page_length != 
0x1C { + bail!("wrong page length {page_length}"); + } + + let block_descriptor = match block_descriptor { + Some(block_descriptor) => block_descriptor, + None => bail!("missing block descriptor"), + }; + + Ok((head, block_descriptor, page)) + }) + .map_err(|err| format_err!("read_device_configuration_extension_page failed: {err}")) + } + /// Read drive options/status /// /// We read the drive compression page, including the @@ -957,10 +1051,19 @@ impl SgTape { pub fn get_drive_and_media_status(&mut self) -> Result { let drive_status = self.read_drive_status()?; - let alert_flags = self - .tape_alert_flags() - .map(|flags| format!("{:?}", flags)) - .ok(); + let drive_activity = read_device_activity(&mut self.file).ok(); + + // some operations block when the tape moves, which can take up to 2 hours + // (e.g. for calibrating) so skip those queries while it's doing that + let is_moving = !matches!(drive_activity, None | Some(DeviceActivity::NoActivity)); + + let alert_flags = if !is_moving { + self.tape_alert_flags() + .map(|flags| format!("{:?}", flags)) + .ok() + } else { + None + }; let mut status = LtoDriveAndMediaStatus { vendor: self.info().vendor.clone(), @@ -980,6 +1083,7 @@ impl SgTape { medium_passes: None, medium_wearout: None, volume_mounts: None, + drive_activity, }; if self.test_unit_ready().is_ok() { @@ -987,10 +1091,12 @@ impl SgTape { status.write_protect = Some(drive_status.write_protect); } - let position = self.position()?; + if !is_moving { + let position = self.position()?; - status.file_number = Some(position.logical_file_id); - status.block_number = Some(position.logical_object_number); + status.file_number = Some(position.logical_file_id); + status.block_number = Some(position.logical_object_number); + } if let Ok(mam) = self.cartridge_memory() { match mam_extract_media_usage(&mam) { @@ -1004,26 +1110,60 @@ impl SgTape { } } - if let Ok(volume_stats) = self.volume_statistics() { - let passes = std::cmp::max( - volume_stats.beginning_of_medium_passes, - volume_stats.middle_of_tape_passes, - ); + if !is_moving { + if let Ok(volume_stats) = self.volume_statistics() { + let passes = std::cmp::max( + volume_stats.beginning_of_medium_passes, + volume_stats.middle_of_tape_passes, + ); - // assume max. 16000 medium passes - // see: https://en.wikipedia.org/wiki/Linear_Tape-Open - let wearout: f64 = (passes as f64) / 16000.0_f64; + // assume max. 
16000 medium passes + // see: https://en.wikipedia.org/wiki/Linear_Tape-Open + let wearout: f64 = (passes as f64) / 16000.0_f64; - status.medium_passes = Some(passes); - status.medium_wearout = Some(wearout); + status.medium_passes = Some(passes); + status.medium_wearout = Some(wearout); - status.volume_mounts = Some(volume_stats.volume_mounts); + status.volume_mounts = Some(volume_stats.volume_mounts); + } } } } Ok(status) } + + /// Tries to write useful attributes to the MAM like Vendor/Application/Version + pub fn write_mam_attributes(&mut self, label: Option, pool: Option) { + use pbs_buildcfg::PROXMOX_BACKUP_CRATE_VERSION; + let mut attribute_list: Vec<(u16, &[u8])> = vec![ + (0x08_00, b"Proxmox"), // APPLICATION VENDOR, 8 bytes + (0x08_01, b"Proxmox Backup Server"), // APPLICATION NAME, 32 bytes + (0x08_02, PROXMOX_BACKUP_CRATE_VERSION.as_bytes()), // APPLICATION VERSION, 8 bytes + ]; + if let Some(ref label) = label { + attribute_list.push((0x08_03, label.as_bytes())); // USER MEDIUM TEXT LABEL, 160 bytes + } + + if let Some(ref pool) = pool { + attribute_list.push((0x08_08, pool.as_bytes())); // MEDIA POOL, 160 bytes + } + + for (id, data) in attribute_list { + if let Err(err) = write_mam_attribute(&mut self.file, id, data) { + log::warn!("could not set MAM Attribute {id:x}: {err}"); + } + } + } + + // clear all custom set mam attributes + fn clear_mam_attributes(&mut self) { + for attr in [0x08_00, 0x08_01, 0x08_02, 0x08_03, 0x08_08] { + if let Err(err) = write_mam_attribute(&mut self.file, attr, b"") { + log::warn!("could not clear MAM attribute {attr:x}: {err}"); + } + } + } } pub struct SgTapeReader<'a> { diff --git a/pbs-tape/src/sg_tape/device_status.rs b/pbs-tape/src/sg_tape/device_status.rs new file mode 100644 index 000000000..353ba0a7b --- /dev/null +++ b/pbs-tape/src/sg_tape/device_status.rs @@ -0,0 +1,99 @@ +use std::os::fd::AsRawFd; + +use anyhow::{bail, format_err, Error}; + +use pbs_api_types::DeviceActivity; +use proxmox_io::ReadExt; + +use super::LpParameterHeader; +use crate::sgutils2::SgRaw; + +/// SCSI command to query volume statistics +/// +/// CDB: LOG SENSE / LP11h DT Device Activity +/// +/// Only returns the Device Activity result from the VHF data +pub fn read_device_activity(file: &mut F) -> Result { + let data = sg_read_dt_device_status(file)?; + + decode_dt_device_status(&data) + .map_err(|err| format_err!("decode dt device status failed - {}", err)) +} + +#[allow(clippy::vec_init_then_push)] +fn sg_read_dt_device_status(file: &mut F) -> Result, Error> { + let alloc_len: u16 = 8192; + let mut sg_raw = SgRaw::new(file, alloc_len as usize)?; + + let mut cmd = Vec::new(); + cmd.push(0x4D); // LOG SENSE + cmd.push(0); + cmd.push((1 << 6) | 0x11); // DT Device Status log page + cmd.push(0); // Subpage 0 + cmd.push(0); + cmd.push(0); + cmd.push(0); + cmd.extend(alloc_len.to_be_bytes()); // alloc len + cmd.push(0u8); // control byte + + sg_raw.set_timeout(1); // use short timeout + sg_raw + .do_command(&cmd) + .map_err(|err| format_err!("read tape dt device status failed - {}", err)) + .map(|v| v.to_vec()) +} + +fn decode_dt_device_status(data: &[u8]) -> Result { + if !((data[0] & 0x7f) == 0x11 && data[1] == 0) { + bail!("invalid response"); + } + + let mut reader = &data[2..]; + + let page_len: u16 = unsafe { reader.read_be_value()? 
}; + + let page_len = page_len as usize; + + if (page_len + 4) > data.len() { + bail!("invalid page length"); + } else { + // Note: Quantum hh7 returns the allocation_length instead of real data_len + reader = &data[4..page_len + 4]; + } + + let mut page_valid = false; + + let mut activity = DeviceActivity::Other; + + loop { + if reader.is_empty() { + break; + } + let head: LpParameterHeader = unsafe { reader.read_be_value()? }; + + match head.parameter_code { + 0x0000 => { + let vhf_descriptor = reader.read_exact_allocated(head.parameter_len as usize)?; + + if vhf_descriptor.len() != 4 { + bail!("invalid VHF data descriptor"); + } + + activity = vhf_descriptor[2].try_into()?; + + if vhf_descriptor[0] & 0x01 == 1 { + page_valid = true; + } + } + _ => { + reader.read_exact_allocated(head.parameter_len as usize)?; + } + } + } + + if !page_valid { + bail!("missing page-valid parameter"); + } + + Ok(activity) +} diff --git a/pbs-tape/src/sg_tape/encryption.rs b/pbs-tape/src/sg_tape/encryption.rs index 9fafb62a8..7247d257f 100644 --- a/pbs-tape/src/sg_tape/encryption.rs +++ b/pbs-tape/src/sg_tape/encryption.rs @@ -76,7 +76,7 @@ struct SspSetDataEncryptionPage { control_byte_5: u8, encryption_mode: u8, decryption_mode: u8, - algorythm_index: u8, + algorithm_index: u8, key_format: u8, reserved: [u8; 8], key_len: u16, @@ -86,7 +86,7 @@ struct SspSetDataEncryptionPage { #[allow(clippy::vec_init_then_push)] fn sg_spout_set_encryption( file: &mut F, - algorythm_index: u8, + algorithm_index: u8, key: Option<[u8; 32]>, ) -> Result<(), Error> { let mut sg_raw = SgRaw::new(file, 0)?; @@ -106,7 +106,7 @@ fn sg_spout_set_encryption( control_byte_5: (chok << 2), encryption_mode: if key.is_some() { 2 } else { 0 }, decryption_mode: if key.is_some() { 3 } else { 0 }, // mixed mode - algorythm_index, + algorithm_index, key_format: 0, reserved: [0u8; 8], key_len: if let Some(ref key) = key { @@ -221,7 +221,7 @@ struct SspDataEncryptionCapabilityPage { #[derive(Endian)] #[repr(C, packed)] struct SspDataEncryptionAlgorithmDescriptor { - algorythm_index: u8, + algorithm_index: u8, reserved1: u8, descriptor_len: u16, control_byte_4: u8, @@ -236,7 +236,7 @@ struct SspDataEncryptionAlgorithmDescriptor { algorithm_code: u32, } -// Returns the algorythm_index for AES-GCM +// Returns the algorithm_index for AES-GCM fn decode_spin_data_encryption_caps(data: &[u8]) -> Result { proxmox_lang::try_block!({ let mut reader = data; @@ -259,7 +259,7 @@ fn decode_spin_data_encryption_caps(data: &[u8]) -> Result { continue; // can't decrypt in hardware } if desc.algorithm_code == 0x00010014 && desc.key_size == 32 { - aes_gcm_index = Some(desc.algorythm_index); + aes_gcm_index = Some(desc.algorithm_index); break; } } @@ -280,7 +280,7 @@ struct SspDataEncryptionStatusPage { scope_byte: u8, encryption_mode: u8, decryption_mode: u8, - algorythm_index: u8, + algorithm_index: u8, key_instance_counter: u32, control_byte: u8, key_format: u8, diff --git a/pbs-tape/src/sg_tape/mam.rs b/pbs-tape/src/sg_tape/mam.rs index 61368d287..4a94fa92c 100644 --- a/pbs-tape/src/sg_tape/mam.rs +++ b/pbs-tape/src/sg_tape/mam.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::os::unix::io::AsRawFd; +use std::sync::LazyLock; use anyhow::{bail, format_err, Error}; use endian_trait::Endian; @@ -8,7 +9,7 @@ use proxmox_io::ReadExt; use pbs_api_types::MamAttribute; -use crate::sgutils2::SgRaw; +use crate::sgutils2::{alloc_page_aligned_buffer, SgRaw}; use super::TapeAlertFlags; @@ -28,6 +29,7 @@ enum MamFormat { BINARY, ASCII, DEC, + TEXT, } struct 
MamType { @@ -55,6 +57,9 @@ impl MamType { const fn dec(id: u16, len: u16, description: &'static str) -> Self { Self::new(id, len, MamFormat::DEC, description) } + const fn text(id: u16, len: u16, description: &'static str) -> Self { + Self::new(id, len, MamFormat::TEXT, description) + } } static MAM_ATTRIBUTES: &[MamType] = &[ @@ -95,12 +100,12 @@ static MAM_ATTRIBUTES: &[MamType] = &[ MamType::ascii(0x08_00, 8, "Application Vendor"), MamType::ascii(0x08_01, 32, "Application Name"), MamType::ascii(0x08_02, 8, "Application Version"), - MamType::ascii(0x08_03, 160, "User Medium Text Label"), + MamType::text(0x08_03, 160, "User Medium Text Label"), MamType::ascii(0x08_04, 12, "Date And Time Last Written"), MamType::bin(0x08_05, 1, "Text Localization Identifier"), MamType::ascii(0x08_06, 32, "Barcode"), MamType::ascii(0x08_07, 80, "Owning Host Textual Name"), - MamType::ascii(0x08_08, 160, "Media Pool"), + MamType::text(0x08_08, 160, "Media Pool"), MamType::ascii(0x08_0B, 16, "Application Format Version"), // length for vol. coherency is not specified for IBM, and HP says 23-n MamType::bin(0x08_0C, 0, "Volume Coherency Information"), @@ -110,18 +115,15 @@ static MAM_ATTRIBUTES: &[MamType] = &[ MamType::bin(0x10_01, 24, "Alternate Unique Cartridge Identify (Alt-UCI)"), ]; -lazy_static::lazy_static! { +static MAM_ATTRIBUTE_NAMES: LazyLock> = LazyLock::new(|| { + let mut map = HashMap::new(); - static ref MAM_ATTRIBUTE_NAMES: HashMap = { - let mut map = HashMap::new(); + for entry in MAM_ATTRIBUTES { + map.insert(entry.id, entry); + } - for entry in MAM_ATTRIBUTES { - map.insert(entry.id, entry); - } - - map - }; -} + map +}); fn read_tape_mam(file: &mut F) -> Result, Error> { let alloc_len: u32 = 32 * 1024; @@ -139,6 +141,65 @@ fn read_tape_mam(file: &mut F) -> Result, Error> { .map(|v| v.to_vec()) } +/// Write attribute to MAM +pub fn write_mam_attribute( + file: &mut F, + attribute_id: u16, + data: &[u8], +) -> Result<(), Error> { + let mut sg_raw = SgRaw::new(file, 0)?; + + let mut parameters = Vec::new(); + + let attribute = MAM_ATTRIBUTE_NAMES + .get(&attribute_id) + .ok_or_else(|| format_err!("MAM attribute '{attribute_id:x}' unknown"))?; + + let mut attr_data = Vec::new(); + attr_data.extend(attribute_id.to_be_bytes()); + attr_data.push(match attribute.format { + MamFormat::BINARY | MamFormat::DEC => 0x00, + MamFormat::ASCII => 0x01, + MamFormat::TEXT => 0x02, + }); + let len = if data.is_empty() { 0 } else { attribute.len }; + attr_data.extend(len.to_be_bytes()); + attr_data.extend(data); + if !data.is_empty() && data.len() < attribute.len as usize { + attr_data.resize(attr_data.len() - data.len() + attribute.len as usize, 0); + } else if data.len() > u16::MAX as usize { + bail!("data too long"); + } + + parameters.extend(attr_data); + + let mut data_out = alloc_page_aligned_buffer(parameters.len() + 4)?; + data_out[..4].copy_from_slice(&(parameters.len() as u32).to_be_bytes()); + data_out[4..].copy_from_slice(&parameters); + + let mut cmd = vec![ + 0x8d, // WRITE ATTRIBUTE CDB (8Dh) + 0x01, // WTC=1 + 0x00, // reserved + 0x00, // reserved + 0x00, // reserved + 0x00, // Volume Number + 0x00, // reserved + 0x00, // Partition Number + 0x00, // reserved + 0x00, // reserved + ]; + cmd.extend((data_out.len() as u32).to_be_bytes()); + cmd.extend([ + 0x00, // reserved + 0x00, // reserved + ]); + + sg_raw.do_out_command(&cmd, &data_out)?; + + Ok(()) +} + /// Read Medium auxiliary memory attributes (cartridge memory) using raw SCSI command.
pub fn read_mam_attributes(file: &mut F) -> Result, Error> { let data = read_tape_mam(file)?; @@ -188,7 +249,7 @@ fn decode_mam_attributes(data: &[u8]) -> Result, Error> { }; if info.len == 0 || info.len == head.len { let value = match info.format { - MamFormat::ASCII => String::from_utf8_lossy(&data).to_string(), + MamFormat::ASCII | MamFormat::TEXT => String::from_utf8_lossy(&data).to_string(), MamFormat::DEC => { if info.len == 2 { format!("{}", u16::from_be_bytes(data[0..2].try_into()?)) diff --git a/pbs-tape/src/sg_tape/report_density.rs b/pbs-tape/src/sg_tape/report_density.rs index 57b753238..74596b7b6 100644 --- a/pbs-tape/src/sg_tape/report_density.rs +++ b/pbs-tape/src/sg_tape/report_density.rs @@ -9,7 +9,7 @@ use crate::sgutils2::SgRaw; #[repr(C, packed)] #[derive(Endian)] -struct DesnityDescriptorBlock { +struct DensityDescriptorBlock { primary_density_code: u8, secondary_density_code: u8, flags2: u8, @@ -58,7 +58,7 @@ pub fn report_density(file: &mut F) -> Result { if reader.is_empty() { break; } - let block: DesnityDescriptorBlock = unsafe { reader.read_be_value()? }; + let block: DensityDescriptorBlock = unsafe { reader.read_be_value()? }; if block.primary_density_code > max_density { max_density = block.primary_density_code; } diff --git a/pbs-tape/src/sg_tape/tape_alert_flags.rs b/pbs-tape/src/sg_tape/tape_alert_flags.rs index 191e7bd0d..0ac4d6417 100644 --- a/pbs-tape/src/sg_tape/tape_alert_flags.rs +++ b/pbs-tape/src/sg_tape/tape_alert_flags.rs @@ -12,6 +12,7 @@ bitflags::bitflags! { /// Tape Alert Flags /// /// See LTO SCSI Reference LOG_SENSE - LP 2Eh: TapeAlerts + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub struct TapeAlertFlags: u64 { #[allow(clippy::eq_op)] const READ_WARNING = 1 << (0x0001 -1); diff --git a/pbs-tape/src/sg_tape/volume_statistics.rs b/pbs-tape/src/sg_tape/volume_statistics.rs index f27a682c1..d3815db09 100644 --- a/pbs-tape/src/sg_tape/volume_statistics.rs +++ b/pbs-tape/src/sg_tape/volume_statistics.rs @@ -46,10 +46,10 @@ fn sg_read_volume_statistics(file: &mut F) -> Result, Error> #[repr(C, packed)] #[derive(Endian)] -struct LpParameterHeader { - parameter_code: u16, - control: u8, - parameter_len: u8, +pub(crate) struct LpParameterHeader { + pub parameter_code: u16, + pub control: u8, + pub parameter_len: u8, } fn decode_volume_statistics(data: &[u8]) -> Result { diff --git a/pbs-tools/Cargo.toml b/pbs-tools/Cargo.toml index 3dcae88a5..998e3077e 100644 --- a/pbs-tools/Cargo.toml +++ b/pbs-tools/Cargo.toml @@ -8,37 +8,21 @@ description = "common tools used throughout pbs" # This must not depend on any subcrates more closely related to pbs itself. 
[dependencies] anyhow.workspace = true -base64.workspace = true bytes.workspace = true -crc32fast.workspace = true -endian_trait.workspace = true -flate2.workspace = true foreign-types.workspace = true -futures.workspace = true hex.workspace = true -lazy_static.workspace = true libc.workspace = true -log.workspace = true -nix.workspace = true nom.workspace = true openssl.workspace = true -regex.workspace = true serde_json.workspace = true # rt-multi-thread is required for block_in_place tokio = { workspace = true, features = [ "fs", "io-util", "rt", "rt-multi-thread", "sync" ] } -url.workspace = true -walkdir.workspace = true -zstd.workspace = true proxmox-async.workspace = true proxmox-io = { workspace = true, features = [ "tokio" ] } proxmox-human-byte.workspace = true -proxmox-lang.workspace=true proxmox-sys.workspace = true proxmox-time.workspace = true -pbs-api-types.workspace = true -pbs-buildcfg.workspace = true - [dev-dependencies] tokio = { workspace = true, features = [ "macros" ] } diff --git a/pbs-tools/src/async_lru_cache.rs b/pbs-tools/src/async_lru_cache.rs index 1f023d345..c43b87717 100644 --- a/pbs-tools/src/async_lru_cache.rs +++ b/pbs-tools/src/async_lru_cache.rs @@ -80,6 +80,7 @@ impl AsyncL } } +#[cfg(test)] mod test { use super::*; diff --git a/pbs-tools/src/crypt_config.rs b/pbs-tools/src/crypt_config.rs index cbf741908..6ea46b577 100644 --- a/pbs-tools/src/crypt_config.rs +++ b/pbs-tools/src/crypt_config.rs @@ -26,7 +26,7 @@ const FINGERPRINT_INPUT: [u8; 32] = [ pub struct CryptConfig { // the Cipher cipher: Cipher, - // A secrect key use to provide the chunk digest name space. + // A secret key use to provide the chunk digest name space. id_key: [u8; 32], // Openssl hmac PKey of id_key id_pkey: openssl::pkey::PKey, diff --git a/pbs-tools/src/format.rs b/pbs-tools/src/format.rs index c208d8cb0..bc9f20a8b 100644 --- a/pbs-tools/src/format.rs +++ b/pbs-tools/src/format.rs @@ -1,9 +1,11 @@ use std::borrow::Borrow; +use std::time::Duration; -use anyhow::Error; +use anyhow::{Context, Error}; use serde_json::Value; use proxmox_human_byte::HumanByte; +use proxmox_time::TimeSpan; pub fn strip_server_file_extension(name: &str) -> &str { if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") { @@ -64,3 +66,13 @@ pub fn render_bytes_human_readable(value: &Value, _record: &Value) -> Result Result { + if val.is_null() { + return Ok(String::new()); + } + let duration = val.as_u64().context("not a number")?; + let time_span = TimeSpan::from(Duration::from_secs(duration)); + + Ok(format!("{time_span}")) +} diff --git a/pbs-tools/src/lib.rs b/pbs-tools/src/lib.rs index bee5c1c0b..af900c925 100644 --- a/pbs-tools/src/lib.rs +++ b/pbs-tools/src/lib.rs @@ -10,7 +10,7 @@ pub mod async_lru_cache; /// Set MMAP_THRESHOLD to a fixed value (128 KiB) /// -/// This avoids the "dynamic" mmap-treshold logic from glibc's malloc, which seems misguided and +/// This avoids the "dynamic" mmap-threshold logic from glibc's malloc, which seems misguided and /// effectively avoids using mmap for all allocations smaller than 32 MiB. 
Which, in combination /// with the allocation pattern from our/tokio's complex async machinery, resulted in very large /// RSS sizes due to defragmentation and long-living (smaller) allocation on top of the heap diff --git a/proxmox-backup-client/Cargo.toml b/proxmox-backup-client/Cargo.toml index 40de24507..a91a4908b 100644 --- a/proxmox-backup-client/Cargo.toml +++ b/proxmox-backup-client/Cargo.toml @@ -8,7 +8,6 @@ edition.workspace = true anyhow.workspace = true futures.workspace = true hyper.workspace = true -libc.workspace = true log.workspace = true nix.workspace = true openssl.workspace = true @@ -24,19 +23,19 @@ pathpatterns.workspace = true pxar.workspace = true proxmox-async.workspace = true -proxmox-fuse.workspace = true proxmox-human-byte.workspace = true +proxmox-log.workspace = true proxmox-io.workspace = true proxmox-router = { workspace = true, features = [ "cli" ] } proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-sortable-macro.workspace = true proxmox-sys.workspace = true +proxmox-systemd.workspace = true proxmox-time.workspace = true pbs-api-types.workspace = true pbs-buildcfg.workspace = true pbs-client.workspace = true -pbs-config.workspace = true pbs-datastore.workspace = true pbs-fuse-loop.workspace = true pbs-key-config.workspace = true diff --git a/proxmox-backup-client/src/benchmark.rs b/proxmox-backup-client/src/benchmark.rs index b3047308c..a6f24d745 100644 --- a/proxmox-backup-client/src/benchmark.rs +++ b/proxmox-backup-client/src/benchmark.rs @@ -29,7 +29,7 @@ use crate::{ #[derive(Copy, Clone, Serialize)] /// Speed test result struct Speed { - /// The meassured speed in Bytes/second + /// The measured speed in Bytes/second #[serde(skip_serializing_if = "Option::is_none")] speed: Option, /// Top result we want to compare with @@ -229,7 +229,7 @@ async fn test_upload_speed( log::debug!("Connecting to backup server"); let client = BackupWriter::start( - client, + &client, crypt_config.clone(), repo.store(), &BackupNamespace::root(), @@ -331,9 +331,10 @@ fn test_crypt_speed(benchmark_result: &mut BenchmarkResult) -> Result<(), Error> let start_time = std::time::Instant::now(); let mut bytes = 0; + let mut out = Vec::new(); loop { - let mut out = Vec::new(); DataBlob::encrypt_benchmark(&crypt_config, &random_data, &mut out)?; + out.clear(); bytes += random_data.len(); if start_time.elapsed().as_micros() > 1_000_000 { break; diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs index 72b22e67f..0a374c011 100644 --- a/proxmox-backup-client/src/catalog.rs +++ b/proxmox-backup-client/src/catalog.rs @@ -1,5 +1,4 @@ use std::io::{Seek, SeekFrom}; -use std::os::unix::fs::OpenOptionsExt; use std::sync::Arc; use anyhow::{bail, format_err, Error}; @@ -9,17 +8,19 @@ use proxmox_router::cli::*; use proxmox_schema::api; use pbs_api_types::BackupNamespace; +use pbs_client::tools::has_pxar_filename_extension; use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_tools::crypt_config::CryptConfig; use pbs_tools::json::required_string_param; +use crate::helper; use crate::{ complete_backup_snapshot, complete_group_or_snapshot, complete_namespace, complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group, extract_repository_from_value, format_key_source, optional_ns_param, - record_repository, BackupDir, BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, - DynamicIndexReader, IndexFile, 
Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA, + record_repository, BackupDir, BufferedDynamicReader, CatalogReader, DynamicIndexReader, + IndexFile, Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA, }; #[api( @@ -104,11 +105,7 @@ async fn dump_catalog(param: Value) -> Result { let mut reader = BufferedDynamicReader::new(index, chunk_reader); - let mut catalogfile = std::fs::OpenOptions::new() - .write(true) - .read(true) - .custom_flags(libc::O_TMPFILE) - .open("/tmp")?; + let mut catalogfile = pbs_client::tools::create_tmp_file()?; std::io::copy(&mut reader, &mut catalogfile) .map_err(|err| format_err!("unable to download catalog - {}", err))?; @@ -180,7 +177,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { } }; - let server_archive_name = if archive_name.ends_with(".pxar") { + let server_archive_name = if has_pxar_filename_extension(archive_name, false) { format!("{}.didx", archive_name) } else { bail!("Can only mount pxar archives."); @@ -196,31 +193,18 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { ) .await?; - let mut tmpfile = std::fs::OpenOptions::new() - .write(true) - .read(true) - .custom_flags(libc::O_TMPFILE) - .open("/tmp")?; + let mut tmpfile = pbs_client::tools::create_tmp_file()?; let (manifest, _) = client.download_manifest().await?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; - let index = client - .download_dynamic_index(&manifest, &server_archive_name) - .await?; - let most_used = index.find_most_used_chunks(8); - - let file_info = manifest.lookup_file_info(&server_archive_name)?; - let chunk_reader = RemoteChunkReader::new( + let decoder = helper::get_pxar_fuse_accessor( + &server_archive_name, client.clone(), + &manifest, crypt_config.clone(), - file_info.chunk_crypt_mode(), - most_used, - ); - let reader = BufferedDynamicReader::new(index, chunk_reader); - let archive_size = reader.archive_size(); - let reader: pbs_pxar_fuse::Reader = Arc::new(BufferedDynamicReadAt::new(reader)); - let decoder = pbs_pxar_fuse::Accessor::new(reader, archive_size).await?; + ) + .await?; client.download(CATALOG_NAME, &mut tmpfile).await?; let index = DynamicIndexReader::new(tmpfile) @@ -240,11 +224,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { most_used, ); let mut reader = BufferedDynamicReader::new(index, chunk_reader); - let mut catalogfile = std::fs::OpenOptions::new() - .write(true) - .read(true) - .custom_flags(libc::O_TMPFILE) - .open("/tmp")?; + let mut catalogfile = pbs_client::tools::create_tmp_file()?; std::io::copy(&mut reader, &mut catalogfile) .map_err(|err| format_err!("unable to download catalog - {}", err))?; diff --git a/proxmox-backup-client/src/group.rs b/proxmox-backup-client/src/group.rs new file mode 100644 index 000000000..67f26e261 --- /dev/null +++ b/proxmox-backup-client/src/group.rs @@ -0,0 +1,88 @@ +use anyhow::{bail, Error}; +use serde_json::Value; + +use proxmox_router::cli::{CliCommand, CliCommandMap, Confirmation}; +use proxmox_schema::api; + +use crate::{ + complete_backup_group, complete_namespace, complete_repository, merge_group_into, + REPO_URL_SCHEMA, +}; +use pbs_api_types::{BackupGroup, BackupNamespace}; +use pbs_client::tools::{connect, remove_repository_from_value}; + +pub fn group_mgmt_cli() -> CliCommandMap { + CliCommandMap::new().insert( + "forget", + CliCommand::new(&API_METHOD_FORGET_GROUP) + .arg_param(&["group"]) + .completion_cb("ns", complete_namespace) + .completion_cb("repository", complete_repository) + .completion_cb("group", 
complete_backup_group), + ) +} + +#[api( + input: { + properties: { + group: { + type: String, + description: "Backup group", + }, + repository: { + schema: REPO_URL_SCHEMA, + optional: true, + }, + ns: { + type: BackupNamespace, + optional: true, + }, + } + } +)] +/// Forget (remove) backup snapshots. +async fn forget_group(group: String, mut param: Value) -> Result<(), Error> { + let backup_group: BackupGroup = group.parse()?; + let repo = remove_repository_from_value(&mut param)?; + let client = connect(&repo)?; + + let mut api_param = param; + merge_group_into(api_param.as_object_mut().unwrap(), backup_group.clone()); + + let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store()); + let result = client.get(&path, Some(api_param.clone())).await?; + let snapshots = result["data"].as_array().unwrap().len(); + + let confirmation = Confirmation::query_with_default( + format!( + "Delete group \"{}\" with {} snapshot(s)?", + backup_group, snapshots + ) + .as_str(), + Confirmation::No, + )?; + if confirmation.is_yes() { + let path = format!("api2/json/admin/datastore/{}/groups", repo.store()); + if let Err(err) = client.delete(&path, Some(api_param)).await { + // "ENOENT: No such file or directory" is part of the error returned when the group + // has not been found. The full error contains the full datastore path and we would + // like to avoid printing that to the console. Checking if it exists before deleting + // the group doesn't work because we currently do not differentiate between an empty + // and a nonexistent group. This would make it impossible to remove empty groups. + if err + .root_cause() + .to_string() + .contains("ENOENT: No such file or directory") + { + bail!("Unable to find backup group!"); + } else { + bail!(err); + } + } + println!("Successfully deleted group!"); + } else { + println!("Abort."); + } + + Ok(()) +} diff --git a/proxmox-backup-client/src/helper.rs b/proxmox-backup-client/src/helper.rs new file mode 100644 index 000000000..60355d7d0 --- /dev/null +++ b/proxmox-backup-client/src/helper.rs @@ -0,0 +1,79 @@ +use std::sync::Arc; + +use anyhow::Error; +use pbs_client::{BackupReader, RemoteChunkReader}; +use pbs_datastore::BackupManifest; +use pbs_tools::crypt_config::CryptConfig; + +use crate::{BufferedDynamicReadAt, BufferedDynamicReader, IndexFile}; + +pub(crate) async fn get_pxar_fuse_accessor( + archive_name: &str, + client: Arc, + manifest: &BackupManifest, + crypt_config: Option>, +) -> Result { + let (archive_name, payload_archive_name) = + pbs_client::tools::get_pxar_archive_names(archive_name, manifest)?; + + let (reader, archive_size) = get_pxar_fuse_reader( + &archive_name, + client.clone(), + manifest, + crypt_config.clone(), + ) + .await?; + + let reader = if let Some(payload_archive_name) = payload_archive_name { + let (payload_reader, payload_size) = get_pxar_fuse_reader( + &payload_archive_name, + client.clone(), + manifest, + crypt_config.clone(), + ) + .await?; + + pxar::PxarVariant::Split(reader, (payload_reader, payload_size)) + } else { + pxar::PxarVariant::Unified(reader) + }; + + let accessor = pbs_pxar_fuse::Accessor::new(reader, archive_size).await?; + + Ok(accessor) +} + +pub(crate) async fn get_pxar_fuse_reader( + archive_name: &str, + client: Arc, + manifest: &BackupManifest, + crypt_config: Option>, +) -> Result<(pbs_pxar_fuse::Reader, u64), Error> { + let reader = get_buffered_pxar_reader(archive_name, client, manifest, crypt_config).await?; + let archive_size = reader.archive_size(); + let reader: 
pbs_pxar_fuse::Reader = Arc::new(BufferedDynamicReadAt::new(reader)); + + Ok((reader, archive_size)) +} + +pub(crate) async fn get_buffered_pxar_reader( + archive_name: &str, + client: Arc, + manifest: &BackupManifest, + crypt_config: Option>, +) -> Result, Error> { + let index = client + .download_dynamic_index(manifest, archive_name) + .await?; + + let most_used = index.find_most_used_chunks(8); + let file_info = manifest.lookup_file_info(archive_name)?; + let chunk_reader = RemoteChunkReader::new( + client.clone(), + crypt_config.clone(), + file_info.chunk_crypt_mode(), + most_used, + ); + + Ok(BufferedDynamicReader::new(index, chunk_reader)) +} diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index 546275cb1..e4034aa99 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -15,46 +15,47 @@ use xdg::BaseDirectories; use pathpatterns::{MatchEntry, MatchType, PatternFlag}; use proxmox_async::blocking::TokioWriterAdapter; -use proxmox_human_byte::HumanByte; use proxmox_io::StdChannelWriter; +use proxmox_log::init_cli_logger; use proxmox_router::{cli::*, ApiMethod, RpcEnvironment}; use proxmox_schema::api; use proxmox_sys::fs::{file_get_json, image_size, replace_file, CreateOptions}; use proxmox_time::{epoch_i64, strftime_local}; +use pxar::accessor::aio::Accessor; use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; use pbs_api_types::{ - Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode, - Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig, SnapshotListItem, - StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, - BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA, + Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, ClientRateLimitConfig, + CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig, + SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, + BACKUP_TYPE_SCHEMA, }; use pbs_client::catalog_shell::Shell; -use pbs_client::pxar::ErrorHandler as PxarErrorHandler; +use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef}; use pbs_client::tools::{ complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot, complete_backup_source, complete_chunk_size, complete_group_or_snapshot, complete_img_archive_name, complete_namespace, complete_pxar_archive_name, complete_repository, - connect, connect_rate_limited, extract_repository_from_value, + connect, connect_rate_limited, extract_repository_from_value, has_pxar_filename_extension, key_source::{ crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA, KEYFILE_SCHEMA, MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA, }, - CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA, + raise_nofile_limit, CHUNK_SIZE_SCHEMA, REPO_URL_SCHEMA, }; use pbs_client::{ - delete_ticket_info, parse_backup_specification, view_task_result, BackupReader, - BackupRepository, BackupSpecificationType, BackupStats, BackupWriter, ChunkStream, - FixedChunkStream, HttpClient, PxarBackupStream, RemoteChunkReader, UploadOptions, - BACKUP_SOURCE_SCHEMA, + delete_ticket_info, parse_backup_specification, view_task_result, BackupDetectionMode, + BackupReader, BackupRepository, BackupSpecificationType, BackupStats, BackupWriter, + ChunkStream, FixedChunkStream, HttpClient, InjectionData, PxarBackupStream, RemoteChunkReader, + 
UploadOptions, BACKUP_SOURCE_SCHEMA, }; use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter}; use pbs_datastore::chunk_store::verify_chunk_size; -use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader}; +use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt}; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; use pbs_datastore::manifest::{ - archive_type, ArchiveType, BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, + ArchiveType, BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, }; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::CATALOG_NAME; @@ -62,19 +63,30 @@ use pbs_key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig}; use pbs_tools::crypt_config::CryptConfig; use pbs_tools::json; -mod benchmark; -pub use benchmark::*; -mod mount; -pub use mount::*; -mod task; -pub use task::*; -mod catalog; -pub use catalog::*; -mod snapshot; -pub use snapshot::*; pub mod key; pub mod namespace; +mod benchmark; +pub use benchmark::*; + +mod catalog; +pub use catalog::*; + +mod group; +pub use group::*; + +mod helper; +pub(crate) use helper::*; + +mod mount; +pub use mount::*; + +mod snapshot; +pub use snapshot::*; + +mod task; +pub use task::*; + fn record_repository(repo: &BackupRepository) { let base = match BaseDirectories::with_prefix("proxmox-backup") { Ok(v) => v, @@ -187,14 +199,26 @@ async fn backup_directory>( client: &BackupWriter, dir_path: P, archive_name: &str, + payload_target: Option<&str>, chunk_size: Option, - catalog: Arc>>>>, + catalog: Option>>>>>, pxar_create_options: pbs_client::pxar::PxarCreateOptions, upload_options: UploadOptions, -) -> Result { - let pxar_stream = PxarBackupStream::open(dir_path.as_ref(), catalog, pxar_create_options)?; - let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size); +) -> Result<(BackupStats, Option), Error> { + if upload_options.fixed_size.is_some() { + bail!("cannot backup directory with fixed chunk size!"); + } + let (payload_boundaries_tx, payload_boundaries_rx) = std::sync::mpsc::channel(); + let (pxar_stream, payload_stream) = PxarBackupStream::open( + dir_path.as_ref(), + catalog, + pxar_create_options, + Some(payload_boundaries_tx), + payload_target.is_some(), + )?; + + let mut chunk_stream = ChunkStream::new(pxar_stream, chunk_size, None, None); let (tx, rx) = mpsc::channel(10); // allow to buffer 10 chunks let stream = ReceiverStream::new(rx).map_err(Error::from); @@ -206,15 +230,49 @@ async fn backup_directory>( } }); - if upload_options.fixed_size.is_some() { - bail!("cannot backup directory with fixed chunk size!"); + let stats = client.upload_stream(archive_name, stream, upload_options.clone(), None); + + if let Some(mut payload_stream) = payload_stream { + let payload_target = payload_target + .ok_or_else(|| format_err!("got payload stream, but no target archive name"))?; + + let (payload_injections_tx, payload_injections_rx) = std::sync::mpsc::channel(); + let injection_data = InjectionData::new(payload_boundaries_rx, payload_injections_tx); + let suggested_boundaries = payload_stream.suggested_boundaries.take(); + let mut payload_chunk_stream = ChunkStream::new( + payload_stream, + chunk_size, + Some(injection_data), + suggested_boundaries, + ); + let (payload_tx, payload_rx) = mpsc::channel(10); // allow to buffer 10 chunks + let stream = ReceiverStream::new(payload_rx).map_err(Error::from); + + // spawn payload chunker inside a separate task so 
that it can run parallel + tokio::spawn(async move { + while let Some(v) = payload_chunk_stream.next().await { + let _ = payload_tx.send(v).await; + } + }); + + let payload_stats = client.upload_stream( + payload_target, + stream, + upload_options, + Some(payload_injections_rx), + ); + + match futures::join!(stats, payload_stats) { + (Ok(stats), Ok(payload_stats)) => Ok((stats, Some(payload_stats))), + (Err(err), Ok(_)) => Err(format_err!("upload failed: {err}")), + (Ok(_), Err(err)) => Err(format_err!("upload failed: {err}")), + (Err(err), Err(payload_err)) => { + Err(format_err!("upload failed: {err} - {payload_err}")) + } + } + } else { + Ok((stats.await?, None)) } - - let stats = client - .upload_stream(archive_name, stream, upload_options) - .await?; - - Ok(stats) } async fn backup_image>( @@ -228,8 +286,12 @@ async fn backup_image>( let file = tokio::fs::File::open(path).await?; - let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new()) - .map_err(Error::from); + let stream = tokio_util::codec::FramedRead::with_capacity( + file, + tokio_util::codec::BytesCodec::new(), + 4 * 1024 * 1024, + ) + .map_err(Error::from); let stream = FixedChunkStream::new(stream, chunk_size.unwrap_or(4 * 1024 * 1024)); @@ -238,7 +300,7 @@ async fn backup_image>( } let stats = client - .upload_stream(archive_name, stream, upload_options) + .upload_stream(archive_name, stream, upload_options, None) .await?; Ok(stats) @@ -529,7 +591,8 @@ fn spawn_catalog_upload( let (catalog_tx, catalog_rx) = std::sync::mpsc::sync_channel(10); // allow to buffer 10 writes let catalog_stream = proxmox_async::blocking::StdChannelStream(catalog_rx); let catalog_chunk_size = 512 * 1024; - let catalog_chunk_stream = ChunkStream::new(catalog_stream, Some(catalog_chunk_size)); + let catalog_chunk_stream = + ChunkStream::new(catalog_stream, Some(catalog_chunk_size), None, None); let catalog_writer = Arc::new(Mutex::new(CatalogWriter::new(TokioWriterAdapter::new( StdChannelWriter::new(catalog_tx), @@ -545,7 +608,7 @@ fn spawn_catalog_upload( tokio::spawn(async move { let catalog_upload_result = client - .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options) + .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options, None) .await; if let Err(ref err) = catalog_upload_result { @@ -636,12 +699,12 @@ fn spawn_catalog_upload( schema: CHUNK_SIZE_SCHEMA, optional: true, }, - rate: { - schema: TRAFFIC_CONTROL_RATE_SCHEMA, - optional: true, + limit: { + type: ClientRateLimitConfig, + flatten: true, }, - burst: { - schema: TRAFFIC_CONTROL_BURST_SCHEMA, + "change-detection-mode": { + type: BackupDetectionMode, optional: true, }, "exclude": { @@ -679,8 +742,10 @@ async fn create_backup( param: Value, all_file_systems: bool, skip_lost_and_found: bool, + change_detection_mode: Option, dry_run: bool, skip_e2big_xattr: bool, + limit: ClientRateLimitConfig, _info: &ApiMethod, _rpcenv: &mut dyn RpcEnvironment, ) -> Result { @@ -696,16 +761,7 @@ async fn create_backup( verify_chunk_size(size)?; } - let rate = match param["rate"].as_str() { - Some(s) => Some(s.parse::()?), - None => None, - }; - let burst = match param["burst"].as_str() { - Some(s) => Some(s.parse::()?), - None => None, - }; - - let rate_limit = RateLimitConfig::with_same_inout(rate, burst); + let rate_limit = RateLimitConfig::from_client_config(limit); let crypto = crypto_parameters(¶m)?; @@ -785,7 +841,8 @@ async fn create_backup( upload_list.push(( BackupSpecificationType::PXAR, filename.to_owned(), - format!("{}.didx", target), 
+ target.to_owned(), + "didx", 0, )); } @@ -803,7 +860,8 @@ async fn create_backup( upload_list.push(( BackupSpecificationType::IMAGE, filename.to_owned(), - format!("{}.fidx", target), + target.to_owned(), + "fidx", size, )); } @@ -814,7 +872,8 @@ async fn create_backup( upload_list.push(( BackupSpecificationType::CONFIG, filename.to_owned(), - format!("{}.blob", target), + target.to_owned(), + "blob", metadata.len(), )); } @@ -825,7 +884,8 @@ async fn create_backup( upload_list.push(( BackupSpecificationType::LOGFILE, filename.to_owned(), - format!("{}.blob", target), + target.to_owned(), + "blob", metadata.len(), )); } @@ -834,7 +894,9 @@ async fn create_backup( let backup_time = backup_time_opt.unwrap_or_else(epoch_i64); - let client = connect_rate_limited(&repo, rate_limit)?; + let detection_mode = change_detection_mode.unwrap_or_default(); + + let http_client = connect_rate_limited(&repo, rate_limit)?; record_repository(&repo); let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time)); @@ -886,7 +948,7 @@ async fn create_backup( }; let client = BackupWriter::start( - client, + &http_client, crypt_config.clone(), repo.store(), &backup_ns, @@ -934,7 +996,7 @@ async fn create_backup( None }; - let mut manifest = BackupManifest::new(snapshot); + let mut manifest = BackupManifest::new(snapshot.clone()); let mut catalog = None; let mut catalog_result_rx = None; @@ -944,7 +1006,8 @@ async fn create_backup( log::info!("{} {} '{}' to '{}' as {}", what, desc, file, repo, target); }; - for (backup_type, filename, target, size) in upload_list { + for (backup_type, filename, target_base, extension, size) in upload_list { + let target = format!("{target_base}.{extension}"); match (backup_type, dry_run) { // dry-run (BackupSpecificationType::CONFIG, true) => log_file("config file", &filename, &target), @@ -980,20 +1043,80 @@ async fn create_backup( manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; } (BackupSpecificationType::PXAR, false) => { + let target_base = if let Some(base) = target_base.strip_suffix(".pxar") { + base.to_string() + } else { + bail!("unexpected suffix in target: {target_base}"); + }; + + let (target, payload_target) = + if detection_mode.is_metadata() || detection_mode.is_data() { + ( + format!("{target_base}.mpxar.{extension}"), + Some(format!("{target_base}.ppxar.{extension}")), + ) + } else { + (target, None) + }; + // start catalog upload on first use - if catalog.is_none() { + if catalog.is_none() && !detection_mode.is_data() && !detection_mode.is_metadata() { let catalog_upload_res = spawn_catalog_upload(client.clone(), crypto.mode == CryptMode::Encrypt)?; catalog = Some(catalog_upload_res.catalog_writer); catalog_result_rx = Some(catalog_upload_res.result); } - let catalog = catalog.as_ref().unwrap(); log_file("directory", &filename, &target); - catalog - .lock() - .unwrap() - .start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?; + if let Some(catalog) = catalog.as_ref() { + catalog + .lock() + .unwrap() + .start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?; + } + + let mut previous_ref = None; + let max_cache_size = if detection_mode.is_metadata() { + let old_rlimit = raise_nofile_limit()?; + if let Some(ref manifest) = previous_manifest { + // BackupWriter::start created a new snapshot, get the one before + if let Some(backup_time) = client.previous_backup_time().await? 
{ + let backup_dir: BackupDir = + (snapshot.group.clone(), backup_time).into(); + let backup_reader = BackupReader::start( + &http_client, + crypt_config.clone(), + repo.store(), + &backup_ns, + &backup_dir, + true, + ) + .await?; + previous_ref = prepare_reference( + &target, + manifest.clone(), + &client, + backup_reader.clone(), + crypt_config.clone(), + crypto.mode, + ) + .await? + } + } + + if old_rlimit.rlim_max <= 4096 { + log::info!( + "resource limit for open file handles low: {}", + old_rlimit.rlim_max, + ); + } + + Some(usize::try_from( + old_rlimit.rlim_max - old_rlimit.rlim_cur / 2, + )?) + } else { + None + }; let pxar_options = pbs_client::pxar::PxarCreateOptions { device_set: devices.clone(), @@ -1001,6 +1124,8 @@ async fn create_backup( entries_max: entries_max as usize, skip_lost_and_found, skip_e2big_xattr, + previous_ref, + max_cache_size, }; let upload_options = UploadOptions { @@ -1010,18 +1135,31 @@ async fn create_backup( ..UploadOptions::default() }; - let stats = backup_directory( + let (stats, payload_stats) = backup_directory( &client, &filename, &target, + payload_target.as_deref(), chunk_size_opt, - catalog.clone(), + catalog.as_ref().cloned(), pxar_options, upload_options, ) .await?; + + if let Some(payload_stats) = payload_stats { + manifest.add_file( + payload_target + .ok_or_else(|| format_err!("missing payload target archive"))?, + payload_stats.size, + payload_stats.csum, + crypto.mode, + )?; + } manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; - catalog.lock().unwrap().end_directory()?; + if let Some(catalog) = catalog.as_ref() { + catalog.lock().unwrap().end_directory()?; + } } (BackupSpecificationType::IMAGE, false) => { log_file("image", &filename, &target); @@ -1101,6 +1239,72 @@ async fn create_backup( Ok(Value::Null) } +async fn prepare_reference( + target: &str, + manifest: Arc, + backup_writer: &BackupWriter, + backup_reader: Arc, + crypt_config: Option>, + crypt_mode: CryptMode, +) -> Result, Error> { + let (target, payload_target) = + match pbs_client::tools::get_pxar_archive_names(target, &manifest) { + Ok((target, payload_target)) => (target, payload_target), + Err(_) => return Ok(None), + }; + let payload_target = payload_target.unwrap_or_default(); + + let metadata_ref_index = if let Ok(index) = backup_reader + .download_dynamic_index(&manifest, &target) + .await + { + index + } else { + log::info!("No previous metadata index, continue without reference"); + return Ok(None); + }; + + let file_info = match manifest.lookup_file_info(&payload_target) { + Ok(file_info) => file_info, + Err(_) => { + log::info!("No previous payload index found in manifest, continue without reference"); + return Ok(None); + } + }; + + if file_info.crypt_mode != crypt_mode { + log::info!("Crypt mode mismatch, continue without reference"); + return Ok(None); + } + + let known_payload_chunks = Arc::new(Mutex::new(HashSet::new())); + let payload_ref_index = backup_writer + .download_previous_dynamic_index(&payload_target, &manifest, known_payload_chunks) + .await?; + + log::info!("Using previous index as metadata reference for '{target}'"); + + let most_used = metadata_ref_index.find_most_used_chunks(8); + let file_info = manifest.lookup_file_info(&target)?; + let chunk_reader = RemoteChunkReader::new( + backup_reader.clone(), + crypt_config.clone(), + file_info.chunk_crypt_mode(), + most_used, + ); + let reader = BufferedDynamicReader::new(metadata_ref_index, chunk_reader); + let archive_size = reader.archive_size(); + let reader: 
MetadataArchiveReader = Arc::new(LocalDynamicReadAt::new(reader)); + // only care about the metadata, therefore do not attach payload reader + let accessor = Accessor::new(pxar::PxarVariant::Unified(reader), archive_size).await?; + + Ok(Some(pbs_client::pxar::PxarPrevRef { + accessor, + payload_index: payload_ref_index, + archive_name: target, + })) +} + async fn dump_image( client: Arc, crypt_config: Option>, @@ -1149,8 +1353,8 @@ async fn dump_image( fn parse_archive_type(name: &str) -> (String, ArchiveType) { if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") { - (name.into(), archive_type(name).unwrap()) - } else if name.ends_with(".pxar") { + (name.into(), ArchiveType::from_path(name).unwrap()) + } else if has_pxar_filename_extension(name, false) { (format!("{}.didx", name), ArchiveType::DynamicIndex) } else if name.ends_with(".img") { (format!("{}.fidx", name), ArchiveType::FixedIndex) @@ -1186,13 +1390,9 @@ We do not extract '.pxar' archives when writing to standard output. "### }, - rate: { - schema: TRAFFIC_CONTROL_RATE_SCHEMA, - optional: true, - }, - burst: { - schema: TRAFFIC_CONTROL_BURST_SCHEMA, - optional: true, + limit: { + type: ClientRateLimitConfig, + flatten: true, }, "allow-existing-dirs": { type: Boolean, @@ -1262,7 +1462,12 @@ We do not extract '.pxar' archives when writing to standard output. description: "ignore errors that occur during device node extraction", optional: true, default: false, - } + }, + "prelude-target": { + description: "Path to restore prelude to, (pxar v2 archives only).", + type: String, + optional: true, + }, } } )] @@ -1278,22 +1483,14 @@ async fn restore( overwrite_files: bool, overwrite_symlinks: bool, overwrite_hardlinks: bool, + limit: ClientRateLimitConfig, ignore_extract_device_errors: bool, ) -> Result { let repo = extract_repository_from_value(¶m)?; let archive_name = json::required_string_param(¶m, "archive-name")?; - let rate = match param["rate"].as_str() { - Some(s) => Some(s.parse::()?), - None => None, - }; - let burst = match param["burst"].as_str() { - Some(s) => Some(s.parse::()?), - None => None, - }; - - let rate_limit = RateLimitConfig::with_same_inout(rate, burst); + let rate_limit = RateLimitConfig::from_client_config(limit); let client = connect_rate_limited(&repo, rate_limit)?; record_repository(&repo); @@ -1362,8 +1559,6 @@ async fn restore( return Ok(Value::Null); } - let file_info = manifest.lookup_file_info(&archive_name)?; - if archive_type == ArchiveType::Blob { let mut reader = client.download_blob(&manifest, &archive_name).await?; @@ -1384,20 +1579,16 @@ async fn restore( .map_err(|err| format_err!("unable to pipe data - {}", err))?; } } else if archive_type == ArchiveType::DynamicIndex { - let index = client - .download_dynamic_index(&manifest, &archive_name) - .await?; + let (archive_name, payload_archive_name) = + pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?; - let most_used = index.find_most_used_chunks(8); - - let chunk_reader = RemoteChunkReader::new( + let mut reader = get_buffered_pxar_reader( + &archive_name, client.clone(), - crypt_config, - file_info.chunk_crypt_mode(), - most_used, - ); - - let mut reader = BufferedDynamicReader::new(index, chunk_reader); + &manifest, + crypt_config.clone(), + ) + .await?; let on_error = if ignore_extract_device_errors { let handler: PxarErrorHandler = Box::new(move |err: Error| { @@ -1428,12 +1619,15 @@ async fn restore( overwrite_flags.insert(pbs_client::pxar::OverwriteFlags::all()); } + let prelude_path = 
param["prelude-target"].as_str().map(PathBuf::from); + let options = pbs_client::pxar::PxarExtractOptions { match_list: &[], extract_match_default: true, allow_existing_dirs, overwrite_flags, on_error, + prelude_path, }; let mut feature_flags = pbs_client::pxar::Flags::DEFAULT; @@ -1452,8 +1646,22 @@ async fn restore( } if let Some(target) = target { + let reader = if let Some(payload_archive_name) = payload_archive_name { + let payload_reader = get_buffered_pxar_reader( + &payload_archive_name, + client.clone(), + &manifest, + crypt_config.clone(), + ) + .await?; + pxar::PxarVariant::Split(reader, payload_reader) + } else { + pxar::PxarVariant::Unified(reader) + }; + let decoder = pxar::decoder::Decoder::from_std(reader)?; + pbs_client::pxar::extract_archive( - pxar::decoder::Decoder::from_std(reader)?, + decoder, Path::new(target), feature_flags, |path| { @@ -1463,6 +1671,9 @@ async fn restore( ) .map_err(|err| format_err!("error extracting archive - {:#}", err))?; } else { + if archive_name.ends_with(".mpxar.didx") || archive_name.ends_with(".ppxar.didx") { + bail!("unable to pipe split archive"); + } let mut writer = std::fs::OpenOptions::new() .write(true) .open("/dev/stdout") @@ -1472,6 +1683,7 @@ async fn restore( .map_err(|err| format_err!("unable to pipe data - {}", err))?; } } else if archive_type == ArchiveType::FixedIndex { + let file_info = manifest.lookup_file_info(&archive_name)?; let index = client .download_fixed_index(&manifest, &archive_name) .await?; @@ -1717,7 +1929,7 @@ impl ReadAt for BufferedDynamicReadAt { fn main() { pbs_tools::setup_libc_malloc_opts(); - init_cli_logger("PBS_LOG", "info"); + init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger"); let backup_cmd_def = CliCommand::new(&API_METHOD_CREATE_BACKUP) .arg_param(&["backupspec"]) @@ -1744,7 +1956,8 @@ fn main() { .completion_cb("ns", complete_namespace) .completion_cb("snapshot", complete_group_or_snapshot) .completion_cb("archive-name", complete_archive_name) - .completion_cb("target", complete_file_name); + .completion_cb("target", complete_file_name) + .completion_cb("prelude-target", complete_file_name); let prune_cmd_def = CliCommand::new(&API_METHOD_PRUNE) .arg_param(&["group"]) @@ -1791,6 +2004,7 @@ fn main() { .insert("benchmark", benchmark_cmd_def) .insert("change-owner", change_owner_cmd_def) .insert("namespace", namespace::cli_map()) + .insert("group", group_mgmt_cli()) .alias(&["files"], &["snapshot", "files"]) .alias(&["forget"], &["snapshot", "forget"]) .alias(&["upload-log"], &["snapshot", "upload-log"]) diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs index 4a2f83357..c15e030f5 100644 --- a/proxmox-backup-client/src/mount.rs +++ b/proxmox-backup-client/src/mount.rs @@ -16,22 +16,23 @@ use tokio::signal::unix::{signal, SignalKind}; use proxmox_router::{cli::*, ApiHandler, ApiMethod, RpcEnvironment}; use proxmox_schema::*; use proxmox_sortable_macro::sortable; +use proxmox_systemd; use pbs_api_types::BackupNamespace; +use pbs_client::tools::has_pxar_filename_extension; use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_datastore::cached_chunk_reader::CachedChunkReader; -use pbs_datastore::dynamic_index::BufferedDynamicReader; use pbs_datastore::index::IndexFile; use pbs_key_config::load_and_decrypt_key; use pbs_tools::crypt_config::CryptConfig; use pbs_tools::json::required_string_param; +use crate::helper; use crate::{ complete_group_or_snapshot, 
complete_img_archive_name, complete_namespace, complete_pxar_archive_name, complete_repository, connect, dir_or_last_from_group, - extract_repository_from_value, optional_ns_param, record_repository, BufferedDynamicReadAt, - REPO_URL_SCHEMA, + extract_repository_from_value, optional_ns_param, record_repository, REPO_URL_SCHEMA, }; #[sortable] @@ -156,7 +157,7 @@ fn complete_mapping_names( ) -> Vec { match pbs_fuse_loop::find_all_mappings() { Ok(mappings) => mappings - .filter_map(|(name, _)| proxmox_sys::systemd::unescape_unit(&name).ok()) + .filter_map(|(name, _)| proxmox_systemd::unescape_unit(&name).ok()) .collect(), Err(_) => Vec::new(), } @@ -183,7 +184,17 @@ fn mount( Ok(ForkResult::Parent { .. }) => { drop(pw); // Blocks the parent process until we are ready to go in the child - let _res = nix::unistd::read(pr.as_raw_fd(), &mut [0]).unwrap(); + let mut buffer = [0u8]; + nix::unistd::read(pr.as_raw_fd(), &mut buffer).unwrap(); + + // Read buffer didn't change, which indicates that nothing has been read and the file + // descriptor has probably been closed. This means that there was an error in the child + // process and it did not daemonize correctly. + if buffer[0] == 0 { + // Wait for the child process to finish, so it can return a nice error. + nix::sys::wait::wait().unwrap(); + } + Ok(Value::Null) } Ok(ForkResult::Child) => { @@ -219,7 +230,7 @@ async fn mount_do(param: Value, pipe: Option) -> Result { } }; - let server_archive_name = if archive_name.ends_with(".pxar") { + let server_archive_name = if has_pxar_filename_extension(archive_name, false) { if target.is_none() { bail!("use the 'mount' command to mount pxar archives"); } @@ -246,8 +257,6 @@ async fn mount_do(param: Value, pipe: Option) -> Result { let (manifest, _) = client.download_manifest().await?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; - let file_info = manifest.lookup_file_info(&server_archive_name)?; - let daemonize = || -> Result<(), Error> { if let Some(pipe) = pipe { nix::unistd::chdir(Path::new("/")).unwrap(); @@ -266,7 +275,7 @@ async fn mount_do(param: Value, pipe: Option) -> Result { } // Signal the parent process that we are done with the setup and it can // terminate. 
- nix::unistd::write(pipe.as_raw_fd(), &[0u8])?; + nix::unistd::write(pipe.as_raw_fd(), &[1u8])?; let _: OwnedFd = pipe; } @@ -283,20 +292,13 @@ async fn mount_do(param: Value, pipe: Option) -> Result { futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed()); if server_archive_name.ends_with(".didx") { - let index = client - .download_dynamic_index(&manifest, &server_archive_name) - .await?; - let most_used = index.find_most_used_chunks(8); - let chunk_reader = RemoteChunkReader::new( + let decoder = helper::get_pxar_fuse_accessor( + &server_archive_name, client.clone(), - crypt_config, - file_info.chunk_crypt_mode(), - most_used, - ); - let reader = BufferedDynamicReader::new(index, chunk_reader); - let archive_size = reader.archive_size(); - let reader: pbs_pxar_fuse::Reader = Arc::new(BufferedDynamicReadAt::new(reader)); - let decoder = pbs_pxar_fuse::Accessor::new(reader, archive_size).await?; + &manifest, + crypt_config.clone(), + ) + .await?; let session = pbs_pxar_fuse::Session::mount(decoder, options, false, Path::new(target.unwrap())) @@ -311,6 +313,7 @@ async fn mount_do(param: Value, pipe: Option) -> Result { } } } else if server_archive_name.ends_with(".fidx") { + let file_info = manifest.lookup_file_info(&server_archive_name)?; let index = client .download_fixed_index(&manifest, &server_archive_name) .await?; @@ -324,7 +327,7 @@ async fn mount_do(param: Value, pipe: Option) -> Result { let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable(); let name = &format!("{}:{}/{}", repo, path, archive_name); - let name_escaped = proxmox_sys::systemd::escape_unit(name, false); + let name_escaped = proxmox_systemd::escape_unit(name, false); let mut session = pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?; @@ -386,7 +389,7 @@ fn unmap( pbs_fuse_loop::cleanup_unused_run_files(None); let mut any = false; for (backing, loopdev) in pbs_fuse_loop::find_all_mappings()? 
{ - let name = proxmox_sys::systemd::unescape_unit(&backing)?; + let name = proxmox_systemd::unescape_unit(&backing)?; log::info!( "{}:\t{}", loopdev.unwrap_or_else(|| "(unmapped)".to_string()), @@ -409,7 +412,7 @@ fn unmap( if name.starts_with("/dev/loop") { pbs_fuse_loop::unmap_loopdev(name)?; } else { - let name = proxmox_sys::systemd::escape_unit(&name, false); + let name = proxmox_systemd::escape_unit(&name, false); pbs_fuse_loop::unmap_name(name)?; } diff --git a/proxmox-file-restore/Cargo.toml b/proxmox-file-restore/Cargo.toml index cd92acb32..8f99ecf9b 100644 --- a/proxmox-file-restore/Cargo.toml +++ b/proxmox-file-restore/Cargo.toml @@ -21,16 +21,17 @@ pxar.workspace = true proxmox-async.workspace = true proxmox-compression.workspace = true proxmox-lang.workspace=true +proxmox-log.workspace=true proxmox-router = { workspace = true, features = [ "cli" ] } proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-sys = { workspace = true, features = [ "logrotate" ] } +proxmox-systemd.workspace = true proxmox-time.workspace = true proxmox-uuid.workspace = true pbs-api-types.workspace = true pbs-buildcfg.workspace = true pbs-client.workspace = true -pbs-config.workspace = true pbs-datastore.workspace = true pbs-key-config.workspace = true pbs-tools.workspace = true diff --git a/proxmox-file-restore/src/block_driver.rs b/proxmox-file-restore/src/block_driver.rs index fa954832d..56b868b5c 100644 --- a/proxmox-file-restore/src/block_driver.rs +++ b/proxmox-file-restore/src/block_driver.rs @@ -37,7 +37,7 @@ pub type Async = Pin + Send>>; /// An abstract implementation for retrieving data out of a block file backup pub trait BlockRestoreDriver { - /// List ArchiveEntrys for the given image file and path + /// List ArchiveEntries for the given image file and path fn data_list( &self, details: SnapRestoreDetails, diff --git a/proxmox-file-restore/src/block_driver_qemu.rs b/proxmox-file-restore/src/block_driver_qemu.rs index fdaf71df6..adc6ccc7c 100644 --- a/proxmox-file-restore/src/block_driver_qemu.rs +++ b/proxmox-file-restore/src/block_driver_qemu.rs @@ -12,6 +12,7 @@ use serde::{Deserialize, Serialize}; use serde_json::json; use proxmox_sys::fs::lock_file; +use proxmox_systemd; use pbs_api_types::{file_restore::FileRestoreFormat, BackupDir, BackupNamespace}; use pbs_client::{BackupRepository, VsockClient, DEFAULT_VSOCK_PORT}; @@ -88,7 +89,7 @@ fn make_name(repo: &BackupRepository, ns: &BackupNamespace, snap: &BackupDir) -> } else { format!("qemu_{repo}:{ns}/{snap}") }; - proxmox_sys::systemd::escape_unit(full, false) + proxmox_systemd::escape_unit(full, false) } /// remove non-responsive VMs from given map, returns 'true' if map was modified @@ -309,7 +310,7 @@ impl BlockRestoreDriver for QemuBlockDriver { let resp = client .get("api2/json/status", Some(json!({"keep-timeout": true}))) .await; - let name = proxmox_sys::systemd::unescape_unit(n) + let name = proxmox_systemd::unescape_unit(n) .unwrap_or_else(|_| "".to_owned()); let mut extra = json!({"pid": s.pid, "cid": s.cid}); @@ -344,7 +345,7 @@ impl BlockRestoreDriver for QemuBlockDriver { fn stop(&self, id: String) -> Async> { async move { - let name = proxmox_sys::systemd::escape_unit(&id, false); + let name = proxmox_systemd::escape_unit(&id, false); let mut map = VMStateMap::load()?; let map_mod = cleanup_map(&mut map.map).await; match map.map.get(&name) { @@ -374,7 +375,7 @@ impl BlockRestoreDriver for QemuBlockDriver { match VMStateMap::load_read_only() { Ok(state) => state .iter() - .filter_map(|(name, _)| 
proxmox_sys::systemd::unescape_unit(name).ok()) + .filter_map(|(name, _)| proxmox_systemd::unescape_unit(name).ok()) .collect(), Err(_) => Vec::new(), } diff --git a/proxmox-file-restore/src/main.rs b/proxmox-file-restore/src/main.rs index 50875a636..cda4e911c 100644 --- a/proxmox-file-restore/src/main.rs +++ b/proxmox-file-restore/src/main.rs @@ -9,10 +9,11 @@ use serde_json::{json, Value}; use tokio::io::AsyncWriteExt; use proxmox_compression::zstd::ZstdEncoder; +use proxmox_log::init_cli_logger; use proxmox_router::cli::{ complete_file_name, default_table_format_options, format_and_print_result_full, - get_output_format, init_cli_logger, run_cli_command, CliCommand, CliCommandMap, CliEnvironment, - ColumnConfig, OUTPUT_FORMAT, + get_output_format, run_cli_command, CliCommand, CliCommandMap, CliEnvironment, ColumnConfig, + OUTPUT_FORMAT, }; use proxmox_router::{http_err, HttpError}; use proxmox_schema::api; @@ -24,6 +25,7 @@ use pbs_api_types::{file_restore::FileRestoreFormat, BackupDir, BackupNamespace, use pbs_client::pxar::{create_tar, create_zip, extract_sub_dir, extract_sub_dir_seq}; use pbs_client::tools::{ complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value, + has_pxar_filename_extension, key_source::{ crypto_parameters_keep_fd, format_key_source, get_encryption_key_password, KEYFD_SCHEMA, KEYFILE_SCHEMA, @@ -34,7 +36,7 @@ use pbs_client::{BackupReader, BackupRepository, RemoteChunkReader}; use pbs_datastore::catalog::{ArchiveEntry, CatalogReader, DirEntryAttribute}; use pbs_datastore::dynamic_index::{BufferedDynamicReader, LocalDynamicReadAt}; use pbs_datastore::index::IndexFile; -use pbs_datastore::CATALOG_NAME; +use pbs_datastore::{BackupManifest, CATALOG_NAME}; use pbs_key_config::decrypt_key; use pbs_tools::crypt_config::CryptConfig; @@ -75,7 +77,7 @@ fn parse_path(path: String, base64: bool) -> Result { (file, path) }; - if file.ends_with(".pxar.didx") { + if has_pxar_filename_extension(&file, true) { Ok(ExtractPath::Pxar(file, path)) } else if file.ends_with(".img.fidx") { Ok(ExtractPath::VM(file, path)) @@ -123,11 +125,14 @@ async fn list_files( ExtractPath::ListArchives => { let mut entries = vec![]; for file in manifest.files() { - if !file.filename.ends_with(".pxar.didx") && !file.filename.ends_with(".img.fidx") { + if !file.filename.ends_with(".pxar.didx") + && !file.filename.ends_with(".mpxar.didx") + && !file.filename.ends_with(".img.fidx") + { continue; } let path = format!("/{}", file.filename); - let attr = if file.filename.ends_with(".pxar.didx") { + let attr = if has_pxar_filename_extension(&file.filename, true) { // a pxar file is a file archive, so it's root is also a directory root Some(&DirEntryAttribute::Directory { start: 0 }) } else { @@ -143,24 +148,48 @@ async fn list_files( Ok(entries) } ExtractPath::Pxar(file, mut path) => { - let index = client - .download_dynamic_index(&manifest, CATALOG_NAME) + if let Ok(file_info) = manifest.lookup_file_info(CATALOG_NAME) { + let index = client + .download_dynamic_index(&manifest, CATALOG_NAME) + .await?; + let most_used = index.find_most_used_chunks(8); + let chunk_reader = RemoteChunkReader::new( + client.clone(), + crypt_config, + file_info.chunk_crypt_mode(), + most_used, + ); + let reader = BufferedDynamicReader::new(index, chunk_reader); + let mut catalog_reader = CatalogReader::new(reader); + + let mut fullpath = file.into_bytes(); + fullpath.append(&mut path); + + catalog_reader.list_dir_contents(&fullpath) + } else { + if path.is_empty() { + path = vec![b'/']; + } + + 
let (archive_name, _payload_archive_name) = + pbs_client::tools::get_pxar_archive_names(&file, &manifest)?; + + let (reader, archive_size) = get_remote_pxar_reader( + &archive_name, + client.clone(), + &manifest, + crypt_config.clone(), + ) .await?; - let most_used = index.find_most_used_chunks(8); - let file_info = manifest.lookup_file_info(CATALOG_NAME)?; - let chunk_reader = RemoteChunkReader::new( - client.clone(), - crypt_config, - file_info.chunk_crypt_mode(), - most_used, - ); - let reader = BufferedDynamicReader::new(index, chunk_reader); - let mut catalog_reader = CatalogReader::new(reader); - let mut fullpath = file.into_bytes(); - fullpath.append(&mut path); + // only care about the metadata, don't attach a payload reader + let reader = pxar::PxarVariant::Unified(reader); + let accessor = Accessor::new(reader, archive_size).await?; + let path = OsStr::from_bytes(&path); - catalog_reader.list_dir_contents(&fullpath) + pbs_client::tools::pxar_metadata_catalog_lookup(accessor, path, Some(&archive_name)) + .await + } } ExtractPath::VM(file, path) => { let details = SnapRestoreDetails { @@ -325,6 +354,31 @@ async fn list( Ok(()) } +async fn get_remote_pxar_reader( + archive_name: &str, + client: Arc, + manifest: &BackupManifest, + crypt_config: Option>, +) -> Result<(LocalDynamicReadAt, u64), Error> { + let index = client + .download_dynamic_index(manifest, archive_name) + .await?; + let most_used = index.find_most_used_chunks(8); + + let file_info = manifest.lookup_file_info(archive_name)?; + let chunk_reader = RemoteChunkReader::new( + client.clone(), + crypt_config, + file_info.chunk_crypt_mode(), + most_used, + ); + + let reader = BufferedDynamicReader::new(index, chunk_reader); + let archive_size = reader.archive_size(); + + Ok((LocalDynamicReadAt::new(reader), archive_size)) +} + #[api( input: { properties: { @@ -442,23 +496,29 @@ async fn extract( match path { ExtractPath::Pxar(archive_name, path) => { - let file_info = manifest.lookup_file_info(&archive_name)?; - let index = client - .download_dynamic_index(&manifest, &archive_name) - .await?; - let most_used = index.find_most_used_chunks(8); - let chunk_reader = RemoteChunkReader::new( + let (archive_name, payload_archive_name) = + pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?; + let (reader, archive_size) = get_remote_pxar_reader( + &archive_name, client.clone(), - crypt_config, - file_info.chunk_crypt_mode(), - most_used, - ); - let reader = BufferedDynamicReader::new(index, chunk_reader); + &manifest, + crypt_config.clone(), + ) + .await?; - let archive_size = reader.archive_size(); - let reader = LocalDynamicReadAt::new(reader); + let reader = if let Some(payload_archive_name) = payload_archive_name { + let (payload_reader, payload_size) = + get_remote_pxar_reader(&payload_archive_name, client, &manifest, crypt_config) + .await?; + pxar::PxarVariant::Split(reader, (payload_reader, payload_size)) + } else { + pxar::PxarVariant::Unified(reader) + }; let decoder = Accessor::new(reader, archive_size).await?; - extract_to_target(decoder, &path, target, format, zstd).await?; + + extract_to_target(decoder, &path, target, format, zstd) + .await + .map_err(|err| format_err!("error extracting archive - {err:#}"))?; } ExtractPath::VM(file, path) => { let details = SnapRestoreDetails { @@ -483,7 +543,7 @@ async fn extract( false, ) .await?; - let decoder = Decoder::from_tokio(reader).await?; + let decoder = Decoder::from_tokio(pxar::PxarVariant::Unified(reader)).await?; extract_sub_dir_seq(&target, 
decoder).await?; // we extracted a .pxarexclude-cli file auto-generated by the VM when encoding the @@ -586,10 +646,10 @@ where fn main() { let loglevel = match qemu_helper::debug_mode() { - true => "debug", - false => "info", + true => proxmox_log::LevelFilter::DEBUG, + false => proxmox_log::LevelFilter::INFO, }; - init_cli_logger("PBS_LOG", loglevel); + init_cli_logger("PBS_LOG", loglevel).expect("failed to initiate logger"); let list_cmd_def = CliCommand::new(&API_METHOD_LIST) .arg_param(&["snapshot", "path"]) diff --git a/proxmox-file-restore/src/qemu_helper.rs b/proxmox-file-restore/src/qemu_helper.rs index 531d66360..dc905de51 100644 --- a/proxmox-file-restore/src/qemu_helper.rs +++ b/proxmox-file-restore/src/qemu_helper.rs @@ -51,7 +51,7 @@ fn create_restore_log_dir() -> Result { Ok(logpath) } -fn validate_img_existance(debug: bool) -> Result<(), Error> { +fn validate_img_existence(debug: bool) -> Result<(), Error> { let kernel = PathBuf::from(pbs_buildcfg::PROXMOX_BACKUP_KERNEL_FN); let initramfs = PathBuf::from(if debug { pbs_buildcfg::PROXMOX_BACKUP_INITRAMFS_DBG_FN @@ -213,7 +213,7 @@ pub async fn start_vm( let debug = debug_mode(); - validate_img_existance(debug)?; + validate_img_existence(debug)?; let pid; let (mut pid_file, pid_path) = diff --git a/proxmox-restore-daemon/Cargo.toml b/proxmox-restore-daemon/Cargo.toml index 161b371d3..9d31978b1 100644 --- a/proxmox-restore-daemon/Cargo.toml +++ b/proxmox-restore-daemon/Cargo.toml @@ -4,6 +4,7 @@ version = "0.1.0" authors.workspace = true edition.workspace = true description = "Proxmox Restore Daemon" +rust-version.workspace = true [dependencies] anyhow.workspace = true @@ -12,12 +13,10 @@ env_logger.workspace = true futures.workspace = true http.workspace = true hyper.workspace = true -lazy_static.workspace = true libc.workspace = true log.workspace = true nix.workspace = true regex.workspace = true -serde.workspace = true serde_json.workspace = true tokio = { workspace = true, features = ["macros", "parking_lot", "sync"] } tokio-stream.workspace = true diff --git a/proxmox-restore-daemon/src/main.rs b/proxmox-restore-daemon/src/main.rs index c07fdc483..87c5d7654 100644 --- a/proxmox-restore-daemon/src/main.rs +++ b/proxmox-restore-daemon/src/main.rs @@ -6,10 +6,9 @@ use std::os::unix::{ net, }; use std::path::Path; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, LazyLock, Mutex}; use anyhow::{bail, format_err, Error}; -use lazy_static::lazy_static; use log::{error, info}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -29,12 +28,9 @@ pub const MAX_PENDING: usize = 32; /// Will be present in base initramfs pub const VM_DETECT_FILE: &str = "/restore-vm-marker"; -lazy_static! { - /// The current disks state. Use for accessing data on the attached snapshots. - pub static ref DISK_STATE: Arc> = { - Arc::new(Mutex::new(DiskState::scan().unwrap())) - }; -} +/// The current disks state. Use for accessing data on the attached snapshots. 
+pub static DISK_STATE: LazyLock>> = + LazyLock::new(|| Arc::new(Mutex::new(DiskState::scan().unwrap()))); fn init_disk_state() { info!("scanning all disks..."); diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs index c20552225..6c27a8861 100644 --- a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs +++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs @@ -23,7 +23,9 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fs::read_subdir; use pbs_api_types::file_restore::{FileRestoreFormat, RestoreDaemonStatus}; -use pbs_client::pxar::{create_archive, Flags, PxarCreateOptions, ENCODER_MAX_ENTRIES}; +use pbs_client::pxar::{ + create_archive, Flags, PxarCreateOptions, PxarWriters, ENCODER_MAX_ENTRIES, +}; use pbs_datastore::catalog::{ArchiveEntry, DirEntryAttribute}; use pbs_tools::json::required_string_param; @@ -116,7 +118,7 @@ fn get_dir_entry(path: &Path) -> Result { } #[api( - streaming: true, + serializing: true, input: { properties: { "path": { @@ -182,12 +184,17 @@ fn list( let mut full_path = PathBuf::new(); full_path.push(param_path_buf); full_path.push(path); - let entry = get_dir_entry(&full_vm_path); - if let Ok(entry) = entry { - res.push(ArchiveEntry::new( + match get_dir_entry(&full_vm_path) { + Ok(entry) => res.push(ArchiveEntry::new( full_path.as_os_str().as_bytes(), Some(&entry), - )); + )), + Err(err) => { + eprintln!( + "error getting entry: {:?} : {err}", + full_path.as_os_str() + ); + } } } } @@ -353,11 +360,21 @@ fn extract( patterns, skip_lost_and_found: false, skip_e2big_xattr: false, + previous_ref: None, + max_cache_size: None, }; - let pxar_writer = TokioWriter::new(writer); - create_archive(dir, pxar_writer, Flags::DEFAULT, |_| Ok(()), None, options) - .await + let pxar_writer = pxar::PxarVariant::Unified(TokioWriter::new(writer)); + create_archive( + dir, + PxarWriters::new(pxar_writer, None), + Flags::DEFAULT, + |_| Ok(()), + options, + None, + None, + ) + .await } .await; if let Err(err) = result { diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs index 3ff409287..d2aa49914 100644 --- a/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs +++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs @@ -4,9 +4,9 @@ use std::fs::{create_dir_all, File}; use std::io::{BufRead, BufReader}; use std::path::{Component, Path, PathBuf}; use std::process::Command; +use std::sync::LazyLock; use anyhow::{bail, format_err, Error}; -use lazy_static::lazy_static; use log::{info, warn}; use proxmox_schema::const_regex; @@ -21,26 +21,25 @@ const_regex! { ZPOOL_IMPORT_DISK_REGEX = r"^\t {2,4}(vd[a-z]+(?:\d+)?)\s+ONLINE$"; } -lazy_static! { - static ref FS_OPT_MAP: HashMap<&'static str, &'static str> = { - let mut m = HashMap::new(); +static FS_OPT_MAP: LazyLock> = LazyLock::new(|| { + let mut m = HashMap::new(); - // otherwise ext complains about mounting read-only - m.insert("ext2", "noload"); - m.insert("ext3", "noload"); - m.insert("ext4", "noload"); + // otherwise ext complains about mounting read-only + m.insert("ext2", "noload"); + m.insert("ext3", "noload"); + m.insert("ext4", "noload"); - m.insert("xfs", "norecovery"); + m.insert("xfs", "norecovery"); - // ufs2 is used as default since FreeBSD 5.0 released in 2003, so let's assume that - // whatever the user is trying to restore is not using anything older... 
- m.insert("ufs", "ufstype=ufs2"); + // ufs2 is used as default since FreeBSD 5.0 released in 2003, so let's assume that + // whatever the user is trying to restore is not using anything older... + m.insert("ufs", "ufstype=ufs2"); - m.insert("ntfs", "utf8"); + m.insert("ntfs", "utf8"); + m.insert("ntfs3", "iocharset=utf8"); - m - }; -} + m +}); pub enum ResolveResult { Path(PathBuf), @@ -99,7 +98,7 @@ impl Bucket { let ty = ty.as_ref(); haystack.iter_mut().find(|b| match b { Bucket::Partition(data) => { - if let Some(comp) = comp.get(0) { + if let Some(comp) = comp.first() { ty == "part" && comp.as_ref().parse::().unwrap() == data.number } else { false @@ -107,14 +106,14 @@ impl Bucket { } Bucket::RawFs(_) => ty == "raw", Bucket::ZPool(data) => { - if let Some(ref comp) = comp.get(0) { + if let Some(ref comp) = comp.first() { ty == "zpool" && comp.as_ref() == data.name } else { false } } Bucket::LVM(data) => { - if let (Some(ref vg), Some(ref lv)) = (comp.get(0), comp.get(1)) { + if let (Some(ref vg), Some(ref lv)) = (comp.first(), comp.get(1)) { ty == "lvm" && vg.as_ref() == data.vg_name && lv.as_ref() == data.lv_name } else { false @@ -633,12 +632,21 @@ impl DiskState { _ => bail!("no or invalid image in path"), }; - let buckets = match self.disk_map.get_mut( - req_fidx - .strip_suffix(".img.fidx") - .unwrap_or_else(|| req_fidx.as_ref()), - ) { + let serial = req_fidx + .strip_suffix(".img.fidx") + .unwrap_or_else(|| req_fidx.as_ref()); + let buckets = match self.disk_map.get_mut(serial) { Some(x) => x, + None if serial.len() > 20 => { + let (truncated_serial, _) = serial.split_at(20); + eprintln!( + "given image '{req_fidx}' not found with '{serial}', trying with '{truncated_serial}'." + ); + match self.disk_map.get_mut(truncated_serial) { + Some(x) => x, + None => bail!("given image '{req_fidx}' not found with '{truncated_serial}'"), + } + } None => bail!("given image '{req_fidx}' not found"), }; diff --git a/pxar-bin/Cargo.toml b/pxar-bin/Cargo.toml index d91c03d3e..d0d7ab24d 100644 --- a/pxar-bin/Cargo.toml +++ b/pxar-bin/Cargo.toml @@ -11,7 +11,6 @@ path = "src/main.rs" [dependencies] anyhow.workspace = true futures.workspace = true -log.workspace = true nix.workspace = true serde_json.workspace = true tokio = { workspace = true, features = [ "rt", "rt-multi-thread" ] } @@ -20,10 +19,11 @@ pathpatterns.workspace = true pxar.workspace = true proxmox-async.workspace = true +proxmox-human-byte.workspace = true +proxmox-log.workspace = true proxmox-router = { workspace = true, features = ["cli", "server"] } proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-sys.workspace = true pbs-client.workspace = true pbs-pxar-fuse.workspace = true -pbs-tools.workspace = true diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs index 2bbe90e34..9d822eae2 100644 --- a/pxar-bin/src/main.rs +++ b/pxar-bin/src/main.rs @@ -13,9 +13,13 @@ use tokio::signal::unix::{signal, SignalKind}; use pathpatterns::{MatchEntry, MatchType, PatternFlag}; use pbs_client::pxar::{ - format_single_line_entry, Flags, OverwriteFlags, PxarExtractOptions, ENCODER_MAX_ENTRIES, + format_single_line_entry, Flags, OverwriteFlags, PxarExtractOptions, PxarWriters, + ENCODER_MAX_ENTRIES, }; +use pxar::EntryKind; +use proxmox_human_byte::HumanByte; +use proxmox_log::{debug, enabled, error, init_cli_logger, Level}; use proxmox_router::cli::*; use proxmox_schema::api; @@ -24,13 +28,19 @@ fn extract_archive_from_reader( target: &str, feature_flags: Flags, options: PxarExtractOptions, + payload_reader: 
Option<&mut R>, ) -> Result<(), Error> { + let reader = if let Some(payload_reader) = payload_reader { + pxar::PxarVariant::Split(reader, payload_reader) + } else { + pxar::PxarVariant::Unified(reader) + }; pbs_client::pxar::extract_archive( pxar::decoder::Decoder::from_std(reader)?, Path::new(target), feature_flags, |path| { - log::debug!("{:?}", path); + debug!("{path:?}"); }, options, ) @@ -119,6 +129,14 @@ fn extract_archive_from_reader( optional: true, default: false, }, + "payload-input": { + description: "'ppxar' payload input data file to restore split archive.", + optional: true, + }, + "prelude-target": { + description: "Path to restore pxar archive prelude to.", + optional: true, + }, }, }, )] @@ -141,6 +159,8 @@ fn extract_archive( no_fifos: bool, no_sockets: bool, strict: bool, + payload_input: Option, + prelude_target: Option, ) -> Result<(), Error> { let mut feature_flags = Flags::DEFAULT; if no_xattrs { @@ -202,7 +222,7 @@ fn extract_archive( // otherwise we want to log them but not act on them Some(Box::new(move |err| { was_ok.store(false, Ordering::Release); - log::error!("error: {}", err); + error!("error: {err:?}"); Ok(()) }) as Box Result<(), Error> + Send>) @@ -214,17 +234,32 @@ fn extract_archive( overwrite_flags, extract_match_default, on_error, + prelude_path: prelude_target.map(PathBuf::from), }; if archive == "-" { let stdin = std::io::stdin(); let mut reader = stdin.lock(); - extract_archive_from_reader(&mut reader, target, feature_flags, options)?; + extract_archive_from_reader(&mut reader, target, feature_flags, options, None) + .map_err(|err| format_err!("error extracting archive - {err:#}"))?; } else { - log::debug!("PXAR extract: {}", archive); + debug!("PXAR extract: {archive}"); let file = std::fs::File::open(archive)?; let mut reader = std::io::BufReader::new(file); - extract_archive_from_reader(&mut reader, target, feature_flags, options)?; + let mut payload_reader = if let Some(payload_input) = payload_input { + let file = std::fs::File::open(payload_input)?; + Some(std::io::BufReader::new(file)) + } else { + None + }; + extract_archive_from_reader( + &mut reader, + target, + feature_flags, + options, + payload_reader.as_mut(), + ) + .map_err(|err| format_err!("error extracting archive - {err:#}"))? 
} if !was_ok.load(Ordering::Acquire) { @@ -294,6 +329,10 @@ fn extract_archive( minimum: 0, maximum: isize::MAX, }, + "payload-output": { + description: "'ppxar' payload output data file to create split archive.", + optional: true, + }, }, }, )] @@ -311,6 +350,7 @@ async fn create_archive( no_sockets: bool, exclude: Option>, entries_max: isize, + payload_output: Option, ) -> Result<(), Error> { let patterns = { let input = exclude.unwrap_or_default(); @@ -336,6 +376,8 @@ async fn create_archive( patterns, skip_lost_and_found: false, skip_e2big_xattr: false, + previous_ref: None, + max_cache_size: None, }; let source = PathBuf::from(source); @@ -352,6 +394,16 @@ async fn create_archive( .mode(0o640) .open(archive)?; + let payload_file = payload_output + .map(|payload_output| { + OpenOptions::new() + .create_new(true) + .write(true) + .mode(0o640) + .open(payload_output) + }) + .transpose()?; + let writer = std::io::BufWriter::with_capacity(1024 * 1024, file); let mut feature_flags = Flags::DEFAULT; if no_xattrs { @@ -373,17 +425,26 @@ async fn create_archive( feature_flags.remove(Flags::WITH_SOCKETS); } - let writer = pxar::encoder::sync::StandardWriter::new(writer); + let writer = if let Some(payload_file) = payload_file { + let payload_writer = std::io::BufWriter::with_capacity(1024 * 1024, payload_file); + pxar::PxarVariant::Split( + pxar::encoder::sync::StandardWriter::new(writer), + pxar::encoder::sync::StandardWriter::new(payload_writer), + ) + } else { + pxar::PxarVariant::Unified(pxar::encoder::sync::StandardWriter::new(writer)) + }; pbs_client::pxar::create_archive( dir, - writer, + PxarWriters::new(writer, None), feature_flags, move |path| { - log::debug!("{:?}", path); + debug!("{path:?}"); Ok(()) }, - None, options, + None, + None, ) .await?; @@ -400,25 +461,41 @@ async fn create_archive( optional: true, default: false, }, + "payload-input": { + description: "'ppxar' payload input data file to restore split archive.", + optional: true, + }, }, }, )] /// Mount the archive to the provided mountpoint via FUSE. -async fn mount_archive(archive: String, mountpoint: String, verbose: bool) -> Result<(), Error> { +async fn mount_archive( + archive: String, + mountpoint: String, + verbose: bool, + payload_input: Option, +) -> Result<(), Error> { let archive = Path::new(&archive); let mountpoint = Path::new(&mountpoint); let options = OsStr::new("ro,default_permissions"); + let payload_input = payload_input.map(PathBuf::from); - let session = pbs_pxar_fuse::Session::mount_path(archive, options, verbose, mountpoint) - .await - .map_err(|err| format_err!("pxar mount failed: {}", err))?; + let session = pbs_pxar_fuse::Session::mount_path( + archive, + options, + verbose, + mountpoint, + payload_input.as_deref(), + ) + .await + .map_err(|err| format_err!("pxar mount failed: {}", err))?; let mut interrupt = signal(SignalKind::interrupt())?; select! { res = session.fuse() => res?, _ = interrupt.recv().fuse() => { - log::debug!("interrupted"); + debug!("interrupted"); } } @@ -431,25 +508,68 @@ async fn mount_archive(archive: String, mountpoint: String, verbose: bool) -> Re archive: { description: "Archive name.", }, + "payload-input": { + description: "'ppxar' payload input data file for split archive.", + optional: true, + }, }, }, )] /// List the contents of an archive. -fn dump_archive(archive: String) -> Result<(), Error> { - for entry in pxar::decoder::Decoder::open(archive)? 
{ +fn dump_archive(archive: String, payload_input: Option) -> Result<(), Error> { + if archive.ends_with(".mpxar") && payload_input.is_none() { + bail!("Payload input required for split pxar archives"); + } + + let input = if let Some(payload_input) = payload_input { + pxar::PxarVariant::Split(archive, payload_input) + } else { + pxar::PxarVariant::Unified(archive) + }; + + let mut last = None; + for entry in pxar::decoder::Decoder::open(input)? { let entry = entry?; - if log::log_enabled!(log::Level::Debug) { - log::debug!("{}", format_single_line_entry(&entry)); + if enabled!(Level::DEBUG) { + match entry.kind() { + EntryKind::Version(version) => { + debug!("pxar format version '{version:?}'"); + continue; + } + EntryKind::Prelude(prelude) => { + debug!("prelude of size {}", HumanByte::from(prelude.data.len())); + continue; + } + EntryKind::File { + payload_offset: Some(offset), + size, + .. + } => { + if let Some(last) = last { + let skipped = offset - last; + if skipped > 0 { + debug!("Encountered padding of {skipped} bytes"); + } + } + last = Some(offset + size + std::mem::size_of::() as u64); + } + _ => (), + } + + println!("{}", format_single_line_entry(&entry)); } else { - log::info!("{:?}", entry.path()); + match entry.kind() { + EntryKind::Version(_) | EntryKind::Prelude(_) => continue, + _ => println!("{:?}", entry.path()), + } } } Ok(()) } fn main() { - init_cli_logger("PXAR_LOG", "info"); + init_cli_logger("PXAR_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger"); let cmd_def = CliCommandMap::new() .insert( @@ -457,7 +577,8 @@ fn main() { CliCommand::new(&API_METHOD_CREATE_ARCHIVE) .arg_param(&["archive", "source"]) .completion_cb("archive", complete_file_name) - .completion_cb("source", complete_file_name), + .completion_cb("source", complete_file_name) + .completion_cb("payload-output", complete_file_name), ) .insert( "extract", @@ -465,20 +586,24 @@ fn main() { .arg_param(&["archive", "target"]) .completion_cb("archive", complete_file_name) .completion_cb("target", complete_file_name) - .completion_cb("files-from", complete_file_name), + .completion_cb("files-from", complete_file_name) + .completion_cb("payload-input", complete_file_name) + .completion_cb("prelude-target", complete_file_name), ) .insert( "mount", CliCommand::new(&API_METHOD_MOUNT_ARCHIVE) .arg_param(&["archive", "mountpoint"]) .completion_cb("archive", complete_file_name) - .completion_cb("mountpoint", complete_file_name), + .completion_cb("mountpoint", complete_file_name) + .completion_cb("payload-input", complete_file_name), ) .insert( "list", CliCommand::new(&API_METHOD_DUMP_ARCHIVE) .arg_param(&["archive"]) - .completion_cb("archive", complete_file_name), + .completion_cb("archive", complete_file_name) + .completion_cb("payload-input", complete_file_name), ); let rpcenv = CliEnvironment::new(); diff --git a/pxar-bin/tests/pxar.rs b/pxar-bin/tests/pxar.rs index 37ea514cd..2da02990b 100644 --- a/pxar-bin/tests/pxar.rs +++ b/pxar-bin/tests/pxar.rs @@ -7,15 +7,17 @@ fn pxar_create_and_extract() { let src_dir = "../tests/catar_data/test_xattrs_src/"; let dest_dir = "../tests/catar_data/test_xattrs_dest/"; + let target_subdir = std::env::var("DEB_HOST_RUST_TYPE").unwrap_or_default(); + let exec_path = if cfg!(debug_assertions) { - "../target/debug/pxar" + format!("../target/{target_subdir}/debug/pxar") } else { - "../target/release/pxar" + format!("../target/{target_subdir}/release/pxar") }; println!("run '{} create archive.pxar {}'", exec_path, src_dir); - Command::new(exec_path) + 
Command::new(&exec_path) .arg("create") .arg("./tests/archive.pxar") .arg(src_dir) @@ -24,7 +26,7 @@ fn pxar_create_and_extract() { println!("run '{} extract archive.pxar {}'", exec_path, dest_dir); - Command::new(exec_path) + Command::new(&exec_path) .arg("extract") .arg("./tests/archive.pxar") .arg("--target") @@ -78,3 +80,102 @@ fn pxar_create_and_extract() { panic!("pxar create and extract did not yield the same contents"); } } + +#[test] +fn pxar_split_archive_test() { + let src_dir = "../tests/catar_data/test_files_and_subdirs/"; + let dest_dir = "../tests/catar_data/test_files_and_subdirs_dest/"; + + let target_subdir = std::env::var("DEB_HOST_RUST_TYPE").unwrap_or_default(); + + let exec_path = if cfg!(debug_assertions) { + format!("../target/{target_subdir}/debug/pxar") + } else { + format!("../target/{target_subdir}/release/pxar") + }; + + println!("run '{exec_path} create archive.mpxar {src_dir} --payload-output archive.ppxar'"); + + Command::new(&exec_path) + .arg("create") + .arg("./tests/archive.mpxar") + .arg(src_dir) + .arg("--payload-output=./tests/archive.ppxar") + .status() + .unwrap_or_else(|err| panic!("Failed to invoke '{exec_path}': {err}")); + + let output = Command::new(&exec_path) + .arg("list") + .arg("./tests/archive.mpxar") + .arg("--payload-input=./tests/archive.ppxar") + .output() + .expect("failed to run pxar list"); + assert!(output.status.success()); + + let expected = "\"/\" +\"/a-test-symlink\" +\"/file1\" +\"/file2\" +\"/subdir1\" +\"/subdir1/subfile1\" +\"/subdir1/subfile2\" +"; + + assert_eq!(expected.as_bytes(), output.stdout); + + println!("run '{exec_path} extract archive.mpxar {dest_dir} --payload-input archive.ppxar'"); + + Command::new(&exec_path) + .arg("extract") + .arg("./tests/archive.mpxar") + .arg("--payload-input=./tests/archive.ppxar") + .arg("--target") + .arg(dest_dir) + .status() + .unwrap_or_else(|err| panic!("Failed to invoke '{exec_path}': {err}")); + + println!("run 'rsync --dry-run --itemize-changes --archive {src_dir} {dest_dir}' to verify'"); + + /* Use rsync with --dry-run and --itemize-changes to compare + src_dir and dest_dir */ + let stdout = Command::new("rsync") + .arg("--dry-run") + .arg("--itemize-changes") + .arg("--archive") + .arg(src_dir) + .arg(dest_dir) + .stdout(Stdio::piped()) + .spawn() + .unwrap() + .stdout + .unwrap(); + + let reader = BufReader::new(stdout); + let line_iter = reader.lines().map(|l| l.unwrap()); + let mut linecount = 0; + for curr in line_iter { + println!("{curr}"); + linecount += 1; + } + println!("Rsync listed {linecount} differences to address"); + + // Cleanup archive + Command::new("rm") + .arg("./tests/archive.mpxar") + .arg("./tests/archive.ppxar") + .status() + .unwrap_or_else(|err| panic!("Failed to invoke 'rm': {err}")); + + // Cleanup destination dir + Command::new("rm") + .arg("-r") + .arg(dest_dir) + .status() + .unwrap_or_else(|err| panic!("Failed to invoke 'rm': {err}")); + + // If source and destination folder contain the same content, + // the output of the rsync invocation should yield no lines. 
+ if linecount != 0 { + panic!("pxar create and extract did not yield the same contents"); + } +} diff --git a/src/acme/client.rs b/src/acme/client.rs index 96cee0fe0..d28ab3eb1 100644 --- a/src/acme/client.rs +++ b/src/acme/client.rs @@ -11,8 +11,8 @@ use nix::sys::stat::Mode; use serde::{Deserialize, Serialize}; use proxmox_acme::account::AccountCreator; -use proxmox_acme::account::AccountData as AcmeAccountData; use proxmox_acme::order::{Order, OrderData}; +use proxmox_acme::types::AccountData as AcmeAccountData; use proxmox_acme::Request as AcmeRequest; use proxmox_acme::{Account, Authorization, Challenge, Directory, Error, ErrorResponse}; use proxmox_http::client::Client; diff --git a/src/api2/access/acl.rs b/src/api2/access/acl.rs index 1ec4bd3d4..6fde99fd3 100644 --- a/src/api2/access/acl.rs +++ b/src/api2/access/acl.rs @@ -233,7 +233,7 @@ pub fn update_acl( if !delete { // Note: we allow to delete non-existent users let user_cfg = pbs_config::user::cached_config()?; - if user_cfg.sections.get(&auth_id.to_string()).is_none() { + if !user_cfg.sections.contains_key(&auth_id.to_string()) { bail!(format!( "no such {}.", if auth_id.is_token() { diff --git a/src/api2/access/domain.rs b/src/api2/access/domain.rs index 31aa62bc2..8f8eebdaf 100644 --- a/src/api2/access/domain.rs +++ b/src/api2/access/domain.rs @@ -1,13 +1,14 @@ //! List Authentication domains/realms -use anyhow::{format_err, Error}; +use anyhow::{bail, format_err, Error}; use serde_json::{json, Value}; use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap}; use proxmox_schema::api; use pbs_api_types::{ - Authid, BasicRealmInfo, Realm, PRIV_PERMISSIONS_MODIFY, REMOVE_VANISHED_SCHEMA, UPID_SCHEMA, + Authid, BasicRealmInfo, Realm, RealmRef, RealmType, PRIV_PERMISSIONS_MODIFY, + REMOVE_VANISHED_SCHEMA, UPID_SCHEMA, }; use crate::server::jobstate::Job; @@ -102,6 +103,7 @@ pub fn sync_realm( let upid_str = crate::server::do_realm_sync_job( job, realm.clone(), + realm_type_from_name(&realm)?, &auth_id, None, to_stdout, @@ -120,6 +122,18 @@ pub fn sync_realm( Ok(json!(upid_str)) } +fn realm_type_from_name(realm: &RealmRef) -> Result { + let config = pbs_config::domains::config()?.0; + + for (name, (section_type, _)) in config.sections.iter() { + if name == realm.as_str() { + return Ok(section_type.parse()?); + } + } + + bail!("unable to find realm {realm}") +} + const SYNC_ROUTER: Router = Router::new().post(&API_METHOD_SYNC_REALM); const SYNC_SUBDIRS: SubdirMap = &[("sync", &SYNC_ROUTER)]; diff --git a/src/api2/access/mod.rs b/src/api2/access/mod.rs index 15509fd9d..a60f0f86e 100644 --- a/src/api2/access/mod.rs +++ b/src/api2/access/mod.rs @@ -32,7 +32,7 @@ pub mod user; /// This means that user admins need to type in their own password while editing a user, and /// regular users, which can only change their own settings (checked at the API level), can change /// their own settings using their own password. 
-pub(self) async fn user_update_auth>( +async fn user_update_auth>( rpcenv: &mut dyn RpcEnvironment, userid: &Userid, password: Option, diff --git a/src/api2/access/user.rs b/src/api2/access/user.rs index e2b74237b..1b4adaf8f 100644 --- a/src/api2/access/user.rs +++ b/src/api2/access/user.rs @@ -147,11 +147,7 @@ pub fn create_user( let (mut section_config, _digest) = pbs_config::user::config()?; - if section_config - .sections - .get(config.userid.as_str()) - .is_some() - { + if section_config.sections.contains_key(config.userid.as_str()) { bail!("user '{}' already exists.", config.userid); } @@ -375,11 +371,8 @@ pub fn delete_user(userid: Userid, digest: Option) -> Result<(), Error> crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; } - match config.sections.get(userid.as_str()) { - Some(_) => { - config.sections.remove(userid.as_str()); - } - None => bail!("user '{}' does not exist.", userid), + if config.sections.remove(userid.as_str()).is_none() { + bail!("user '{}' does not exist.", userid); } pbs_config::user::save_config(&config)?; @@ -503,7 +496,7 @@ pub fn generate_token( let tokenid = Authid::from((userid.clone(), Some(token_name.clone()))); let tokenid_string = tokenid.to_string(); - if config.sections.get(&tokenid_string).is_some() { + if config.sections.contains_key(&tokenid_string) { bail!( "token '{}' for user '{}' already exists.", token_name.as_str(), @@ -654,15 +647,12 @@ pub fn delete_token( let tokenid = Authid::from((userid.clone(), Some(token_name.clone()))); let tokenid_string = tokenid.to_string(); - match config.sections.get(&tokenid_string) { - Some(_) => { - config.sections.remove(&tokenid_string); - } - None => bail!( + if config.sections.remove(&tokenid_string).is_none() { + bail!( "token '{}' of user '{}' does not exist.", token_name.as_str(), userid - ), + ); } token_shadow::delete_secret(&tokenid)?; diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index f7164b877..7660dd7f6 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -13,6 +13,7 @@ use hyper::{header, Body, Response, StatusCode}; use serde::Deserialize; use serde_json::{json, Value}; use tokio_stream::wrappers::ReceiverStream; +use tracing::{info, warn}; use proxmox_async::blocking::WrappedReaderStream; use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream}; @@ -21,25 +22,27 @@ use proxmox_router::{ http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap, }; +use proxmox_rrd_api_types::{RrdMode, RrdTimeframe}; use proxmox_schema::*; use proxmox_sortable_macro::sortable; use proxmox_sys::fs::{ file_read_firstline, file_read_optional_string, replace_file, CreateOptions, }; -use proxmox_sys::{task_log, task_warn}; +use proxmox_time::CalendarEvent; use pxar::accessor::aio::Accessor; use pxar::EntryKind; use pbs_api_types::{ print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType, - Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem, - KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem, - SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, - MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, - PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, 
PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, - UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, + Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, + GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation, + PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, + BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, + DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, + PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, + PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, + VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::pxar::{create_tar, create_zip}; use pbs_config::CachedUserInfo; @@ -67,7 +70,7 @@ use crate::backup::{ ListAccessibleBackupGroups, NS_PRIVS_OK, }; -use crate::server::jobstate::Job; +use crate::server::jobstate::{compute_schedule_status, Job, JobState}; const GROUP_NOTES_FILE_NAME: &str = "notes"; @@ -268,7 +271,7 @@ pub fn list_groups( }, access: { permission: &Permission::Anybody, - description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\ + description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \ or DATASTORE_PRUNE and being the owner of the group", }, )] @@ -375,7 +378,7 @@ pub async fn list_snapshot_files( }, access: { permission: &Permission::Anybody, - description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\ + description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \ or DATASTORE_PRUNE and being the owner of the group", }, )] @@ -412,7 +415,7 @@ pub async fn delete_snapshot( } #[api( - streaming: true, + serializing: true, input: { properties: { store: { schema: DATASTORE_SCHEMA }, @@ -503,7 +506,7 @@ unsafe fn list_snapshots_blocking( group: group.into(), time: info.backup_dir.backup_time(), }; - let protected = info.backup_dir.is_protected(); + let protected = info.protected; match get_all_snapshot_files(&info) { Ok((manifest, files)) => { @@ -676,8 +679,6 @@ pub async fn status( let user_info = CachedUserInfo::new()?; let store_privs = user_info.lookup_privs(&auth_id, &["datastore", &store]); - let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read)); - let store_stats = if store_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP) != 0 { true } else if store_privs & PRIV_DATASTORE_READ != 0 { @@ -689,7 +690,8 @@ pub async fn status( _ => false, } }; - let datastore = datastore?; // only unwrap no to avoid leaking existence info + + let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let (counts, gc_status) = if verbose { let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 { @@ -908,9 +910,9 @@ pub fn verify( )? 
}; if !failed_dirs.is_empty() { - task_log!(worker, "Failed to verify the following snapshots/groups:"); + info!("Failed to verify the following snapshots/groups:"); for dir in failed_dirs { - task_log!(worker, "\t{}", dir); + info!("\t{dir}"); } bail!("verification failed - please check the log for details"); } @@ -945,12 +947,18 @@ pub fn verify( type: BackupNamespace, optional: true, }, + "use-task": { + type: bool, + default: false, + optional: true, + description: "Spins up an asynchronous task that does the work.", + }, }, }, returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE, access: { permission: &Permission::Anybody, - description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any\ + description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \ or DATASTORE_PRUNE and being the owner of the group", }, )] @@ -961,7 +969,7 @@ pub fn prune( keep_options: KeepOptions, store: String, ns: Option, - _param: Value, + param: Value, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -979,7 +987,20 @@ pub fn prune( let worker_id = format!("{}:{}:{}", store, ns, group); let group = datastore.backup_group(ns.clone(), group); - let mut prune_result = Vec::new(); + #[derive(Debug, serde::Serialize)] + struct PruneResult { + #[serde(rename = "backup-type")] + backup_type: BackupType, + #[serde(rename = "backup-id")] + backup_id: String, + #[serde(rename = "backup-time")] + backup_time: i64, + keep: bool, + protected: bool, + #[serde(skip_serializing_if = "Option::is_none")] + ns: Option, + } + let mut prune_result: Vec = Vec::new(); let list = group.list_backups()?; @@ -992,78 +1013,95 @@ pub fn prune( if dry_run { for (info, mark) in prune_info { let keep = keep_all || mark.keep(); + let backup_dir = &info.backup_dir; - let mut result = json!({ - "backup-type": info.backup_dir.backup_type(), - "backup-id": info.backup_dir.backup_id(), - "backup-time": info.backup_dir.backup_time(), - "keep": keep, - "protected": mark.protected(), - }); - let prune_ns = info.backup_dir.backup_ns(); + let mut result = PruneResult { + backup_type: backup_dir.backup_type(), + backup_id: backup_dir.backup_id().to_owned(), + backup_time: backup_dir.backup_time(), + keep, + protected: mark.protected(), + ns: None, + }; + let prune_ns = backup_dir.backup_ns(); if !prune_ns.is_root() { - result["ns"] = serde_json::to_value(prune_ns)?; + result.ns = Some(prune_ns.to_owned()); } prune_result.push(result); } return Ok(json!(prune_result)); } - // We use a WorkerTask just to have a task log, but run synchrounously - let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?; + let prune_group = move |_worker: Arc| { + if keep_all { + info!("No prune selection - keeping all files."); + } else { + let mut opts = Vec::new(); + if !ns.is_root() { + opts.push(format!("--ns {ns}")); + } + crate::server::cli_keep_options(&mut opts, &keep_options); - if keep_all { - task_log!(worker, "No prune selection - keeping all files."); - } else { - let mut opts = Vec::new(); - if !ns.is_root() { - opts.push(format!("--ns {ns}")); + info!("retention options: {}", opts.join(" ")); + info!( + "Starting prune on {} group \"{}\"", + print_store_and_ns(&store, &ns), + group.group(), + ); } - crate::server::cli_keep_options(&mut opts, &keep_options); - task_log!(worker, "retention options: {}", opts.join(" ")); - task_log!( - worker, - "Starting prune on {} group \"{}\"", - print_store_and_ns(&store, 
&ns), - group.group(), - ); - } + for (info, mark) in prune_info { + let keep = keep_all || mark.keep(); + let backup_dir = &info.backup_dir; - for (info, mark) in prune_info { - let keep = keep_all || mark.keep(); + let backup_time = backup_dir.backup_time(); + let timestamp = backup_dir.backup_time_string(); + let group: &pbs_api_types::BackupGroup = backup_dir.as_ref(); - let backup_time = info.backup_dir.backup_time(); - let timestamp = info.backup_dir.backup_time_string(); - let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref(); + let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id); - let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,); + info!("{msg}"); - task_log!(worker, "{}", msg); + prune_result.push(PruneResult { + backup_type: group.ty, + backup_id: group.id.clone(), + backup_time, + keep, + protected: mark.protected(), + ns: None, + }); - prune_result.push(json!({ - "backup-type": group.ty, - "backup-id": group.id, - "backup-time": backup_time, - "keep": keep, - "protected": mark.protected(), - })); - - if !(dry_run || keep) { - if let Err(err) = info.backup_dir.destroy(false) { - task_warn!( - worker, - "failed to remove dir {:?}: {}", - info.backup_dir.relative_path(), - err, - ); + if !keep { + if let Err(err) = backup_dir.destroy(false) { + warn!( + "failed to remove dir {:?}: {}", + backup_dir.relative_path(), + err, + ); + } } } + prune_result + }; + + if param["use-task"].as_bool().unwrap_or(false) { + let upid = WorkerTask::spawn( + "prune", + Some(worker_id), + auth_id.to_string(), + true, + move |worker| async move { + let _ = prune_group(worker.clone()); + Ok(()) + }, + )?; + Ok(json!(upid)) + } else { + let (worker, _) = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?; + let result = prune_group(worker.clone()); + worker.log_result(&Ok(())); + Ok(json!(result)) } - - worker.log_result(&Ok(())); - - Ok(json!(prune_result)) } #[api( @@ -1122,9 +1160,7 @@ pub fn prune_datastore( Some(worker_id), auth_id.to_string(), to_stdout, - move |worker| { - crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run) - }, + move |_worker| crate::server::prune_datastore(auth_id, prune_options, datastore, dry_run), )?; Ok(upid_str) @@ -1181,7 +1217,7 @@ pub fn start_garbage_collection( }, }, returns: { - type: GarbageCollectionStatus, + type: GarbageCollectionJobStatus, }, access: { permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), @@ -1192,12 +1228,62 @@ pub fn garbage_collection_status( store: String, _info: &ApiMethod, _rpcenv: &mut dyn RpcEnvironment, -) -> Result { +) -> Result { + let (config, _) = pbs_config::datastore::config()?; + let store_config: DataStoreConfig = config.lookup("datastore", &store)?; + + let mut info = GarbageCollectionJobStatus { + store: store.clone(), + schedule: store_config.gc_schedule, + ..Default::default() + }; + let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; + let status_in_memory = datastore.last_gc_status(); + let state_file = JobState::load("garbage_collection", &store) + .map_err(|err| log::error!("could not open GC statefile for {store}: {err}")) + .ok(); - let status = datastore.last_gc_status(); + let mut last = proxmox_time::epoch_i64(); - Ok(status) + if let Some(ref upid) = status_in_memory.upid { + let mut computed_schedule: JobScheduleStatus = JobScheduleStatus::default(); + if let Some(state) = state_file { + if let Ok(cs) = compute_schedule_status(&state, Some(upid)) { + 
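+                    // cs carries the last run's end time and state as well as the
+                    // next scheduled run, derived from the job state file and the
+                    // UPID of the run tracked in memory.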
computed_schedule = cs; + } + } + + if let Some(endtime) = computed_schedule.last_run_endtime { + last = endtime; + if let Ok(parsed_upid) = upid.parse::() { + info.duration = Some(endtime - parsed_upid.starttime); + } + } + + info.next_run = computed_schedule.next_run; + info.last_run_endtime = computed_schedule.last_run_endtime; + info.last_run_state = computed_schedule.last_run_state; + } + + info.next_run = info + .schedule + .as_ref() + .and_then(|s| { + s.parse::() + .map_err(|err| log::error!("{err}")) + .ok() + }) + .and_then(|e| { + e.compute_next_event(last) + .map_err(|err| log::error!("{err}")) + .ok() + }) + .and_then(|ne| ne); + + info.status = status_in_memory; + + Ok(info) } #[api( @@ -1547,6 +1633,14 @@ pub fn upload_backup_log( .boxed() } +fn decode_path(path: &str) -> Result, Error> { + if path != "root" && path != "/" { + base64::decode(path).map_err(|err| format_err!("base64 decoding of path failed - {err}")) + } else { + Ok(vec![b'/']) + } +} + #[api( input: { properties: { @@ -1562,7 +1656,11 @@ pub fn upload_backup_log( "filepath": { description: "Base64 encoded path.", type: String, - } + }, + "archive-name": { + schema: BACKUP_ARCHIVE_NAME_SCHEMA, + optional: true, + }, }, }, access: { @@ -1577,58 +1675,75 @@ pub async fn catalog( ns: Option, backup_dir: pbs_api_types::BackupDir, filepath: String, + archive_name: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result, Error> { + let file_name = archive_name + .clone() + .unwrap_or_else(|| CATALOG_NAME.to_string()); + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; - tokio::task::spawn_blocking(move || { - let ns = ns.unwrap_or_default(); + let ns = ns.unwrap_or_default(); - let datastore = check_privs_and_load_store( - &store, - &ns, - &auth_id, - PRIV_DATASTORE_READ, - PRIV_DATASTORE_BACKUP, - Some(Operation::Read), - &backup_dir.group, - )?; + let datastore = check_privs_and_load_store( + &store, + &ns, + &auth_id, + PRIV_DATASTORE_READ, + PRIV_DATASTORE_BACKUP, + Some(Operation::Read), + &backup_dir.group, + )?; - let backup_dir = datastore.backup_dir(ns, backup_dir)?; + let backup_dir = datastore.backup_dir(ns, backup_dir)?; - let file_name = CATALOG_NAME; - - let (manifest, files) = read_backup_index(&backup_dir)?; - for file in files { - if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { - bail!("cannot decode '{}' - is encrypted", file_name); - } + let (manifest, files) = read_backup_index(&backup_dir)?; + for file in files { + if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { + bail!("cannot decode '{file_name}' - is encrypted"); } + } - let mut path = datastore.base_path(); - path.push(backup_dir.relative_path()); - path.push(file_name); + if archive_name.is_none() { + tokio::task::spawn_blocking(move || { + let mut path = datastore.base_path(); + path.push(backup_dir.relative_path()); + path.push(&file_name); - let index = DynamicIndexReader::open(&path) - .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; + let index = DynamicIndexReader::open(&path) + .map_err(|err| format_err!("unable to read dynamic index '{path:?}' - {err}"))?; - let (csum, size) = index.compute_csum(); - manifest.verify_file(file_name, &csum, size)?; + let (csum, size) = index.compute_csum(); + manifest.verify_file(&file_name, &csum, size)?; - let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); - let reader = BufferedDynamicReader::new(index, chunk_reader); + let chunk_reader = 
LocalChunkReader::new(datastore, None, CryptMode::None); + let reader = BufferedDynamicReader::new(index, chunk_reader); - let mut catalog_reader = CatalogReader::new(reader); + let mut catalog_reader = CatalogReader::new(reader); - let path = if filepath != "root" && filepath != "/" { - base64::decode(filepath)? - } else { - vec![b'/'] - }; + let path = decode_path(&filepath)?; + catalog_reader.list_dir_contents(&path) + }) + .await? + } else { + let (archive_name, _payload_archive_name) = + pbs_client::tools::get_pxar_archive_names(&file_name, &manifest)?; + let (reader, archive_size) = + get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &archive_name)?; - catalog_reader.list_dir_contents(&path) - }) - .await? + // only care about the metadata, don't attach a payload reader + let reader = pxar::PxarVariant::Unified(reader); + let accessor = Accessor::new(reader, archive_size).await?; + + let file_path = decode_path(&filepath)?; + pbs_client::tools::pxar_metadata_catalog_lookup( + accessor, + OsStr::from_bytes(&file_path), + None, + ) + .await + } } #[sortable] @@ -1644,6 +1759,7 @@ pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new( ("backup-time", false, &BACKUP_TIME_SCHEMA), ("filepath", false, &StringSchema::new("Base64 encoded path").schema()), ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()), + ("archive-name", true, &BACKUP_ARCHIVE_NAME_SCHEMA), ]), ) ).access( @@ -1654,6 +1770,29 @@ pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new( &Permission::Anybody, ); +fn get_local_pxar_reader( + datastore: Arc, + manifest: &BackupManifest, + backup_dir: &BackupDir, + pxar_name: &str, +) -> Result<(LocalDynamicReadAt, u64), Error> { + let mut path = datastore.base_path(); + path.push(backup_dir.relative_path()); + path.push(pxar_name); + + let index = DynamicIndexReader::open(&path) + .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; + + let (csum, size) = index.compute_csum(); + manifest.verify_file(pxar_name, &csum, size)?; + + let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); + let reader = BufferedDynamicReader::new(index, chunk_reader); + let archive_size = reader.archive_size(); + + Ok((LocalDynamicReadAt::new(reader), archive_size)) +} + pub fn pxar_file_download( _parts: Parts, _req_body: Body, @@ -1688,9 +1827,16 @@ pub fn pxar_file_download( components.remove(0); } - let mut split = components.splitn(2, |c| *c == b'/'); - let pxar_name = std::str::from_utf8(split.next().unwrap())?; - let file_path = split.next().unwrap_or(b"/"); + let (pxar_name, file_path) = if let Some(archive_name) = param["archive-name"].as_str() { + let archive_name = archive_name.as_bytes().to_owned(); + (archive_name, base64::decode(&filepath)?) 
+ } else { + let mut split = components.splitn(2, |c| *c == b'/'); + let pxar_name = split.next().unwrap(); + let file_path = split.next().unwrap_or(b"/"); + (pxar_name.to_owned(), file_path.to_owned()) + }; + let pxar_name = std::str::from_utf8(&pxar_name)?; let (manifest, files) = read_backup_index(&backup_dir)?; for file in files { if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) { @@ -1698,24 +1844,22 @@ pub fn pxar_file_download( } } - let mut path = datastore.base_path(); - path.push(backup_dir.relative_path()); - path.push(pxar_name); - - let index = DynamicIndexReader::open(&path) - .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; - - let (csum, size) = index.compute_csum(); - manifest.verify_file(pxar_name, &csum, size)?; - - let chunk_reader = LocalChunkReader::new(datastore, None, CryptMode::None); - let reader = BufferedDynamicReader::new(index, chunk_reader); - let archive_size = reader.archive_size(); - let reader = LocalDynamicReadAt::new(reader); + let (pxar_name, payload_archive_name) = + pbs_client::tools::get_pxar_archive_names(pxar_name, &manifest)?; + let (reader, archive_size) = + get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &pxar_name)?; + let reader = if let Some(payload_archive_name) = payload_archive_name { + let payload_input = + get_local_pxar_reader(datastore, &manifest, &backup_dir, &payload_archive_name)?; + pxar::PxarVariant::Split(reader, payload_input) + } else { + pxar::PxarVariant::Unified(reader) + }; let decoder = Accessor::new(reader, archive_size).await?; + let root = decoder.open_root().await?; - let path = OsStr::from_bytes(file_path).to_os_string(); + let path = OsStr::from_bytes(&file_path).to_os_string(); let file = root .lookup(&path) .await? @@ -1781,10 +1925,10 @@ pub fn pxar_file_download( schema: DATASTORE_SCHEMA, }, timeframe: { - type: RRDTimeFrame, + type: RrdTimeframe, }, cf: { - type: RRDMode, + type: RrdMode, }, }, }, @@ -1796,8 +1940,8 @@ pub fn pxar_file_download( /// Read datastore stats pub fn get_rrd_stats( store: String, - timeframe: RRDTimeFrame, - cf: RRDMode, + timeframe: RrdTimeframe, + cf: RrdMode, _param: Value, ) -> Result { let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; diff --git a/src/api2/admin/gc.rs b/src/api2/admin/gc.rs new file mode 100644 index 000000000..bca06897b --- /dev/null +++ b/src/api2/admin/gc.rs @@ -0,0 +1,55 @@ +use anyhow::Error; +use pbs_api_types::GarbageCollectionJobStatus; + +use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment}; +use proxmox_schema::api; + +use pbs_api_types::DATASTORE_SCHEMA; + +use serde_json::Value; + +use crate::api2::admin::datastore::{garbage_collection_status, get_datastore_list}; + +#[api( + input: { + properties: { + store: { + schema: DATASTORE_SCHEMA, + optional: true, + }, + }, + }, + returns: { + description: "List configured gc jobs and their status", + type: Array, + items: { type: GarbageCollectionJobStatus }, + }, + access: { + permission: &Permission::Anybody, + description: "Requires Datastore.Audit or Datastore.Modify on datastore.", + }, +)] +/// List all GC jobs (max one per datastore) +pub fn list_all_gc_jobs( + store: Option, + _param: Value, + _info: &ApiMethod, + rpcenv: &mut dyn RpcEnvironment, +) -> Result, Error> { + let gc_info = match store { + Some(store) => garbage_collection_status(store, _info, rpcenv).map(|info| vec![info])?, + None => get_datastore_list(Value::Null, _info, rpcenv)? 
+ .into_iter() + .map(|store_list_item| store_list_item.store) + .filter_map(|store| garbage_collection_status(store, _info, rpcenv).ok()) + .collect::>(), + }; + + Ok(gc_info) +} + +const GC_ROUTER: Router = Router::new().get(&API_METHOD_LIST_ALL_GC_JOBS); + +pub const ROUTER: Router = Router::new() + .get(&API_METHOD_LIST_ALL_GC_JOBS) + .match_all("store", &GC_ROUTER); diff --git a/src/api2/admin/mod.rs b/src/api2/admin/mod.rs index 168dc038d..a1c49f8e2 100644 --- a/src/api2/admin/mod.rs +++ b/src/api2/admin/mod.rs @@ -5,6 +5,7 @@ use proxmox_router::{Router, SubdirMap}; use proxmox_sortable_macro::sortable; pub mod datastore; +pub mod gc; pub mod metrics; pub mod namespace; pub mod prune; @@ -17,6 +18,7 @@ const SUBDIRS: SubdirMap = &sorted!([ ("datastore", &datastore::ROUTER), ("metrics", &metrics::ROUTER), ("prune", &prune::ROUTER), + ("gc", &gc::ROUTER), ("sync", &sync::ROUTER), ("traffic-control", &traffic_control::ROUTER), ("verify", &verify::ROUTER), diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs index 51f4a15b3..99d885e2e 100644 --- a/src/api2/backup/environment.rs +++ b/src/api2/backup/environment.rs @@ -2,6 +2,7 @@ use anyhow::{bail, format_err, Error}; use nix::dir::Dir; use std::collections::HashMap; use std::sync::{Arc, Mutex}; +use tracing::info; use ::serde::Serialize; use serde_json::{json, Value}; @@ -101,7 +102,7 @@ impl SharedBackupState { } } -/// `RpcEnvironmet` implementation for backup service +/// `RpcEnvironment` implementation for backup service #[derive(Clone)] pub struct BackupEnvironment { env_type: RpcEnvironmentType, @@ -141,7 +142,7 @@ impl BackupEnvironment { auth_id, worker, datastore, - debug: false, + debug: tracing::enabled!(tracing::Level::DEBUG), formatter: JSON_FORMATTER, backup_dir, last_backup: None, @@ -687,12 +688,16 @@ impl BackupEnvironment { } pub fn log>(&self, msg: S) { - self.worker.log_message(msg); + info!("{}", msg.as_ref()); } pub fn debug>(&self, msg: S) { if self.debug { - self.worker.log_message(msg); + // This is kinda weird, we would like to use tracing::debug! here and automatically + // filter it, but self.debug is set from the client-side and the logs are printed on + // client and server side. This means that if the client sets the log level to debug, + // both server and client need to have 'debug' logs printed. 
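+        // Put differently: a purely server-side filter along the lines of
+        //     if tracing::enabled!(tracing::Level::DEBUG) { self.log(msg); }
+        // would drop debug lines the client explicitly asked for whenever the
+        // server's own log level is above debug, so the client-provided flag
+        // is honored instead.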
+ self.log(msg); } } diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs index 013043dd0..ea0d0292e 100644 --- a/src/api2/backup/mod.rs +++ b/src/api2/backup/mod.rs @@ -9,12 +9,14 @@ use hyper::{Body, Request, Response, StatusCode}; use serde::Deserialize; use serde_json::{json, Value}; +use proxmox_rest_server::{H2Service, WorkerTask}; use proxmox_router::{http_err, list_subdirs_api_method}; use proxmox_router::{ ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap, }; use proxmox_schema::*; use proxmox_sortable_macro::sortable; +use proxmox_sys::fs::lock_dir_noblock_shared; use pbs_api_types::{ Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState, @@ -23,11 +25,11 @@ use pbs_api_types::{ }; use pbs_config::CachedUserInfo; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{archive_type, ArchiveType}; +use pbs_datastore::manifest::ArchiveType; use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1}; use pbs_tools::json::{required_array_param, required_integer_param, required_string_param}; -use proxmox_rest_server::{H2Service, WorkerTask}; -use proxmox_sys::fs::lock_dir_noblock_shared; + +use crate::api2::ExecInheritLogContext; mod environment; use environment::*; @@ -234,7 +236,8 @@ fn upgrade_to_backup_protocol( .and_then(move |conn| { env2.debug("protocol upgrade done"); - let mut http = hyper::server::conn::Http::new(); + let mut http = hyper::server::conn::Http::new() + .with_executor(ExecInheritLogContext); http.http2_only(true); // increase window size: todo - find optiomal size let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2 @@ -839,7 +842,7 @@ fn download_previous( path.push(&archive_name); { - let index: Option> = match archive_type(&archive_name)? { + let index: Option> = match ArchiveType::from_path(&archive_name)? 
{ ArchiveType::FixedIndex => { let index = env.datastore.open_fixed_reader(&path)?; Some(Box::new(index)) diff --git a/src/api2/config/access/ad.rs b/src/api2/config/access/ad.rs new file mode 100644 index 000000000..c202291aa --- /dev/null +++ b/src/api2/config/access/ad.rs @@ -0,0 +1,348 @@ +use anyhow::{bail, format_err, Error}; +use hex::FromHex; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +use proxmox_ldap::{Config as LdapConfig, Connection}; +use proxmox_router::{Permission, Router, RpcEnvironment}; +use proxmox_schema::{api, param_bail}; + +use pbs_api_types::{ + AdRealmConfig, AdRealmConfigUpdater, PRIV_REALM_ALLOCATE, PRIV_SYS_AUDIT, + PROXMOX_CONFIG_DIGEST_SCHEMA, REALM_ID_SCHEMA, +}; + +use pbs_config::domains; + +use crate::{auth::AdAuthenticator, auth_helpers}; + +#[api( + input: { + properties: {}, + }, + returns: { + description: "List of configured AD realms.", + type: Array, + items: { type: AdRealmConfig }, + }, + access: { + permission: &Permission::Privilege(&["access", "domains"], PRIV_REALM_ALLOCATE, false), + }, +)] +/// List configured AD realms +pub fn list_ad_realms( + _param: Value, + rpcenv: &mut dyn RpcEnvironment, +) -> Result, Error> { + let (config, digest) = domains::config()?; + + let list = config.convert_to_typed_array("ad")?; + + rpcenv["digest"] = hex::encode(digest).into(); + + Ok(list) +} + +#[api( + protected: true, + input: { + properties: { + config: { + type: AdRealmConfig, + flatten: true, + }, + password: { + description: "AD bind password", + optional: true, + } + }, + }, + access: { + permission: &Permission::Privilege(&["access", "domains"], PRIV_REALM_ALLOCATE, false), + }, +)] +/// Create a new AD realm +pub async fn create_ad_realm( + mut config: AdRealmConfig, + password: Option, +) -> Result<(), Error> { + let domain_config_lock = domains::lock_config()?; + + let (mut domains, _digest) = domains::config()?; + + if domains::exists(&domains, &config.realm) { + param_bail!("realm", "realm '{}' already exists.", config.realm); + } + + let mut ldap_config = + AdAuthenticator::api_type_to_config_with_password(&config, password.clone())?; + + if config.base_dn.is_none() { + ldap_config.base_dn = retrieve_default_naming_context(&ldap_config).await?; + config.base_dn = Some(ldap_config.base_dn.clone()); + } + + let conn = Connection::new(ldap_config); + conn.check_connection() + .await + .map_err(|e| format_err!("{e:#}"))?; + + if let Some(password) = password { + auth_helpers::store_ldap_bind_password(&config.realm, &password, &domain_config_lock)?; + } + + domains.set_data(&config.realm, "ad", &config)?; + + domains::save_config(&domains)?; + + Ok(()) +} + +#[api( + input: { + properties: { + realm: { + schema: REALM_ID_SCHEMA, + }, + }, + }, + returns: { type: AdRealmConfig }, + access: { + permission: &Permission::Privilege(&["access", "domains"], PRIV_SYS_AUDIT, false), + }, +)] +/// Read the AD realm configuration +pub fn read_ad_realm( + realm: String, + rpcenv: &mut dyn RpcEnvironment, +) -> Result { + let (domains, digest) = domains::config()?; + + let config = domains.lookup("ad", &realm)?; + + rpcenv["digest"] = hex::encode(digest).into(); + + Ok(config) +} + +#[api()] +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Deletable property name +pub enum DeletableProperty { + /// Fallback AD server address + Server2, + /// Port + Port, + /// Comment + Comment, + /// Verify server certificate + Verify, + /// Mode (ldap, ldap+starttls or ldaps), + Mode, + /// Bind Domain + BindDn, + /// LDAP 
bind passwort + Password, + /// User filter + Filter, + /// Default options for user sync + SyncDefaultsOptions, + /// user attributes to sync with AD attributes + SyncAttributes, + /// User classes + UserClasses, +} + +#[api( + protected: true, + input: { + properties: { + realm: { + schema: REALM_ID_SCHEMA, + }, + update: { + type: AdRealmConfigUpdater, + flatten: true, + }, + password: { + description: "AD bind password", + optional: true, + }, + delete: { + description: "List of properties to delete.", + type: Array, + optional: true, + items: { + type: DeletableProperty, + } + }, + digest: { + optional: true, + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + }, + }, + }, + returns: { type: AdRealmConfig }, + access: { + permission: &Permission::Privilege(&["access", "domains"], PRIV_REALM_ALLOCATE, false), + }, +)] +/// Update an AD realm configuration +pub async fn update_ad_realm( + realm: String, + update: AdRealmConfigUpdater, + password: Option, + delete: Option>, + digest: Option, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result<(), Error> { + let domain_config_lock = domains::lock_config()?; + + let (mut domains, expected_digest) = domains::config()?; + + if let Some(ref digest) = digest { + let digest = <[u8; 32]>::from_hex(digest)?; + crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; + } + + let mut config: AdRealmConfig = domains.lookup("ad", &realm)?; + + if let Some(delete) = delete { + for delete_prop in delete { + match delete_prop { + DeletableProperty::Server2 => { + config.server2 = None; + } + DeletableProperty::Comment => { + config.comment = None; + } + DeletableProperty::Port => { + config.port = None; + } + DeletableProperty::Verify => { + config.verify = None; + } + DeletableProperty::Mode => { + config.mode = None; + } + DeletableProperty::BindDn => { + config.bind_dn = None; + } + DeletableProperty::Password => { + auth_helpers::remove_ldap_bind_password(&realm, &domain_config_lock)?; + } + DeletableProperty::Filter => { + config.filter = None; + } + DeletableProperty::SyncDefaultsOptions => { + config.sync_defaults_options = None; + } + DeletableProperty::SyncAttributes => { + config.sync_attributes = None; + } + DeletableProperty::UserClasses => { + config.user_classes = None; + } + } + } + } + + if let Some(server1) = update.server1 { + config.server1 = server1; + } + + if let Some(server2) = update.server2 { + config.server2 = Some(server2); + } + + if let Some(port) = update.port { + config.port = Some(port); + } + + if let Some(base_dn) = update.base_dn { + config.base_dn = Some(base_dn); + } + + if let Some(comment) = update.comment { + let comment = comment.trim().to_string(); + if comment.is_empty() { + config.comment = None; + } else { + config.comment = Some(comment); + } + } + + if let Some(mode) = update.mode { + config.mode = Some(mode); + } + + if let Some(verify) = update.verify { + config.verify = Some(verify); + } + + if let Some(bind_dn) = update.bind_dn { + config.bind_dn = Some(bind_dn); + } + + if let Some(filter) = update.filter { + config.filter = Some(filter); + } + + if let Some(sync_defaults_options) = update.sync_defaults_options { + config.sync_defaults_options = Some(sync_defaults_options); + } + + if let Some(sync_attributes) = update.sync_attributes { + config.sync_attributes = Some(sync_attributes); + } + + if let Some(user_classes) = update.user_classes { + config.user_classes = Some(user_classes); + } + + let mut ldap_config = if password.is_some() { + 
AdAuthenticator::api_type_to_config_with_password(&config, password.clone())? + } else { + AdAuthenticator::api_type_to_config(&config)? + }; + + if config.base_dn.is_none() { + ldap_config.base_dn = retrieve_default_naming_context(&ldap_config).await?; + config.base_dn = Some(ldap_config.base_dn.clone()); + } + + let conn = Connection::new(ldap_config); + conn.check_connection() + .await + .map_err(|e| format_err!("{e:#}"))?; + + if let Some(password) = password { + auth_helpers::store_ldap_bind_password(&realm, &password, &domain_config_lock)?; + } + + domains.set_data(&realm, "ad", &config)?; + + domains::save_config(&domains)?; + + Ok(()) +} + +async fn retrieve_default_naming_context(ldap_config: &LdapConfig) -> Result { + let conn = Connection::new(ldap_config.clone()); + match conn.retrieve_root_dse_attr("defaultNamingContext").await { + Ok(base_dn) if !base_dn.is_empty() => Ok(base_dn[0].clone()), + Ok(_) => bail!("server did not provide `defaultNamingContext`"), + Err(err) => bail!("failed to determine base_dn: {err}"), + } +} + +const ITEM_ROUTER: Router = Router::new() + .get(&API_METHOD_READ_AD_REALM) + .put(&API_METHOD_UPDATE_AD_REALM) + .delete(&super::ldap::API_METHOD_DELETE_LDAP_REALM); + +pub const ROUTER: Router = Router::new() + .get(&API_METHOD_LIST_AD_REALMS) + .post(&API_METHOD_CREATE_AD_REALM) + .match_all("realm", &ITEM_ROUTER); diff --git a/src/api2/config/access/mod.rs b/src/api2/config/access/mod.rs index 614bd5e6c..b551e662a 100644 --- a/src/api2/config/access/mod.rs +++ b/src/api2/config/access/mod.rs @@ -2,12 +2,14 @@ use proxmox_router::list_subdirs_api_method; use proxmox_router::{Router, SubdirMap}; use proxmox_sortable_macro::sortable; +pub mod ad; pub mod ldap; pub mod openid; pub mod tfa; #[sortable] const SUBDIRS: SubdirMap = &sorted!([ + ("ad", &ad::ROUTER), ("ldap", &ldap::ROUTER), ("openid", &openid::ROUTER), ("tfa", &tfa::ROUTER), diff --git a/src/api2/config/acme.rs b/src/api2/config/acme.rs index 4fa276f6c..422b9720e 100644 --- a/src/api2/config/acme.rs +++ b/src/api2/config/acme.rs @@ -1,22 +1,21 @@ use std::fs; use std::ops::ControlFlow; use std::path::Path; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, LazyLock, Mutex}; use std::time::SystemTime; use anyhow::{bail, format_err, Error}; use hex::FromHex; -use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; +use tracing::{info, warn}; use proxmox_router::{ http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap, }; use proxmox_schema::{api, param_bail}; -use proxmox_sys::{task_log, task_warn}; -use proxmox_acme::account::AccountData as AcmeAccountData; +use proxmox_acme::types::AccountData as AcmeAccountData; use proxmox_acme::Account; use pbs_api_types::{Authid, PRIV_SYS_MODIFY}; @@ -240,10 +239,10 @@ fn register_account( Some(name.to_string()), auth_id.to_string(), true, - move |worker| async move { + move |_worker| async move { let mut client = AcmeClient::new(directory); - task_log!(worker, "Registering ACME account '{}'...", &name); + info!("Registering ACME account '{}'...", &name); let account = do_register_account( &mut client, @@ -255,11 +254,7 @@ fn register_account( ) .await?; - task_log!( - worker, - "Registration successful, account URL: {}", - account.location - ); + info!("Registration successful, account URL: {}", account.location); Ok(()) }, @@ -354,7 +349,7 @@ pub fn deactivate_account( Some(name.to_string()), auth_id.to_string(), true, - move |worker| async move { + move |_worker| async move { match 
AcmeClient::load(&name) .await? .update_account(&json!({"status": "deactivated"})) @@ -363,12 +358,7 @@ pub fn deactivate_account( Ok(_account) => (), Err(err) if !force => return Err(err), Err(err) => { - task_warn!( - worker, - "error deactivating account {}, proceedeing anyway - {}", - name, - err, - ); + warn!("error deactivating account {name}, proceeding anyway - {err}"); } } crate::config::acme::mark_account_deactivated(&name)?; @@ -439,10 +429,8 @@ impl Serialize for ChallengeSchemaWrapper { } fn get_cached_challenge_schemas() -> Result { - lazy_static! { - static ref CACHE: Mutex>, SystemTime)>> = - Mutex::new(None); - } + static CACHE: LazyLock>, SystemTime)>>> = + LazyLock::new(|| Mutex::new(None)); // the actual loading code let mut last = CACHE.lock().unwrap(); diff --git a/src/api2/config/changer.rs b/src/api2/config/changer.rs index db0ea14a7..31a15abab 100644 --- a/src/api2/config/changer.rs +++ b/src/api2/config/changer.rs @@ -33,6 +33,10 @@ pub fn create_changer(config: ScsiTapeChanger) -> Result<(), Error> { let (mut section_config, _digest) = pbs_config::drive::config()?; + if section_config.sections.contains_key(&config.name) { + param_bail!("name", "Entry '{}' already exists", config.name); + } + let linux_changers = linux_tape_changer_list(); check_drive_path(&linux_changers, &config.path)?; @@ -40,10 +44,6 @@ pub fn create_changer(config: ScsiTapeChanger) -> Result<(), Error> { let existing: Vec = section_config.convert_to_typed_array("changer")?; for changer in existing { - if changer.name == config.name { - param_bail!("name", "Entry '{}' already exists", config.name); - } - if changer.path == config.path { param_bail!( "path", diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 3081e1f48..ca6edf05a 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -4,16 +4,16 @@ use ::serde::{Deserialize, Serialize}; use anyhow::Error; use hex::FromHex; use serde_json::Value; +use tracing::warn; use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType}; use proxmox_schema::{api, param_bail, ApiType}; use proxmox_section_config::SectionConfigData; -use proxmox_sys::{task_warn, WorkerTaskContext}; use proxmox_uuid::Uuid; use pbs_api_types::{ Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions, - PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE, + MaintenanceMode, PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA, }; use pbs_config::BackupLockGuard; @@ -70,7 +70,6 @@ pub(crate) fn do_create_datastore( _lock: BackupLockGuard, mut config: SectionConfigData, datastore: DataStoreConfig, - worker: Option<&dyn WorkerTaskContext>, ) -> Result<(), Error> { let path: PathBuf = datastore.path.clone().into(); @@ -84,7 +83,6 @@ pub(crate) fn do_create_datastore( path, backup_user.uid, backup_user.gid, - worker, tuning.sync_level.unwrap_or_default(), )?; @@ -118,7 +116,7 @@ pub fn create_datastore( let (section_config, _digest) = pbs_config::datastore::config()?; - if section_config.sections.get(&config.name).is_some() { + if section_config.sections.contains_key(&config.name) { param_bail!("name", "datastore '{}' already exists.", config.name); } @@ -155,11 +153,11 @@ pub fn create_datastore( Some(config.name.to_string()), auth_id.to_string(), to_stdout, - move |worker| { - do_create_datastore(lock, section_config, config, 
Some(&worker))?; + move |_worker| { + do_create_datastore(lock, section_config, config)?; if let Some(prune_job_config) = prune_job_config { - do_create_prune_job(prune_job_config, Some(&worker)) + do_create_prune_job(prune_job_config) } else { Ok(()) } @@ -222,6 +220,8 @@ pub enum DeletableProperty { NotifyUser, /// Delete the notify property Notify, + /// Delete the notification-mode property + NotificationMode, /// Delete the tuning property Tuning, /// Delete the maintenance-mode property @@ -315,11 +315,14 @@ pub fn update_datastore( DeletableProperty::NotifyUser => { data.notify_user = None; } + DeletableProperty::NotificationMode => { + data.notification_mode = None; + } DeletableProperty::Tuning => { data.tuning = None; } DeletableProperty::MaintenanceMode => { - data.maintenance_mode = None; + data.set_maintenance_mode(None)?; } } } @@ -385,6 +388,10 @@ pub fn update_datastore( data.notify_user = update.notify_user; } + if update.notification_mode.is_some() { + data.notification_mode = update.notification_mode; + } + if update.tuning.is_some() { data.tuning = update.tuning; } @@ -392,7 +399,14 @@ pub fn update_datastore( let mut maintenance_mode_changed = false; if update.maintenance_mode.is_some() { maintenance_mode_changed = data.maintenance_mode != update.maintenance_mode; - data.maintenance_mode = update.maintenance_mode; + + let maintenance_mode = match update.maintenance_mode { + Some(mode_str) => Some(MaintenanceMode::deserialize( + proxmox_schema::de::SchemaDeserializer::new(mode_str, &MaintenanceMode::API_SCHEMA), + )?), + None => None, + }; + data.set_maintenance_mode(maintenance_mode)?; } config.set_data(&name, "datastore", &data)?; @@ -411,8 +425,8 @@ pub fn update_datastore( if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) { - let sock = proxmox_rest_server::ctrl_sock_from_pid(proxy_pid); - let _ = proxmox_rest_server::send_raw_command( + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); + let _ = proxmox_daemon::command_socket::send_raw( sock, &format!( "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", @@ -512,8 +526,8 @@ pub async fn delete_datastore( Some(name.clone()), auth_id.to_string(), to_stdout, - move |worker| { - pbs_datastore::DataStore::destroy(&name, destroy_data, &worker)?; + move |_worker| { + pbs_datastore::DataStore::destroy(&name, destroy_data)?; // ignore errors let _ = jobstate::remove_state_file("prune", &name); @@ -522,7 +536,7 @@ pub async fn delete_datastore( if let Err(err) = proxmox_async::runtime::block_on(crate::server::notify_datastore_removed()) { - task_warn!(worker, "failed to notify after datastore removal: {err}"); + warn!("failed to notify after datastore removal: {err}"); } Ok(()) diff --git a/src/api2/config/drive.rs b/src/api2/config/drive.rs index 02589aaf5..1222ab200 100644 --- a/src/api2/config/drive.rs +++ b/src/api2/config/drive.rs @@ -34,6 +34,10 @@ pub fn create_drive(config: LtoTapeDrive) -> Result<(), Error> { let (mut section_config, _digest) = pbs_config::drive::config()?; + if section_config.sections.contains_key(&config.name) { + param_bail!("name", "Entry '{}' already exists", config.name); + } + let lto_drives = lto_tape_device_list(); check_drive_path(<o_drives, &config.path)?; @@ -41,9 +45,6 @@ pub fn create_drive(config: LtoTapeDrive) -> Result<(), Error> { let existing: Vec = section_config.convert_to_typed_array("lto")?; for drive in existing { - if drive.name == config.name { - param_bail!("name", "Entry '{}' already exists", 
config.name); - } if drive.path == config.path { param_bail!( "path", diff --git a/src/api2/config/media_pool.rs b/src/api2/config/media_pool.rs index 4a4cec56f..9389ea02f 100644 --- a/src/api2/config/media_pool.rs +++ b/src/api2/config/media_pool.rs @@ -31,7 +31,7 @@ pub fn create_pool(config: MediaPoolConfig) -> Result<(), Error> { let (mut section_config, _digest) = pbs_config::media_pool::config()?; - if section_config.sections.get(&config.name).is_some() { + if section_config.sections.contains_key(&config.name) { param_bail!("name", "Media pool '{}' already exists", config.name); } @@ -225,11 +225,8 @@ pub fn delete_pool(name: String) -> Result<(), Error> { let (mut config, _digest) = pbs_config::media_pool::config()?; - match config.sections.get(&name) { - Some(_) => { - config.sections.remove(&name); - } - None => http_bail!(NOT_FOUND, "delete pool '{}' failed - no such pool", name), + if config.sections.remove(&name).is_none() { + http_bail!(NOT_FOUND, "delete pool '{}' failed - no such pool", name); } pbs_config::media_pool::save_config(&config)?; diff --git a/src/api2/config/metrics/influxdbhttp.rs b/src/api2/config/metrics/influxdbhttp.rs index 2b7811ff6..bed292600 100644 --- a/src/api2/config/metrics/influxdbhttp.rs +++ b/src/api2/config/metrics/influxdbhttp.rs @@ -82,7 +82,7 @@ pub async fn create_influxdb_http_server(config: InfluxDbHttp) -> Result<(), Err let (mut metrics, _digest) = metrics::config()?; - if metrics.sections.get(&config.name).is_some() { + if metrics.sections.contains_key(&config.name) { bail!("metric server '{}' already exists.", config.name); } diff --git a/src/api2/config/metrics/influxdbudp.rs b/src/api2/config/metrics/influxdbudp.rs index c6efd5e27..c47a4e191 100644 --- a/src/api2/config/metrics/influxdbudp.rs +++ b/src/api2/config/metrics/influxdbudp.rs @@ -67,7 +67,7 @@ pub async fn create_influxdb_udp_server(config: InfluxDbUdp) -> Result<(), Error let (mut metrics, _digest) = metrics::config()?; - if metrics.sections.get(&config.name).is_some() { + if metrics.sections.contains_key(&config.name) { bail!("metric server '{}' already exists.", config.name); } diff --git a/src/api2/config/mod.rs b/src/api2/config/mod.rs index 6cfeaea10..15dc5db92 100644 --- a/src/api2/config/mod.rs +++ b/src/api2/config/mod.rs @@ -11,6 +11,7 @@ pub mod datastore; pub mod drive; pub mod media_pool; pub mod metrics; +pub mod notifications; pub mod prune; pub mod remote; pub mod sync; @@ -28,6 +29,7 @@ const SUBDIRS: SubdirMap = &sorted!([ ("drive", &drive::ROUTER), ("media-pool", &media_pool::ROUTER), ("metrics", &metrics::ROUTER), + ("notifications", ¬ifications::ROUTER), ("prune", &prune::ROUTER), ("remote", &remote::ROUTER), ("sync", &sync::ROUTER), diff --git a/src/api2/config/notifications/gotify.rs b/src/api2/config/notifications/gotify.rs new file mode 100644 index 000000000..1f14b377b --- /dev/null +++ b/src/api2/config/notifications/gotify.rs @@ -0,0 +1,190 @@ +use anyhow::Error; +use serde_json::Value; + +use proxmox_notify::endpoints::gotify::{ + DeleteableGotifyProperty, GotifyConfig, GotifyConfigUpdater, GotifyPrivateConfig, + GotifyPrivateConfigUpdater, +}; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use proxmox_router::{Permission, Router, RpcEnvironment}; +use proxmox_schema::api; + +use pbs_api_types::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA}; + +#[api( + protected: true, + input: { + properties: {}, + }, + returns: { + description: "List of gotify endpoints.", + type: Array, + items: { type: GotifyConfig }, + }, + access: { + 
permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// List all gotify endpoints. +pub fn list_endpoints( + _param: Value, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result, Error> { + let config = pbs_config::notifications::config()?; + + let endpoints = proxmox_notify::api::gotify::get_endpoints(&config)?; + + Ok(endpoints) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + } + }, + }, + returns: { type: GotifyConfig }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// Get a gotify endpoint. +pub fn get_endpoint(name: String, rpcenv: &mut dyn RpcEnvironment) -> Result { + let config = pbs_config::notifications::config()?; + let endpoint = proxmox_notify::api::gotify::get_endpoint(&config, &name)?; + + rpcenv["digest"] = hex::encode(config.digest()).into(); + + Ok(endpoint) +} + +#[api( + protected: true, + input: { + properties: { + endpoint: { + type: GotifyConfig, + flatten: true, + }, + token: { + description: "Authentication token", + } + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Add a new gotify endpoint. +pub fn add_endpoint( + endpoint: GotifyConfig, + token: String, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + let private_endpoint_config = GotifyPrivateConfig { + name: endpoint.name.clone(), + token, + }; + + proxmox_notify::api::gotify::add_endpoint(&mut config, endpoint, private_endpoint_config)?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + updater: { + type: GotifyConfigUpdater, + flatten: true, + }, + token: { + description: "Authentication token", + optional: true, + }, + delete: { + description: "List of properties to delete.", + type: Array, + optional: true, + items: { + type: DeleteableGotifyProperty, + } + }, + digest: { + optional: true, + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + }, + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Update gotify endpoint. +pub fn update_endpoint( + name: String, + updater: GotifyConfigUpdater, + token: Option, + delete: Option>, + digest: Option, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + let digest = digest.map(hex::decode).transpose()?; + + proxmox_notify::api::gotify::update_endpoint( + &mut config, + &name, + updater, + GotifyPrivateConfigUpdater { token }, + delete.as_deref(), + digest.as_deref(), + )?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + } + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Delete gotify endpoint. 
+pub fn delete_endpoint(name: String, _rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + proxmox_notify::api::gotify::delete_gotify_endpoint(&mut config, &name)?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +const ITEM_ROUTER: Router = Router::new() + .get(&API_METHOD_GET_ENDPOINT) + .put(&API_METHOD_UPDATE_ENDPOINT) + .delete(&API_METHOD_DELETE_ENDPOINT); + +pub const ROUTER: Router = Router::new() + .get(&API_METHOD_LIST_ENDPOINTS) + .post(&API_METHOD_ADD_ENDPOINT) + .match_all("name", &ITEM_ROUTER); diff --git a/src/api2/config/notifications/matchers.rs b/src/api2/config/notifications/matchers.rs new file mode 100644 index 000000000..fba1859cf --- /dev/null +++ b/src/api2/config/notifications/matchers.rs @@ -0,0 +1,170 @@ +use anyhow::Error; +use serde_json::Value; + +use proxmox_notify::matcher::{DeleteableMatcherProperty, MatcherConfig, MatcherConfigUpdater}; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use proxmox_router::{Permission, Router, RpcEnvironment}; +use proxmox_schema::api; + +use pbs_api_types::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA}; + +#[api( + protected: true, + input: { + properties: {}, + }, + returns: { + description: "List of matchers.", + type: Array, + items: { type: MatcherConfig }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// List all notification matchers. +pub fn list_matchers( + _param: Value, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result, Error> { + let config = pbs_config::notifications::config()?; + + let matchers = proxmox_notify::api::matcher::get_matchers(&config)?; + + Ok(matchers) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + } + }, + }, + returns: { type: MatcherConfig }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// Get a notification matcher. +pub fn get_matcher(name: String, rpcenv: &mut dyn RpcEnvironment) -> Result { + let config = pbs_config::notifications::config()?; + let matcher = proxmox_notify::api::matcher::get_matcher(&config, &name)?; + + rpcenv["digest"] = hex::encode(config.digest()).into(); + + Ok(matcher) +} + +#[api( + protected: true, + input: { + properties: { + matcher: { + type: MatcherConfig, + flatten: true, + } + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Add a new notification matcher. +pub fn add_matcher(matcher: MatcherConfig, _rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + + proxmox_notify::api::matcher::add_matcher(&mut config, matcher)?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + updater: { + type: MatcherConfigUpdater, + flatten: true, + }, + delete: { + description: "List of properties to delete.", + type: Array, + optional: true, + items: { + type: DeleteableMatcherProperty, + } + }, + digest: { + optional: true, + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + }, + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Update notification matcher. 
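+/// The optional `digest` guards against concurrent modification: when it is
+/// supplied and the stored configuration changed in the meantime, the update
+/// is rejected.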
+pub fn update_matcher( + name: String, + updater: MatcherConfigUpdater, + delete: Option>, + digest: Option, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + let digest = digest.map(hex::decode).transpose()?; + + proxmox_notify::api::matcher::update_matcher( + &mut config, + &name, + updater, + delete.as_deref(), + digest.as_deref(), + )?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + } + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Delete notification matcher. +pub fn delete_matcher(name: String, _rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + proxmox_notify::api::matcher::delete_matcher(&mut config, &name)?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +const ITEM_ROUTER: Router = Router::new() + .get(&API_METHOD_GET_MATCHER) + .put(&API_METHOD_UPDATE_MATCHER) + .delete(&API_METHOD_DELETE_MATCHER); + +pub const ROUTER: Router = Router::new() + .get(&API_METHOD_LIST_MATCHERS) + .post(&API_METHOD_ADD_MATCHER) + .match_all("name", &ITEM_ROUTER); diff --git a/src/api2/config/notifications/mod.rs b/src/api2/config/notifications/mod.rs new file mode 100644 index 000000000..dfe82ed03 --- /dev/null +++ b/src/api2/config/notifications/mod.rs @@ -0,0 +1,205 @@ +use anyhow::Error; +use serde::Serialize; +use serde_json::Value; +use std::cmp::Ordering; + +use proxmox_router::{list_subdirs_api_method, ApiMethod, Permission, RpcEnvironment}; +use proxmox_router::{Router, SubdirMap}; +use proxmox_schema::api; +use proxmox_sortable_macro::sortable; + +use crate::api2::admin::datastore::get_datastore_list; +use pbs_api_types::PRIV_SYS_AUDIT; + +use crate::api2::admin::prune::list_prune_jobs; +use crate::api2::admin::sync::list_sync_jobs; +use crate::api2::admin::verify::list_verification_jobs; +use crate::api2::config::media_pool::list_pools; +use crate::api2::tape::backup::list_tape_backup_jobs; + +pub mod gotify; +pub mod matchers; +pub mod sendmail; +pub mod smtp; +pub mod targets; + +#[sortable] +const SUBDIRS: SubdirMap = &sorted!([ + ("endpoints", &ENDPOINT_ROUTER), + ("matcher-fields", &FIELD_ROUTER), + ("matcher-field-values", &VALUE_ROUTER), + ("targets", &targets::ROUTER), + ("matchers", &matchers::ROUTER), +]); + +pub const ROUTER: Router = Router::new() + .get(&list_subdirs_api_method!(SUBDIRS)) + .subdirs(SUBDIRS); + +#[sortable] +const ENDPOINT_SUBDIRS: SubdirMap = &sorted!([ + ("gotify", &gotify::ROUTER), + ("sendmail", &sendmail::ROUTER), + ("smtp", &smtp::ROUTER), +]); + +const ENDPOINT_ROUTER: Router = Router::new() + .get(&list_subdirs_api_method!(ENDPOINT_SUBDIRS)) + .subdirs(ENDPOINT_SUBDIRS); + +const FIELD_ROUTER: Router = Router::new().get(&API_METHOD_GET_FIELDS); +const VALUE_ROUTER: Router = Router::new().get(&API_METHOD_GET_VALUES); + +#[api] +#[derive(Serialize)] +/// A matchable field. +pub struct MatchableField { + /// Name of the field + name: String, +} + +#[api] +#[derive(Serialize)] +/// A matchable metadata field value. +pub struct MatchableValue { + /// Field this value belongs to. + field: String, + /// Notification metadata value known by the system. 
+ value: String, + /// Additional comment for this value. + comment: Option, +} + +#[api( + protected: false, + input: { + properties: {}, + }, + returns: { + description: "List of known metadata fields.", + type: Array, + items: { type: MatchableField }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// Get all known metadata fields. +pub fn get_fields() -> Result, Error> { + let fields = ["datastore", "hostname", "job-id", "media-pool", "type"] + .into_iter() + .map(Into::into) + .map(|name| MatchableField { name }) + .collect(); + + Ok(fields) +} + +#[api( + protected: false, + input: { + properties: {}, + }, + returns: { + description: "List of known metadata field values.", + type: Array, + items: { type: MatchableValue }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// List all known, matchable metadata field values. +pub fn get_values( + param: Value, + info: &ApiMethod, + rpcenv: &mut dyn RpcEnvironment, +) -> Result, Error> { + let mut values = Vec::new(); + + let datastores = get_datastore_list(param.clone(), info, rpcenv)?; + + for datastore in datastores { + values.push(MatchableValue { + field: "datastore".into(), + value: datastore.store.clone(), + comment: datastore.comment.clone(), + }); + } + + let pools = list_pools(rpcenv)?; + for pool in pools { + values.push(MatchableValue { + field: "media-pool".into(), + value: pool.name.clone(), + comment: None, + }); + } + + let tape_backup_jobs = list_tape_backup_jobs(param.clone(), rpcenv)?; + for job in tape_backup_jobs { + values.push(MatchableValue { + field: "job-id".into(), + value: job.config.id, + comment: job.config.comment, + }); + } + + let prune_jobs = list_prune_jobs(None, param.clone(), rpcenv)?; + for job in prune_jobs { + values.push(MatchableValue { + field: "job-id".into(), + value: job.config.id, + comment: job.config.comment, + }); + } + + let sync_jobs = list_sync_jobs(None, param.clone(), rpcenv)?; + for job in sync_jobs { + values.push(MatchableValue { + field: "job-id".into(), + value: job.config.id, + comment: job.config.comment, + }); + } + + let verify_jobs = list_verification_jobs(None, param.clone(), rpcenv)?; + for job in verify_jobs { + values.push(MatchableValue { + field: "job-id".into(), + value: job.config.id, + comment: job.config.comment, + }); + } + + values.push(MatchableValue { + field: "hostname".into(), + value: proxmox_sys::nodename().into(), + comment: None, + }); + + for ty in [ + "acme", + "gc", + "package-updates", + "prune", + "sync", + "system-mail", + "tape-backup", + "tape-load", + "verify", + ] { + values.push(MatchableValue { + field: "type".into(), + value: ty.into(), + comment: None, + }); + } + + values.sort_by(|a, b| match a.field.cmp(&b.field) { + Ordering::Equal => a.value.cmp(&b.value), + ord => ord, + }); + + Ok(values) +} diff --git a/src/api2/config/notifications/sendmail.rs b/src/api2/config/notifications/sendmail.rs new file mode 100644 index 000000000..b8ce6b2b1 --- /dev/null +++ b/src/api2/config/notifications/sendmail.rs @@ -0,0 +1,178 @@ +use anyhow::Error; +use serde_json::Value; + +use proxmox_notify::endpoints::sendmail::{ + DeleteableSendmailProperty, SendmailConfig, SendmailConfigUpdater, +}; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use proxmox_router::{Permission, Router, RpcEnvironment}; +use proxmox_schema::api; + +use pbs_api_types::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA}; + 
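+// The sendmail, smtp and gotify endpoint modules, as well as the matcher
+// module, all mutate the shared notification configuration the same way:
+// take the config lock, load the config, apply the change through the
+// corresponding proxmox_notify::api call, then write the result back.
+// A minimal sketch of that flow (digest handling omitted):
+//
+//     let _lock = pbs_config::notifications::lock_config()?;
+//     let mut config = pbs_config::notifications::config()?;
+//     proxmox_notify::api::sendmail::add_endpoint(&mut config, endpoint)?;
+//     pbs_config::notifications::save_config(config)?;
+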
+#[api( + protected: true, + input: { + properties: {}, + }, + returns: { + description: "List of sendmail endpoints.", + type: Array, + items: { type: SendmailConfig }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// List all sendmail endpoints. +pub fn list_endpoints( + _param: Value, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result, Error> { + let config = pbs_config::notifications::config()?; + + let endpoints = proxmox_notify::api::sendmail::get_endpoints(&config)?; + + Ok(endpoints) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + } + }, + }, + returns: { type: SendmailConfig }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// Get a sendmail endpoint. +pub fn get_endpoint( + name: String, + rpcenv: &mut dyn RpcEnvironment, +) -> Result { + let config = pbs_config::notifications::config()?; + let endpoint = proxmox_notify::api::sendmail::get_endpoint(&config, &name)?; + + rpcenv["digest"] = hex::encode(config.digest()).into(); + + Ok(endpoint) +} + +#[api( + protected: true, + input: { + properties: { + endpoint: { + type: SendmailConfig, + flatten: true, + } + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Add a new sendmail endpoint. +pub fn add_endpoint( + endpoint: SendmailConfig, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + + proxmox_notify::api::sendmail::add_endpoint(&mut config, endpoint)?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + updater: { + type: SendmailConfigUpdater, + flatten: true, + }, + delete: { + description: "List of properties to delete.", + type: Array, + optional: true, + items: { + type: DeleteableSendmailProperty, + } + }, + digest: { + optional: true, + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + }, + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Update sendmail endpoint. +pub fn update_endpoint( + name: String, + updater: SendmailConfigUpdater, + delete: Option>, + digest: Option, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + let digest = digest.map(hex::decode).transpose()?; + + proxmox_notify::api::sendmail::update_endpoint( + &mut config, + &name, + updater, + delete.as_deref(), + digest.as_deref(), + )?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + } + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Delete sendmail endpoint. 
+pub fn delete_endpoint(name: String, _rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + proxmox_notify::api::sendmail::delete_endpoint(&mut config, &name)?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +const ITEM_ROUTER: Router = Router::new() + .get(&API_METHOD_GET_ENDPOINT) + .put(&API_METHOD_UPDATE_ENDPOINT) + .delete(&API_METHOD_DELETE_ENDPOINT); + +pub const ROUTER: Router = Router::new() + .get(&API_METHOD_LIST_ENDPOINTS) + .post(&API_METHOD_ADD_ENDPOINT) + .match_all("name", &ITEM_ROUTER); diff --git a/src/api2/config/notifications/smtp.rs b/src/api2/config/notifications/smtp.rs new file mode 100644 index 000000000..8df2ab18c --- /dev/null +++ b/src/api2/config/notifications/smtp.rs @@ -0,0 +1,191 @@ +use anyhow::Error; +use serde_json::Value; + +use proxmox_notify::endpoints::smtp::{ + DeleteableSmtpProperty, SmtpConfig, SmtpConfigUpdater, SmtpPrivateConfig, + SmtpPrivateConfigUpdater, +}; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use proxmox_router::{Permission, Router, RpcEnvironment}; +use proxmox_schema::api; + +use pbs_api_types::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA}; + +#[api( + protected: true, + input: { + properties: {}, + }, + returns: { + description: "List of smtp endpoints.", + type: Array, + items: { type: SmtpConfig }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// List all smtp endpoints. +pub fn list_endpoints( + _param: Value, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result, Error> { + let config = pbs_config::notifications::config()?; + + let endpoints = proxmox_notify::api::smtp::get_endpoints(&config)?; + + Ok(endpoints) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + } + }, + }, + returns: { type: SmtpConfig }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// Get a smtp endpoint. +pub fn get_endpoint(name: String, rpcenv: &mut dyn RpcEnvironment) -> Result { + let config = pbs_config::notifications::config()?; + let endpoint = proxmox_notify::api::smtp::get_endpoint(&config, &name)?; + + rpcenv["digest"] = hex::encode(config.digest()).into(); + + Ok(endpoint) +} + +#[api( + protected: true, + input: { + properties: { + endpoint: { + type: SmtpConfig, + flatten: true, + }, + password: { + optional: true, + description: "SMTP authentication password" + } + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Add a new smtp endpoint. 
+pub fn add_endpoint( + endpoint: SmtpConfig, + password: Option, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + let private_endpoint_config = SmtpPrivateConfig { + name: endpoint.name.clone(), + password, + }; + + proxmox_notify::api::smtp::add_endpoint(&mut config, endpoint, private_endpoint_config)?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + updater: { + type: SmtpConfigUpdater, + flatten: true, + }, + password: { + description: "SMTP authentication password", + optional: true, + }, + delete: { + description: "List of properties to delete.", + type: Array, + optional: true, + items: { + type: DeleteableSmtpProperty, + } + }, + digest: { + optional: true, + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + }, + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Update smtp endpoint. +pub fn update_endpoint( + name: String, + updater: SmtpConfigUpdater, + password: Option, + delete: Option>, + digest: Option, + _rpcenv: &mut dyn RpcEnvironment, +) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + let digest = digest.map(hex::decode).transpose()?; + + proxmox_notify::api::smtp::update_endpoint( + &mut config, + &name, + updater, + SmtpPrivateConfigUpdater { password }, + delete.as_deref(), + digest.as_deref(), + )?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + } + }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Delete smtp endpoint. +pub fn delete_endpoint(name: String, _rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + let _lock = pbs_config::notifications::lock_config()?; + let mut config = pbs_config::notifications::config()?; + proxmox_notify::api::smtp::delete_endpoint(&mut config, &name)?; + + pbs_config::notifications::save_config(config)?; + Ok(()) +} + +const ITEM_ROUTER: Router = Router::new() + .get(&API_METHOD_GET_ENDPOINT) + .put(&API_METHOD_UPDATE_ENDPOINT) + .delete(&API_METHOD_DELETE_ENDPOINT); + +pub const ROUTER: Router = Router::new() + .get(&API_METHOD_LIST_ENDPOINTS) + .post(&API_METHOD_ADD_ENDPOINT) + .match_all("name", &ITEM_ROUTER); diff --git a/src/api2/config/notifications/targets.rs b/src/api2/config/notifications/targets.rs new file mode 100644 index 000000000..6c5017b0b --- /dev/null +++ b/src/api2/config/notifications/targets.rs @@ -0,0 +1,63 @@ +use anyhow::Error; +use serde_json::Value; + +use proxmox_notify::api::Target; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap}; +use proxmox_schema::api; +use proxmox_sortable_macro::sortable; + +use pbs_api_types::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY}; + +#[api( + protected: true, + input: { + properties: {}, + }, + returns: { + description: "List of all entities which can be used as notification targets.", + type: Array, + items: { type: Target }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// List all notification targets. 
+pub fn list_targets(_param: Value, _rpcenv: &mut dyn RpcEnvironment) -> Result, Error> { + let config = pbs_config::notifications::config()?; + let targets = proxmox_notify::api::get_targets(&config)?; + + Ok(targets) +} + +#[api( + protected: true, + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + } + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false), + }, +)] +/// Test a given notification target. +pub fn test_target(name: String, _rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + let config = pbs_config::notifications::config()?; + proxmox_notify::api::common::test_target(&config, &name)?; + Ok(()) +} + +#[sortable] +const SUBDIRS: SubdirMap = &sorted!([("test", &TEST_ROUTER),]); +const TEST_ROUTER: Router = Router::new().post(&API_METHOD_TEST_TARGET); +const ITEM_ROUTER: Router = Router::new() + .get(&list_subdirs_api_method!(SUBDIRS)) + .subdirs(SUBDIRS); + +pub const ROUTER: Router = Router::new() + .get(&API_METHOD_LIST_TARGETS) + .match_all("name", &ITEM_ROUTER); diff --git a/src/api2/config/prune.rs b/src/api2/config/prune.rs index 4f7ce39cd..ce7b8ce56 100644 --- a/src/api2/config/prune.rs +++ b/src/api2/config/prune.rs @@ -1,7 +1,5 @@ use anyhow::Error; use hex::FromHex; -use proxmox_sys::task_log; -use proxmox_sys::WorkerTaskContext; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -15,6 +13,7 @@ use pbs_api_types::{ use pbs_config::prune; use pbs_config::CachedUserInfo; +use tracing::info; #[api( input: { @@ -58,15 +57,12 @@ pub fn list_prune_jobs( Ok(list) } -pub fn do_create_prune_job( - config: PruneJobConfig, - worker: Option<&dyn WorkerTaskContext>, -) -> Result<(), Error> { +pub fn do_create_prune_job(config: PruneJobConfig) -> Result<(), Error> { let _lock = prune::lock_config()?; let (mut section_config, _digest) = prune::config()?; - if section_config.sections.get(&config.id).is_some() { + if section_config.sections.contains_key(&config.id) { param_bail!("id", "job '{}' already exists.", config.id); } @@ -76,9 +72,7 @@ pub fn do_create_prune_job( crate::server::jobstate::create_state_file("prunejob", &config.id)?; - if let Some(worker) = worker { - task_log!(worker, "Prune job created: {}", config.id); - } + info!("Prune job created: {}", config.id); Ok(()) } @@ -108,7 +102,7 @@ pub fn create_prune_job( user_info.check_privs(&auth_id, &config.acl_path(), PRIV_DATASTORE_MODIFY, true)?; - do_create_prune_job(config, None) + do_create_prune_job(config) } #[api( diff --git a/src/api2/config/remote.rs b/src/api2/config/remote.rs index 2511c5d5c..069aef28e 100644 --- a/src/api2/config/remote.rs +++ b/src/api2/config/remote.rs @@ -89,7 +89,7 @@ pub fn create_remote(name: String, config: RemoteConfig, password: String) -> Re let (mut section_config, _digest) = pbs_config::remote::config()?; - if section_config.sections.get(&name).is_some() { + if section_config.sections.contains_key(&name) { param_bail!("name", "remote '{}' already exists.", name); } @@ -288,11 +288,8 @@ pub fn delete_remote(name: String, digest: Option) -> Result<(), Error> crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; } - match config.sections.get(&name) { - Some(_) => { - config.sections.remove(&name); - } - None => http_bail!(NOT_FOUND, "remote '{}' does not exist.", name), + if config.sections.remove(&name).is_none() { + http_bail!(NOT_FOUND, "remote '{}' does not exist.", name); } pbs_config::remote::save_config(&config)?; diff --git a/src/api2/config/sync.rs 
b/src/api2/config/sync.rs index 8809465cc..6fdc69a9e 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -154,7 +154,7 @@ pub fn create_sync_job( let (mut section_config, _digest) = sync::config()?; - if section_config.sections.get(&config.id).is_some() { + if section_config.sections.contains_key(&config.id) { param_bail!("id", "job '{}' already exists.", config.id); } diff --git a/src/api2/config/tape_backup_job.rs b/src/api2/config/tape_backup_job.rs index 386ff5300..b6db92998 100644 --- a/src/api2/config/tape_backup_job.rs +++ b/src/api2/config/tape_backup_job.rs @@ -75,7 +75,7 @@ pub fn create_tape_backup_job( let (mut config, _digest) = pbs_config::tape_job::config()?; - if config.sections.get(&job.id).is_some() { + if config.sections.contains_key(&job.id) { param_bail!("id", "job '{}' already exists.", job.id); } @@ -132,6 +132,8 @@ pub enum DeletableProperty { LatestOnly, /// Delete the 'notify-user' property NotifyUser, + /// Delete the 'notification-mode' property + NotificationMode, /// Delete the 'group_filter' property GroupFilter, /// Delete the 'max-depth' property @@ -202,6 +204,9 @@ pub fn update_tape_backup_job( DeletableProperty::NotifyUser => { data.setup.notify_user = None; } + DeletableProperty::NotificationMode => { + data.setup.notification_mode = None; + } DeletableProperty::Schedule => { data.schedule = None; } @@ -243,6 +248,9 @@ pub fn update_tape_backup_job( if update.setup.notify_user.is_some() { data.setup.notify_user = update.setup.notify_user; } + if update.setup.notification_mode.is_some() { + data.setup.notification_mode = update.setup.notification_mode; + } if update.setup.group_filter.is_some() { data.setup.group_filter = update.setup.group_filter; } diff --git a/src/api2/config/traffic_control.rs b/src/api2/config/traffic_control.rs index 30ea40ecf..e02aa20a3 100644 --- a/src/api2/config/traffic_control.rs +++ b/src/api2/config/traffic_control.rs @@ -59,7 +59,7 @@ pub fn create_traffic_control(config: TrafficControlRule) -> Result<(), Error> { let (mut section_config, _digest) = pbs_config::traffic_control::config()?; - if section_config.sections.get(&config.name).is_some() { + if section_config.sections.contains_key(&config.name) { param_bail!( "name", "traffic control rule '{}' already exists.", @@ -258,11 +258,8 @@ pub fn delete_traffic_control(name: String, digest: Option) -> Result<() crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; } - match config.sections.get(&name) { - Some(_) => { - config.sections.remove(&name); - } - None => http_bail!(NOT_FOUND, "traffic control rule '{}' does not exist.", name), + if config.sections.remove(&name).is_none() { + http_bail!(NOT_FOUND, "traffic control rule '{}' does not exist.", name); } pbs_config::traffic_control::save_config(&config)?; diff --git a/src/api2/config/verify.rs b/src/api2/config/verify.rs index 82dbe43b1..e71e0c2e6 100644 --- a/src/api2/config/verify.rs +++ b/src/api2/config/verify.rs @@ -85,7 +85,7 @@ pub fn create_verification_job( let (mut section_config, _digest) = verify::config()?; - if section_config.sections.get(&config.id).is_some() { + if section_config.sections.contains_key(&config.id) { param_bail!("id", "job '{}' already exists.", config.id); } diff --git a/src/api2/mod.rs b/src/api2/mod.rs index b404bbdb9..a83e4c205 100644 --- a/src/api2/mod.rs +++ b/src/api2/mod.rs @@ -1,5 +1,7 @@ //! 
The Proxmox Backup Server API +use std::future::Future; + use proxmox_sortable_macro::sortable; pub mod access; @@ -36,3 +38,21 @@ const SUBDIRS: SubdirMap = &sorted!([ pub const ROUTER: Router = Router::new() .get(&list_subdirs_api_method!(SUBDIRS)) .subdirs(SUBDIRS); + +#[derive(Clone)] +struct ExecInheritLogContext; + +impl hyper::rt::Executor for ExecInheritLogContext +where + Fut: Future + Send + 'static, + Fut::Output: Send, +{ + fn execute(&self, fut: Fut) { + use proxmox_log::LogContext; + + match LogContext::current() { + None => tokio::spawn(fut), + Some(context) => tokio::spawn(context.scope(fut)), + }; + } +} diff --git a/src/api2/node/apt.rs b/src/api2/node/apt.rs index 847c3fcc9..6383edba7 100644 --- a/src/api2/node/apt.rs +++ b/src/api2/node/apt.rs @@ -1,27 +1,21 @@ -use anyhow::{bail, format_err, Error}; -use serde_json::{json, Value}; -use std::os::unix::prelude::OsStrExt; +use anyhow::{bail, Error}; +use proxmox_apt_api_types::{ + APTChangeRepositoryOptions, APTGetChangelogOptions, APTRepositoriesResult, APTRepositoryHandle, + APTUpdateInfo, APTUpdateOptions, +}; +use proxmox_config_digest::ConfigDigest; +use proxmox_http::ProxyConfig; +use proxmox_rest_server::WorkerTask; use proxmox_router::{ list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap, }; use proxmox_schema::api; use proxmox_sys::fs::{replace_file, CreateOptions}; -use proxmox_apt::repositories::{ - APTRepositoryFile, APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo, - APTStandardRepository, -}; -use proxmox_http::ProxyConfig; - -use pbs_api_types::{ - APTUpdateInfo, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, - UPID_SCHEMA, -}; +use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA}; use crate::config::node; -use crate::tools::apt; -use proxmox_rest_server::WorkerTask; #[api( input: { @@ -44,16 +38,8 @@ use proxmox_rest_server::WorkerTask; }, )] /// List available APT updates -fn apt_update_available(_param: Value) -> Result { - if let Ok(false) = apt::pkg_cache_expired() { - if let Ok(Some(cache)) = apt::read_pkg_state() { - return Ok(json!(cache.package_status)); - } - } - - let cache = apt::update_cache()?; - - Ok(json!(cache.package_status)) +pub fn apt_update_available() -> Result, Error> { + proxmox_apt::list_available_apt_update(pbs_buildcfg::APT_PKG_STATE_FN) } pub fn update_apt_proxy_config(proxy_config: Option<&ProxyConfig>) -> Result<(), Error> { @@ -83,45 +69,6 @@ fn read_and_update_proxy_config() -> Result, Error> { Ok(proxy_config) } -fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> { - if !quiet { - worker.log_message("starting apt-get update") - } - - read_and_update_proxy_config()?; - - let mut command = std::process::Command::new("apt-get"); - command.arg("update"); - - // apt "errors" quite easily, and run_command is a bit rigid, so handle this inline for now. 
- let output = command - .output() - .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?; - - if !quiet { - worker.log_message(String::from_utf8(output.stdout)?); - } - - // TODO: improve run_command to allow outputting both, stderr and stdout - if !output.status.success() { - if output.status.code().is_some() { - let msg = String::from_utf8(output.stderr) - .map(|m| { - if m.is_empty() { - String::from("no error message") - } else { - m - } - }) - .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)")); - worker.log_warning(msg); - } else { - bail!("terminated by signal"); - } - } - Ok(()) -} - #[api( protected: true, input: { @@ -129,19 +76,10 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> { node: { schema: NODE_SCHEMA, }, - notify: { - type: bool, - description: r#"Send notification mail about new package updates available to the - email address configured for 'root@pam')."#, - default: false, - optional: true, - }, - quiet: { - description: "Only produces output suitable for logging, omitting progress indicators.", - type: bool, - default: false, - optional: true, - }, + options: { + type: APTUpdateOptions, + flatten: true, + } }, }, returns: { @@ -153,43 +91,22 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> { )] /// Update the APT database pub fn apt_update_database( - notify: bool, - quiet: bool, + options: APTUpdateOptions, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let auth_id = rpcenv.get_auth_id().unwrap(); let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; - let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| { - do_apt_update(&worker, quiet)?; - - let mut cache = apt::update_cache()?; - - if notify { - let mut notified = match cache.notified { - Some(notified) => notified, - None => std::collections::HashMap::new(), - }; - let mut to_notify: Vec<&APTUpdateInfo> = Vec::new(); - - for pkg in &cache.package_status { - match notified.insert(pkg.package.to_owned(), pkg.version.to_owned()) { - Some(notified_version) => { - if notified_version != pkg.version { - to_notify.push(pkg); - } - } - None => to_notify.push(pkg), - } - } - if !to_notify.is_empty() { - to_notify.sort_unstable_by_key(|k| &k.package); - crate::server::send_updates_available(&to_notify)?; - } - cache.notified = Some(notified); - apt::write_pkg_cache(&cache)?; - } - + let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |_worker| { + read_and_update_proxy_config()?; + proxmox_apt::update_database( + pbs_buildcfg::APT_PKG_STATE_FN, + &options, + |updates: &[&APTUpdateInfo]| { + crate::server::send_updates_available(updates)?; + Ok(()) + }, + )?; Ok(()) })?; @@ -203,14 +120,9 @@ pub fn apt_update_database( node: { schema: NODE_SCHEMA, }, - name: { - description: "Package name to get changelog of.", - type: String, - }, - version: { - description: "Package version to get changelog of. Omit to use candidate version.", - type: String, - optional: true, + options: { + type: APTGetChangelogOptions, + flatten: true, }, }, }, @@ -222,17 +134,8 @@ pub fn apt_update_database( }, )] /// Retrieve the changelog of the specified package. 
-fn apt_get_changelog(name: String, version: Option) -> Result { - let mut command = std::process::Command::new("apt-get"); - command.arg("changelog"); - command.arg("-qq"); // don't display download progress - if let Some(ver) = version { - command.arg(format!("{name}={ver}")); - } else { - command.arg(name); - } - let output = proxmox_sys::command::run_command(command, None)?; - Ok(json!(output)) +fn apt_get_changelog(options: APTGetChangelogOptions) -> Result { + proxmox_apt::get_changelog(&options) } #[api( @@ -257,10 +160,8 @@ fn apt_get_changelog(name: String, version: Option) -> Result Result, Error> { const PACKAGES: &[&str] = &[ - "proxmox-backup", "proxmox-backup-docs", "proxmox-backup-client", - "proxmox-backup-server", "proxmox-mail-forward", "proxmox-mini-journalreader", "proxmox-offline-mirror-helper", @@ -270,96 +171,16 @@ pub fn get_versions() -> Result, Error> { "zfs-utils", ]; - fn unknown_package(package: String, extra_info: Option) -> APTUpdateInfo { - APTUpdateInfo { - package, - title: "unknown".into(), - arch: "unknown".into(), - description: "unknown".into(), - version: "unknown".into(), - old_version: "unknown".into(), - origin: "unknown".into(), - priority: "unknown".into(), - section: "unknown".into(), - extra_info, - } - } - - let is_kernel = - |name: &str| name.starts_with("pve-kernel-") || name.starts_with("proxmox-kernel"); - - let mut packages: Vec = Vec::new(); - let pbs_packages = apt::list_installed_apt_packages( - |filter| { - filter.installed_version == Some(filter.active_version) - && (is_kernel(filter.package) || PACKAGES.contains(&filter.package)) - }, - None, - ); - - let running_kernel = format!( - "running kernel: {}", - std::str::from_utf8(nix::sys::utsname::uname()?.release().as_bytes())?.to_owned() - ); - if let Some(proxmox_backup) = pbs_packages - .iter() - .find(|pkg| pkg.package == "proxmox-backup") - { - let mut proxmox_backup = proxmox_backup.clone(); - proxmox_backup.extra_info = Some(running_kernel); - packages.push(proxmox_backup); - } else { - packages.push(unknown_package( - "proxmox-backup".into(), - Some(running_kernel), - )); - } - let version = pbs_buildcfg::PROXMOX_PKG_VERSION; let release = pbs_buildcfg::PROXMOX_PKG_RELEASE; - let daemon_version_info = Some(format!("running version: {}.{}", version, release)); - if let Some(pkg) = pbs_packages - .iter() - .find(|pkg| pkg.package == "proxmox-backup-server") - { - let mut pkg = pkg.clone(); - pkg.extra_info = daemon_version_info; - packages.push(pkg); - } else { - packages.push(unknown_package( - "proxmox-backup".into(), - daemon_version_info, - )); - } + let running_daemon_version = format!("running version: {version}.{release}"); - let mut kernel_pkgs: Vec = pbs_packages - .iter() - .filter(|pkg| is_kernel(&pkg.package)) - .cloned() - .collect(); - // make sure the cache mutex gets dropped before the next call to list_installed_apt_packages - { - let cache = apt_pkg_native::Cache::get_singleton(); - kernel_pkgs.sort_by(|left, right| { - cache - .compare_versions(&left.old_version, &right.old_version) - .reverse() - }); - } - packages.append(&mut kernel_pkgs); - - // add entry for all packages we're interested in, even if not installed - for pkg in PACKAGES.iter() { - if pkg == &"proxmox-backup" || pkg == &"proxmox-backup-server" { - continue; - } - match pbs_packages.iter().find(|item| &item.package == pkg) { - Some(apt_pkg) => packages.push(apt_pkg.to_owned()), - None => packages.push(unknown_package(pkg.to_string(), None)), - } - } - - Ok(packages) + 
proxmox_apt::get_package_versions( + "proxmox-backup", + "proxmox-backup-server", + &running_daemon_version, + &PACKAGES, + ) } #[api( @@ -371,61 +192,15 @@ pub fn get_versions() -> Result, Error> { }, }, returns: { - type: Object, - description: "Result from parsing the APT repository files in /etc/apt/.", - properties: { - files: { - description: "List of parsed repository files.", - type: Array, - items: { - type: APTRepositoryFile, - }, - }, - errors: { - description: "List of problematic files.", - type: Array, - items: { - type: APTRepositoryFileError, - }, - }, - digest: { - schema: PROXMOX_CONFIG_DIGEST_SCHEMA, - }, - infos: { - description: "List of additional information/warnings about the repositories.", - items: { - type: APTRepositoryInfo, - }, - }, - "standard-repos": { - description: "List of standard repositories and their configuration status.", - items: { - type: APTStandardRepository, - }, - }, - }, + type: APTRepositoriesResult, }, access: { permission: &Permission::Privilege(&[], PRIV_SYS_AUDIT, false), }, )] /// Get APT repository information. -pub fn get_repositories() -> Result { - let (files, errors, digest) = proxmox_apt::repositories::repositories()?; - let digest = hex::encode(digest); - - let suite = proxmox_apt::repositories::get_current_release_codename()?; - - let infos = proxmox_apt::repositories::check_repositories(&files, suite); - let standard_repos = proxmox_apt::repositories::standard_repositories(&files, "pbs", suite); - - Ok(json!({ - "files": files, - "errors": errors, - "digest": digest, - "infos": infos, - "standard-repos": standard_repos, - })) +pub fn get_repositories() -> Result { + proxmox_apt::list_repositories("pbs") } #[api( @@ -438,7 +213,7 @@ pub fn get_repositories() -> Result { type: APTRepositoryHandle, }, digest: { - schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + type: ConfigDigest, optional: true, }, }, @@ -452,61 +227,11 @@ pub fn get_repositories() -> Result { /// If the repository is already configured, it will be set to enabled. /// /// The `digest` parameter asserts that the configuration has not been modified. -pub fn add_repository(handle: APTRepositoryHandle, digest: Option) -> Result<(), Error> { - let (mut files, errors, current_digest) = proxmox_apt::repositories::repositories()?; - - let suite = proxmox_apt::repositories::get_current_release_codename()?; - - if let Some(expected_digest) = digest { - let current_digest = hex::encode(current_digest); - crate::tools::assert_if_modified(&expected_digest, ¤t_digest)?; - } - - // check if it's already configured first - for file in files.iter_mut() { - for repo in file.repositories.iter_mut() { - if repo.is_referenced_repository(handle, "pbs", &suite.to_string()) { - if repo.enabled { - return Ok(()); - } - - repo.set_enabled(true); - file.write()?; - - return Ok(()); - } - } - } - - let (repo, path) = proxmox_apt::repositories::get_standard_repository(handle, "pbs", suite); - - if let Some(error) = errors.iter().find(|error| error.path == path) { - bail!( - "unable to parse existing file {} - {}", - error.path, - error.error, - ); - } - - if let Some(file) = files - .iter_mut() - .find(|file| file.path.as_ref() == Some(&path)) - { - file.repositories.push(repo); - - file.write()?; - } else { - let mut file = match APTRepositoryFile::new(&path)? 
{ - Some(file) => file, - None => bail!("invalid path - {}", path), - }; - - file.repositories.push(repo); - - file.write()?; - } - - Ok(()) +pub fn add_repository( + handle: APTRepositoryHandle, + digest: Option, +) -> Result<(), Error> { + proxmox_apt::add_repository_handle("pbs", handle, digest) } #[api( @@ -523,13 +248,12 @@ pub fn add_repository(handle: APTRepositoryHandle, digest: Option) -> Re description: "Index within the file (starting from 0).", type: usize, }, - enabled: { - description: "Whether the repository should be enabled or not.", - type: bool, - optional: true, + options: { + type: APTChangeRepositoryOptions, + flatten: true, }, digest: { - schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + type: ConfigDigest, optional: true, }, }, @@ -545,38 +269,10 @@ pub fn add_repository(handle: APTRepositoryHandle, digest: Option) -> Re pub fn change_repository( path: String, index: usize, - enabled: Option, - digest: Option, + options: APTChangeRepositoryOptions, + digest: Option, ) -> Result<(), Error> { - let (mut files, errors, current_digest) = proxmox_apt::repositories::repositories()?; - - if let Some(expected_digest) = digest { - let current_digest = hex::encode(current_digest); - crate::tools::assert_if_modified(&expected_digest, ¤t_digest)?; - } - - if let Some(error) = errors.iter().find(|error| error.path == path) { - bail!("unable to parse file {} - {}", error.path, error.error); - } - - if let Some(file) = files - .iter_mut() - .find(|file| file.path.as_ref() == Some(&path)) - { - if let Some(repo) = file.repositories.get_mut(index) { - if let Some(enabled) = enabled { - repo.set_enabled(enabled); - } - - file.write()?; - } else { - bail!("invalid index - {}", index); - } - } else { - bail!("invalid path - {}", path); - } - - Ok(()) + proxmox_apt::change_repository(&path, index, &options, digest) } const SUBDIRS: SubdirMap = &[ diff --git a/src/api2/node/certificates.rs b/src/api2/node/certificates.rs index 6dfcd0c06..61ef910e4 100644 --- a/src/api2/node/certificates.rs +++ b/src/api2/node/certificates.rs @@ -5,16 +5,17 @@ use anyhow::{bail, format_err, Error}; use openssl::pkey::PKey; use openssl::x509::X509; use serde::{Deserialize, Serialize}; +use tracing::info; use proxmox_router::list_subdirs_api_method; use proxmox_router::SubdirMap; use proxmox_router::{Permission, Router, RpcEnvironment}; use proxmox_schema::api; -use proxmox_sys::{task_log, task_warn}; use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_MODIFY}; use pbs_buildcfg::configdir; use pbs_tools::cert; +use tracing::warn; use crate::acme::AcmeClient; use crate::api2::types::AcmeDomain; @@ -301,10 +302,7 @@ async fn order_certificate( }; if domains.is_empty() { - task_log!( - worker, - "No domains configured to be ordered from an ACME server." 
- ); + info!("No domains configured to be ordered from an ACME server."); return Ok(None); } @@ -312,11 +310,11 @@ async fn order_certificate( let mut acme = node_config.acme_client().await?; - task_log!(worker, "Placing ACME order"); + info!("Placing ACME order"); let order = acme .new_order(domains.iter().map(|d| d.domain.to_ascii_lowercase())) .await?; - task_log!(worker, "Order URL: {}", order.location); + info!("Order URL: {}", order.location); let identifiers: Vec = order .data @@ -328,7 +326,7 @@ async fn order_certificate( .collect(); for auth_url in &order.data.authorizations { - task_log!(worker, "Getting authorization details from '{}'", auth_url); + info!("Getting authorization details from '{auth_url}'"); let mut auth = acme.get_authorization(auth_url).await?; let domain = match &mut auth.identifier { @@ -336,43 +334,35 @@ async fn order_certificate( }; if auth.status == Status::Valid { - task_log!(worker, "{} is already validated!", domain); + info!("{domain} is already validated!"); continue; } - task_log!(worker, "The validation for {} is pending", domain); + info!("The validation for {domain} is pending"); let domain_config: &AcmeDomain = get_domain_config(&domain)?; let plugin_id = domain_config.plugin.as_deref().unwrap_or("standalone"); - let mut plugin_cfg = - crate::acme::get_acme_plugin(&plugins, plugin_id)?.ok_or_else(|| { - format_err!("plugin '{}' for domain '{}' not found!", plugin_id, domain) - })?; + let mut plugin_cfg = crate::acme::get_acme_plugin(&plugins, plugin_id)? + .ok_or_else(|| format_err!("plugin '{plugin_id}' for domain '{domain}' not found!"))?; - task_log!(worker, "Setting up validation plugin"); + info!("Setting up validation plugin"); let validation_url = plugin_cfg .setup(&mut acme, &auth, domain_config, Arc::clone(&worker)) .await?; - let result = request_validation(&worker, &mut acme, auth_url, validation_url).await; + let result = request_validation(&mut acme, auth_url, validation_url).await; if let Err(err) = plugin_cfg .teardown(&mut acme, &auth, domain_config, Arc::clone(&worker)) .await { - task_warn!( - worker, - "Failed to teardown plugin '{}' for domain '{}' - {}", - plugin_id, - domain, - err - ); + warn!("Failed to teardown plugin '{plugin_id}' for domain '{domain}' - {err}"); } result?; } - task_log!(worker, "All domains validated"); - task_log!(worker, "Creating CSR"); + info!("All domains validated"); + info!("Creating CSR"); let csr = proxmox_acme::util::Csr::generate(&identifiers, &Default::default())?; let mut finalize_error_cnt = 0u8; @@ -385,7 +375,7 @@ async fn order_certificate( match order.status { Status::Pending => { - task_log!(worker, "still pending, trying to finalize anyway"); + info!("still pending, trying to finalize anyway"); let finalize = order .finalize .as_deref() @@ -400,7 +390,7 @@ async fn order_certificate( tokio::time::sleep(Duration::from_secs(5)).await; } Status::Ready => { - task_log!(worker, "order is ready, finalizing"); + info!("order is ready, finalizing"); let finalize = order .finalize .as_deref() @@ -409,18 +399,18 @@ async fn order_certificate( tokio::time::sleep(Duration::from_secs(5)).await; } Status::Processing => { - task_log!(worker, "still processing, trying again in 30 seconds"); + info!("still processing, trying again in 30 seconds"); tokio::time::sleep(Duration::from_secs(30)).await; } Status::Valid => { - task_log!(worker, "valid"); + info!("valid"); break; } other => bail!("order status: {:?}", other), } } - task_log!(worker, "Downloading certificate"); + info!("Downloading 
certificate"); let certificate = acme .get_certificate( order @@ -437,15 +427,14 @@ async fn order_certificate( } async fn request_validation( - worker: &WorkerTask, acme: &mut AcmeClient, auth_url: &str, validation_url: &str, ) -> Result<(), Error> { - task_log!(worker, "Triggering validation"); + info!("Triggering validation"); acme.request_challenge_validation(validation_url).await?; - task_log!(worker, "Sleeping for 5 seconds"); + info!("Sleeping for 5 seconds"); tokio::time::sleep(Duration::from_secs(5)).await; loop { @@ -454,10 +443,7 @@ async fn request_validation( let auth = acme.get_authorization(auth_url).await?; match auth.status { Status::Pending => { - task_log!( - worker, - "Status is still 'pending', trying again in 10 seconds" - ); + info!("Status is still 'pending', trying again in 10 seconds"); tokio::time::sleep(Duration::from_secs(10)).await; } Status::Valid => return Ok(()), @@ -582,15 +568,12 @@ pub fn revoke_acme_cert(rpcenv: &mut dyn RpcEnvironment) -> Result Result, Error> { - lazy_static::lazy_static! { - static ref MOUNT_NAME_REGEX: regex::Regex = regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap(); - } + static MOUNT_NAME_REGEX: LazyLock = + LazyLock::new(|| regex::Regex::new(r"^mnt-datastore-(.+)\.mount$").unwrap()); let mut list = Vec::new(); @@ -155,13 +157,21 @@ pub fn create_datastore_disk( let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); - // check if the default path does exist already and bail if it does + // check if the default path exists already. + // bail if it is not empty or another filesystem mounted on top let default_path = std::path::PathBuf::from(&mount_point); match std::fs::metadata(&default_path) { Err(_) => {} // path does not exist - Ok(_) => { - bail!("path {:?} already exists", default_path); + Ok(stat) => { + let basedir_dev = std::fs::metadata(BASE_MOUNT_DIR)?.st_dev(); + if stat.st_dev() != basedir_dev { + bail!("path {default_path:?} already exists and is mountpoint"); + } + let is_empty = default_path.read_dir()?.next().is_none(); + if !is_empty { + bail!("path {default_path:?} already exists and is not empty"); + } } } @@ -170,8 +180,8 @@ pub fn create_datastore_disk( Some(name.clone()), auth_id, to_stdout, - move |worker| { - task_log!(worker, "create datastore '{}' on disk {}", name, disk); + move |_worker| { + info!("create datastore '{name}' on disk {disk}"); let add_datastore = add_datastore.unwrap_or(false); let filesystem = filesystem.unwrap_or(FileSystemType::Ext4); @@ -200,16 +210,11 @@ pub fn create_datastore_disk( let (config, _digest) = pbs_config::datastore::config()?; - if config.sections.get(&datastore.name).is_some() { + if config.sections.contains_key(&datastore.name) { bail!("datastore '{}' already exists.", datastore.name); } - crate::api2::config::datastore::do_create_datastore( - lock, - config, - datastore, - Some(&worker), - )?; + crate::api2::config::datastore::do_create_datastore(lock, config, datastore)?; } Ok(()) @@ -253,7 +258,7 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> { } // disable systemd mount-unit - let mut mount_unit_name = proxmox_sys::systemd::escape_unit(&path, true); + let mut mount_unit_name = proxmox_systemd::escape_unit(&path, true); mount_unit_name.push_str(".mount"); crate::tools::systemd::disable_unit(&mount_unit_name)?; @@ -289,7 +294,7 @@ fn create_datastore_mount_unit( fs_type: FileSystemType, what: &str, ) -> Result { - let mut mount_unit_name = proxmox_sys::systemd::escape_unit(mount_point, true); + let mut mount_unit_name = 
proxmox_systemd::escape_unit(mount_point, true); mount_unit_name.push_str(".mount"); let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name); diff --git a/src/api2/node/disks/mod.rs b/src/api2/node/disks/mod.rs index 711dae7be..4ef4ee2b8 100644 --- a/src/api2/node/disks/mod.rs +++ b/src/api2/node/disks/mod.rs @@ -6,7 +6,7 @@ use proxmox_router::{ }; use proxmox_schema::api; use proxmox_sortable_macro::sortable; -use proxmox_sys::task_log; +use tracing::info; use pbs_api_types::{ BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA, NODE_SCHEMA, @@ -164,8 +164,8 @@ pub fn initialize_disk( Some(disk.clone()), auth_id, to_stdout, - move |worker| { - task_log!(worker, "initialize disk {}", disk); + move |_worker| { + info!("initialize disk {disk}"); let disk_manager = DiskManage::new(); let disk_info = disk_manager.disk_by_name(&disk)?; @@ -209,13 +209,13 @@ pub fn wipe_disk(disk: String, rpcenv: &mut dyn RpcEnvironment) -> Result task_log!(worker, "{output}"), + Ok(output) => info!("{output}"), Err(err) => { - task_error!(worker, "{err}"); + error!("{err}"); bail!("Error during 'zpool create', see task log for more details"); } }; @@ -288,7 +282,7 @@ pub fn create_zpool( if std::path::Path::new("/lib/systemd/system/zfs-import@.service").exists() { let import_unit = format!( "zfs-import@{}.service", - proxmox_sys::systemd::escape_unit(&name, false) + proxmox_systemd::escape_unit(&name, false) ); crate::tools::systemd::enable_unit(&import_unit)?; } @@ -299,11 +293,11 @@ pub fn create_zpool( command.arg(&format!("compression={}", compression)); } command.args(["relatime=on", &name]); - task_log!(worker, "# {:?}", command); + info!("# {command:?}"); match proxmox_sys::command::run_command(command, None) { - Ok(output) => task_log!(worker, "{output}"), + Ok(output) => info!("{output}"), Err(err) => { - task_error!(worker, "{err}"); + error!("{err}"); bail!("Error during 'zfs set', see task log for more details"); } }; @@ -315,16 +309,11 @@ pub fn create_zpool( let (config, _digest) = pbs_config::datastore::config()?; - if config.sections.get(&datastore.name).is_some() { + if config.sections.contains_key(&datastore.name) { bail!("datastore '{}' already exists.", datastore.name); } - crate::api2::config::datastore::do_create_datastore( - lock, - config, - datastore, - Some(&worker), - )?; + crate::api2::config::datastore::do_create_datastore(lock, config, datastore)?; } Ok(()) diff --git a/src/api2/node/dns.rs b/src/api2/node/dns.rs index 87a117382..bd1f528f4 100644 --- a/src/api2/node/dns.rs +++ b/src/api2/node/dns.rs @@ -1,9 +1,8 @@ -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, LazyLock, Mutex}; use ::serde::{Deserialize, Serialize}; use anyhow::Error; use const_format::concatcp; -use lazy_static::lazy_static; use openssl::sha; use regex::Regex; use serde_json::{json, Value}; @@ -46,11 +45,10 @@ pub fn read_etc_resolv_conf() -> Result { let data = String::from_utf8(raw)?; - lazy_static! { - static ref DOMAIN_REGEX: Regex = Regex::new(r"^\s*(?:search|domain)\s+(\S+)\s*").unwrap(); - static ref SERVER_REGEX: Regex = - Regex::new(concatcp!(r"^\s*nameserver\s+(", IPRE_STR, r")\s*")).unwrap(); - } + static DOMAIN_REGEX: LazyLock = + LazyLock::new(|| Regex::new(r"^\s*(?:search|domain)\s+(\S+)\s*").unwrap()); + static SERVER_REGEX: LazyLock = + LazyLock::new(|| Regex::new(concatcp!(r"^\s*nameserver\s+(", IPRE_STR, r")\s*")).unwrap()); let mut options = String::new(); @@ -131,9 +129,7 @@ pub fn update_dns( delete: Option>, digest: Option, ) -> Result { - lazy_static! 
{ - static ref MUTEX: Arc> = Arc::new(Mutex::new(())); - } + static MUTEX: LazyLock>> = LazyLock::new(|| Arc::new(Mutex::new(()))); let _guard = MUTEX.lock(); diff --git a/src/api2/node/mod.rs b/src/api2/node/mod.rs index 1bea28255..af8d710d2 100644 --- a/src/api2/node/mod.rs +++ b/src/api2/node/mod.rs @@ -25,6 +25,7 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fd::fd_change_cloexec; use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_CONSOLE}; +use tracing::{info, warn}; use crate::auth::{private_auth_keyring, public_auth_keyring}; use crate::tools; @@ -181,20 +182,18 @@ async fn termproxy(cmd: Option, rpcenv: &mut dyn RpcEnvironment) -> Resu let stdout = child.stdout.take().expect("no child stdout handle"); let stderr = child.stderr.take().expect("no child stderr handle"); - let worker_stdout = worker.clone(); let stdout_fut = async move { let mut reader = BufReader::new(stdout).lines(); while let Some(line) = reader.next_line().await? { - worker_stdout.log_message(line); + info!(line); } Ok::<(), Error>(()) }; - let worker_stderr = worker.clone(); let stderr_fut = async move { let mut reader = BufReader::new(stderr).lines(); while let Some(line) = reader.next_line().await? { - worker_stderr.log_warning(line); + warn!(line); } Ok::<(), Error>(()) }; @@ -226,9 +225,9 @@ async fn termproxy(cmd: Option, rpcenv: &mut dyn RpcEnvironment) -> Resu } if let Err(err) = child.kill().await { - worker.log_warning(format!("error killing termproxy: {}", err)); + warn!("error killing termproxy: {err}"); } else if let Err(err) = child.wait().await { - worker.log_warning(format!("error awaiting termproxy: {}", err)); + warn!("error awaiting termproxy: {err}"); } } diff --git a/src/api2/node/network.rs b/src/api2/node/network.rs index ade6fe408..273751c4a 100644 --- a/src/api2/node/network.rs +++ b/src/api2/node/network.rs @@ -12,7 +12,9 @@ use pbs_api_types::{ NETWORK_INTERFACE_ARRAY_SCHEMA, NETWORK_INTERFACE_LIST_SCHEMA, NETWORK_INTERFACE_NAME_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, }; -use pbs_config::network::{self, NetworkConfig}; +use pbs_config::network::{ + self, parse_vlan_id_from_name, parse_vlan_raw_device_from_name, NetworkConfig, +}; use proxmox_rest_server::WorkerTask; @@ -147,7 +149,7 @@ pub fn list_network_devices( }, returns: { type: Interface }, access: { - permission: &Permission::Privilege(&["system", "network", "interfaces", "{name}"], PRIV_SYS_AUDIT, false), + permission: &Permission::Privilege(&["system", "network", "interfaces", "{iface}"], PRIV_SYS_AUDIT, false), }, )] /// Read a network interface configuration. 
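The VLAN hunks that follow let callers omit the new vlan-id / vlan-raw-device parameters when the interface name already encodes them, falling back to parse_vlan_id_from_name and parse_vlan_raw_device_from_name from pbs_config::network. A rough, hypothetical sketch of the assumed "device.N" naming convention, not the actual helpers:

// Assumed "raw-device.vlan-id" convention, e.g. "eno1.100" -> ("eno1", 100).
fn parse_vlan_parts(iface: &str) -> Option<(&str, u16)> {
    let (raw_device, vlan_id) = iface.rsplit_once('.')?;
    vlan_id.parse().ok().map(|id| (raw_device, id))
}

#[test]
fn vlan_name_convention() {
    assert_eq!(parse_vlan_parts("eno1.100"), Some(("eno1", 100)));
    assert_eq!(parse_vlan_parts("vmbr0"), None);
}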
@@ -227,9 +229,18 @@ pub fn read_interface(iface: String) -> Result { optional: true, }, bridge_vlan_aware: { - description: "Enable bridge vlan support.", - type: bool, - optional: true, + description: "Enable bridge vlan support.", + type: bool, + optional: true, + }, + "vlan-id": { + description: "VLAN ID.", + type: u16, + optional: true, + }, + "vlan-raw-device": { + schema: NETWORK_INTERFACE_NAME_SCHEMA, + optional: true, }, bond_mode: { type: LinuxBondMode, @@ -269,6 +280,8 @@ pub fn create_interface( mtu: Option, bridge_ports: Option, bridge_vlan_aware: Option, + vlan_id: Option, + vlan_raw_device: Option, bond_mode: Option, bond_primary: Option, bond_xmit_hash_policy: Option, @@ -373,6 +386,24 @@ pub fn create_interface( set_bond_slaves(&mut interface, slaves)?; } } + NetworkInterfaceType::Vlan => { + if vlan_id.is_none() && parse_vlan_id_from_name(&iface).is_none() { + bail!("vlan-id must be set"); + } + interface.vlan_id = vlan_id; + + if let Some(dev) = vlan_raw_device + .as_deref() + .or_else(|| parse_vlan_raw_device_from_name(&iface)) + { + if !config.interfaces.contains_key(dev) { + bail!("vlan-raw-device {dev} does not exist"); + } + } else { + bail!("vlan-raw-device must be set"); + } + interface.vlan_raw_device = vlan_raw_device; + } _ => bail!( "creating network interface type '{:?}' is not supported", interface_type @@ -503,9 +534,18 @@ pub enum DeletableProperty { optional: true, }, bridge_vlan_aware: { - description: "Enable bridge vlan support.", - type: bool, - optional: true, + description: "Enable bridge vlan support.", + type: bool, + optional: true, + }, + "vlan-id": { + description: "VLAN ID.", + type: u16, + optional: true, + }, + "vlan-raw-device": { + schema: NETWORK_INTERFACE_NAME_SCHEMA, + optional: true, }, bond_mode: { type: LinuxBondMode, @@ -557,6 +597,8 @@ pub fn update_interface( mtu: Option, bridge_ports: Option, bridge_vlan_aware: Option, + vlan_id: Option, + vlan_raw_device: Option, bond_mode: Option, bond_primary: Option, bond_xmit_hash_policy: Option, @@ -581,6 +623,15 @@ pub fn update_interface( check_duplicate_gateway_v6(&config, &iface)?; } + if let Some(dev) = vlan_raw_device + .as_deref() + .or_else(|| parse_vlan_raw_device_from_name(&iface)) + { + if !config.interfaces.contains_key(dev) { + bail!("vlan-raw-device {dev} does not exist"); + } + } + let interface = config.lookup_mut(&iface)?; if let Some(interface_type) = param.get("type") { @@ -734,6 +785,13 @@ pub fn update_interface( interface.method6 = Some(NetworkConfigMethod::Manual); } + if vlan_id.is_some() { + interface.vlan_id = vlan_id; + } + if vlan_raw_device.is_some() { + interface.vlan_raw_device = vlan_raw_device; + } + network::save_config(&config)?; Ok(()) diff --git a/src/api2/node/rrd.rs b/src/api2/node/rrd.rs index 89e902b97..352603824 100644 --- a/src/api2/node/rrd.rs +++ b/src/api2/node/rrd.rs @@ -3,17 +3,18 @@ use serde_json::{json, Value}; use std::collections::BTreeMap; use proxmox_router::{Permission, Router}; +use proxmox_rrd_api_types::{RrdMode, RrdTimeframe}; use proxmox_schema::api; -use pbs_api_types::{RRDMode, RRDTimeFrame, NODE_SCHEMA, PRIV_SYS_AUDIT}; +use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT}; -use crate::rrd_cache::extract_rrd_data; +use crate::server::metric_collection::rrd::extract_rrd_data; pub fn create_value_from_rrd( basedir: &str, list: &[&str], - timeframe: RRDTimeFrame, - mode: RRDMode, + timeframe: RrdTimeframe, + mode: RrdMode, ) -> Result { let mut result: Vec = Vec::new(); @@ -64,10 +65,10 @@ pub fn create_value_from_rrd( schema: 
NODE_SCHEMA, }, timeframe: { - type: RRDTimeFrame, + type: RrdTimeframe, }, cf: { - type: RRDMode, + type: RrdMode, }, }, }, @@ -76,7 +77,7 @@ pub fn create_value_from_rrd( }, )] /// Read node stats -fn get_node_stats(timeframe: RRDTimeFrame, cf: RRDMode, _param: Value) -> Result { +fn get_node_stats(timeframe: RrdTimeframe, cf: RrdMode, _param: Value) -> Result { create_value_from_rrd( "host", &[ diff --git a/src/api2/node/services.rs b/src/api2/node/services.rs index a8db0e27b..2da9e1adb 100644 --- a/src/api2/node/services.rs +++ b/src/api2/node/services.rs @@ -306,7 +306,7 @@ fn stop_service( permission: &Permission::Privilege(&["system", "services", "{service}"], PRIV_SYS_MODIFY, false), }, )] -/// Retart service. +/// Restart service. fn restart_service( service: String, _param: Value, diff --git a/src/api2/node/status.rs b/src/api2/node/status.rs index 07c20444f..78913e4c2 100644 --- a/src/api2/node/status.rs +++ b/src/api2/node/status.rs @@ -98,7 +98,7 @@ async fn get_status( uname.machine(), ); - let disk = crate::tools::fs::fs_info_static(proxmox_lang::c_str!("/")).await?; + let disk = crate::tools::fs::fs_info_static(c"/").await?; let boot_info = boot_mode_to_info(boot_mode::BootMode::query(), boot_mode::SecureBoot::query()); diff --git a/src/api2/node/syslog.rs b/src/api2/node/syslog.rs index 1a377c549..3f18a1da1 100644 --- a/src/api2/node/syslog.rs +++ b/src/api2/node/syslog.rs @@ -119,18 +119,21 @@ fn dump_journal( }, }, returns: { - type: Object, + type: Array, description: "Returns a list of syslog entries.", - properties: { - n: { - type: Integer, - description: "Line number.", + items: { + description: "Syslog line with line number.", + properties: { + n: { + type: Integer, + description: "Line number.", + }, + t: { + type: String, + description: "Line text.", + } }, - t: { - type: String, - description: "Line text.", - } - }, + } }, access: { permission: &Permission::Privilege(&["system", "log"], PRIV_SYS_AUDIT, false), diff --git a/src/api2/node/tasks.rs b/src/api2/node/tasks.rs index 8f08d3af8..7fd07f01b 100644 --- a/src/api2/node/tasks.rs +++ b/src/api2/node/tasks.rs @@ -252,7 +252,7 @@ fn into_task_list_item(info: proxmox_rest_server::TaskListInfo) -> pbs_api_types exitstatus: { type: String, optional: true, - description: "'OK', 'Error: ', or 'unkwown'.", + description: "'OK', 'Error: ', or 'unknown'.", }, }, }, @@ -445,7 +445,7 @@ fn stop_task(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result success, abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort, })?; - task_log!(worker, "pull datastore '{}' end", store); + info!("pull datastore '{store}' end"); Ok(()) }, diff --git a/src/api2/reader/environment.rs b/src/api2/reader/environment.rs index 37039da2f..3b2f06f43 100644 --- a/src/api2/reader/environment.rs +++ b/src/api2/reader/environment.rs @@ -10,8 +10,9 @@ use pbs_datastore::backup_info::BackupDir; use pbs_datastore::DataStore; use proxmox_rest_server::formatter::*; use proxmox_rest_server::WorkerTask; +use tracing::info; -/// `RpcEnvironmet` implementation for backup reader service +/// `RpcEnvironment` implementation for backup reader service #[derive(Clone)] pub struct ReaderEnvironment { env_type: RpcEnvironmentType, @@ -39,7 +40,7 @@ impl ReaderEnvironment { auth_id, worker, datastore, - debug: false, + debug: tracing::enabled!(tracing::Level::DEBUG), formatter: JSON_FORMATTER, backup_dir, allowed_chunks: Arc::new(RwLock::new(HashSet::new())), @@ -47,12 +48,16 @@ impl ReaderEnvironment { } pub fn log>(&self, msg: S) { - 
self.worker.log_message(msg); + info!("{}", msg.as_ref()); } pub fn debug>(&self, msg: S) { if self.debug { - self.worker.log_message(msg); + // This is kinda weird, we would like to use tracing::debug! here and automatically + // filter it, but self.debug is set from the client-side and the logs are printed on + // client and server side. This means that if the client sets the log level to debug, + // both server and client need to have 'debug' logs printed. + self.log(msg); } } diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs index 42b428385..23051653e 100644 --- a/src/api2/reader/mod.rs +++ b/src/api2/reader/mod.rs @@ -9,12 +9,14 @@ use hyper::{Body, Request, Response, StatusCode}; use serde::Deserialize; use serde_json::Value; +use proxmox_rest_server::{H2Service, WorkerTask}; use proxmox_router::{ http_err, list_subdirs_api_method, ApiHandler, ApiMethod, ApiResponseFuture, Permission, Router, RpcEnvironment, SubdirMap, }; use proxmox_schema::{BooleanSchema, ObjectSchema}; use proxmox_sortable_macro::sortable; +use proxmox_sys::fs::lock_dir_noblock_shared; use pbs_api_types::{ Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, @@ -23,14 +25,13 @@ use pbs_api_types::{ }; use pbs_config::CachedUserInfo; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{archive_type, ArchiveType}; +use pbs_datastore::manifest::ArchiveType; use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1}; use pbs_tools::json::required_string_param; -use proxmox_rest_server::{H2Service, WorkerTask}; -use proxmox_sys::fs::lock_dir_noblock_shared; use crate::api2::backup::optional_ns_param; use crate::api2::helpers; +use crate::api2::ExecInheritLogContext; mod environment; use environment::*; @@ -183,7 +184,8 @@ fn upgrade_to_backup_reader_protocol( let conn = hyper::upgrade::on(Request::from_parts(parts, req_body)).await?; env2.debug("protocol upgrade done"); - let mut http = hyper::server::conn::Http::new(); + let mut http = + hyper::server::conn::Http::new().with_executor(ExecInheritLogContext); http.http2_only(true); // increase window size: todo - find optiomal size let window_size = 32 * 1024 * 1024; // max = (1 << 31) - 2 @@ -261,7 +263,7 @@ fn download_file( env.log(format!("download {:?}", path.clone())); - let index: Option> = match archive_type(&file_name)? { + let index: Option> = match ArchiveType::from_path(&file_name)? { ArchiveType::FixedIndex => { let index = env.datastore.open_fixed_reader(&path)?; Some(Box::new(index)) diff --git a/src/api2/status/metrics.rs b/src/api2/status/metrics.rs new file mode 100644 index 000000000..a0004ef9d --- /dev/null +++ b/src/api2/status/metrics.rs @@ -0,0 +1,87 @@ +use anyhow::Error; +use pbs_api_types::{Authid, MetricDataPoint, Metrics, PRIV_DATASTORE_AUDIT, PRIV_SYS_AUDIT}; +use pbs_config::CachedUserInfo; +use proxmox_router::{Permission, Router, RpcEnvironment}; +use proxmox_schema::api; + +use crate::server::metric_collection::pull_metrics; + +pub const ROUTER: Router = Router::new().get(&API_METHOD_GET_METRICS); + +#[api( + input: { + properties: { + "start-time": { + optional: true, + default: 0, + description: "Only return values with a timestamp > start-time. 
Only has an effect if 'history' is also set", + }, + "history": { + optional: true, + default: false, + description: "Include historic values (last 30 minutes)", + } + }, + }, + access: { + description: "Users need Sys.Audit on /system/status for host metrics and Datastore.Audit on /datastore/{store} for datastore metrics", + permission: &Permission::Anybody, + }, +)] +/// Return backup server metrics. +pub fn get_metrics( + start_time: i64, + history: bool, + rpcenv: &mut dyn RpcEnvironment, +) -> Result { + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; + let user_info = CachedUserInfo::new()?; + + let has_any_datastore_audit_privs = + user_info.any_privs_below(&auth_id, &["datastore"], PRIV_DATASTORE_AUDIT)?; + + let has_host_audit_privs = + (CachedUserInfo::lookup_privs(&user_info, &auth_id, &["system", "status"]) + & PRIV_SYS_AUDIT) + != 0; + + if !has_any_datastore_audit_privs && !has_host_audit_privs { + // The `pull_metrics::get_*` calls are expensive, so + // check early if the current user has sufficient privileges to read *any* + // metric data. + // For datastores, we do not yet know for which individual datastores + // we have metrics in the cache, so we just check if we have + // audit permissions for *any* datastore and filter after + // reading the data. + return Ok(Metrics { data: Vec::new() }); + } + + let metrics = if history { + pull_metrics::get_all_metrics(start_time)? + } else { + pull_metrics::get_most_recent_metrics()? + }; + + let filter_by_privs = |point: &MetricDataPoint| { + let id = point.id.as_str(); + if id == "host" { + return has_host_audit_privs; + } else if let Some(datastore_id) = id.strip_prefix("datastore/") { + if !datastore_id.contains('/') { + // Now, check whether we have permissions for the individual datastore + let user_privs = CachedUserInfo::lookup_privs( + &user_info, + &auth_id, + &["datastore", datastore_id], + ); + return (user_privs & PRIV_DATASTORE_AUDIT) != 0; + } + } + log::error!("invalid metric object id: {id:?}"); + false + }; + + Ok(Metrics { + data: metrics.into_iter().filter(filter_by_privs).collect(), + }) +} diff --git a/src/api2/status.rs b/src/api2/status/mod.rs similarity index 89% rename from src/api2/status.rs rename to src/api2/status/mod.rs index 78bc06b55..113aa9852 100644 --- a/src/api2/status.rs +++ b/src/api2/status/mod.rs @@ -5,21 +5,24 @@ use serde_json::Value; use proxmox_router::list_subdirs_api_method; use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap}; +use proxmox_rrd_api_types::{RrdMode, RrdTimeframe}; use proxmox_schema::api; +use proxmox_sortable_macro::sortable; use pbs_api_types::{ - Authid, DataStoreStatusListItem, Operation, RRDMode, RRDTimeFrame, PRIV_DATASTORE_AUDIT, - PRIV_DATASTORE_BACKUP, + Authid, DataStoreStatusListItem, Operation, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, }; use pbs_config::CachedUserInfo; use pbs_datastore::DataStore; -use crate::rrd_cache::extract_rrd_data; +use crate::server::metric_collection::rrd::extract_rrd_data; use crate::tools::statistics::linear_regression; use crate::backup::can_access_any_namespace; +pub mod metrics; + #[api( returns: { description: "Lists the Status of the Datastores.", @@ -82,7 +85,7 @@ pub async fn datastore_status( let rrd_dir = format!("datastore/{}", store); let get_rrd = - |what: &str| extract_rrd_data(&rrd_dir, what, RRDTimeFrame::Month, RRDMode::Average); + |what: &str| extract_rrd_data(&rrd_dir, what, RrdTimeframe::Month, RrdMode::Average); let total_res = get_rrd("total")?; let used_res = 
get_rrd("used")?; @@ -137,10 +140,14 @@ pub async fn datastore_status( Ok(list) } -const SUBDIRS: SubdirMap = &[( - "datastore-usage", - &Router::new().get(&API_METHOD_DATASTORE_STATUS), -)]; +#[sortable] +const SUBDIRS: SubdirMap = &sorted!([ + ( + "datastore-usage", + &Router::new().get(&API_METHOD_DATASTORE_STATUS), + ), + ("metrics", &metrics::ROUTER), +]); pub const ROUTER: Router = Router::new() .get(&list_subdirs_api_method!(SUBDIRS)) diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs index 28d7e7208..cf5a01897 100644 --- a/src/api2/tape/backup.rs +++ b/src/api2/tape/backup.rs @@ -2,27 +2,29 @@ use std::sync::{Arc, Mutex}; use anyhow::{bail, format_err, Error}; use serde_json::Value; +use tracing::{info, warn}; use proxmox_lang::try_block; +use proxmox_rest_server::WorkerTask; use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType}; use proxmox_schema::api; -use proxmox_sys::{task_log, task_warn, WorkerTaskContext}; +use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ print_ns_and_snapshot, print_store_and_ns, Authid, MediaPoolConfig, Operation, - TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA, + TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, JOB_ID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA, }; use pbs_config::CachedUserInfo; use pbs_datastore::backup_info::{BackupDir, BackupInfo}; use pbs_datastore::{DataStore, StoreProgress}; -use proxmox_rest_server::WorkerTask; +use crate::tape::TapeNotificationMode; use crate::{ server::{ jobstate::{compute_schedule_status, Job, JobState}, - lookup_user_email, TapeBackupJobSummary, + TapeBackupJobSummary, }, tape::{ changer::update_changer_online_status, @@ -162,12 +164,6 @@ pub fn do_tape_backup_job( Some(lock_tape_device(&drive_config, &setup.drive)?) 
}; - let notify_user = setup - .notify_user - .as_ref() - .unwrap_or_else(|| Userid::root_userid()); - let email = lookup_user_email(notify_user); - let upid_str = WorkerTask::new_thread( &worker_type, Some(job_id.clone()), @@ -181,7 +177,7 @@ pub fn do_tape_backup_job( let job_result = try_block!({ if schedule.is_some() { // for scheduled tape backup jobs, we wait indefinitely for the lock - task_log!(worker, "waiting for drive lock..."); + info!("waiting for drive lock..."); loop { worker.check_abort()?; match lock_tape_device(&drive_config, &setup.drive) { @@ -196,9 +192,9 @@ pub fn do_tape_backup_job( } set_tape_device_state(&setup.drive, &worker.upid().to_string())?; - task_log!(worker, "Starting tape backup job '{}'", job_id); + info!("Starting tape backup job '{job_id}'"); if let Some(event_str) = schedule { - task_log!(worker, "task triggered by schedule '{}'", event_str); + info!("task triggered by schedule '{event_str}'"); } backup_worker( @@ -206,7 +202,6 @@ pub fn do_tape_backup_job( datastore, &pool_config, &setup, - email.clone(), &mut summary, false, ) @@ -214,16 +209,13 @@ pub fn do_tape_backup_job( let status = worker.create_state(&job_result); - if let Some(email) = email { - if let Err(err) = crate::server::send_tape_backup_status( - &email, - Some(job.jobname()), - &setup, - &job_result, - summary, - ) { - eprintln!("send tape backup notification failed: {}", err); - } + if let Err(err) = crate::server::send_tape_backup_status( + Some(job.jobname()), + &setup, + &job_result, + summary, + ) { + eprintln!("send tape backup notification failed: {err}"); } if let Err(err) = job.finish(status) { @@ -328,12 +320,6 @@ pub fn backup( let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive); - let notify_user = setup - .notify_user - .as_ref() - .unwrap_or_else(|| Userid::root_userid()); - let email = lookup_user_email(notify_user); - let upid_str = WorkerTask::new_thread( "tape-backup", Some(job_id), @@ -349,21 +335,14 @@ pub fn backup( datastore, &pool_config, &setup, - email.clone(), &mut summary, force_media_set, ); - if let Some(email) = email { - if let Err(err) = crate::server::send_tape_backup_status( - &email, - None, - &setup, - &job_result, - summary, - ) { - eprintln!("send tape backup notification failed: {}", err); - } + if let Err(err) = + crate::server::send_tape_backup_status(None, &setup, &job_result, summary) + { + eprintln!("send tape backup notification failed: {err}"); } // ignore errors @@ -386,22 +365,27 @@ fn backup_worker( datastore: Arc, pool_config: &MediaPoolConfig, setup: &TapeBackupJobSetup, - email: Option, summary: &mut TapeBackupJobSummary, force_media_set: bool, ) -> Result<(), Error> { let start = std::time::Instant::now(); - task_log!(worker, "update media online status"); + info!("update media online status"); let changer_name = update_media_online_status(&setup.drive)?; let root_namespace = setup.ns.clone().unwrap_or_default(); let ns_magic = !root_namespace.is_root() || setup.max_depth != Some(0); let pool = MediaPool::with_config(TAPE_STATUS_DIR, pool_config, changer_name, false)?; + let notification_mode = TapeNotificationMode::from(setup); - let mut pool_writer = - PoolWriter::new(pool, &setup.drive, worker, email, force_media_set, ns_magic)?; + let mut pool_writer = PoolWriter::new( + pool, + &setup.drive, + notification_mode, + force_media_set, + ns_magic, + )?; let mut group_list = Vec::new(); let namespaces = datastore.recursive_iter_backup_ns_ok(root_namespace, setup.max_depth)?; @@ -421,11 +405,9 @@ fn backup_worker( 
None => group_list, }; - task_log!( - worker, - "found {} groups (out of {} total)", - group_list.len(), - group_count_full + info!( + "found {} groups (out of {group_count_full} total)", + group_list.len() ); let mut progress = StoreProgress::new(group_list.len() as u64); @@ -433,10 +415,7 @@ fn backup_worker( let latest_only = setup.latest_only.unwrap_or(false); if latest_only { - task_log!( - worker, - "latest-only: true (only considering latest snapshots)" - ); + info!("latest-only: true (only considering latest snapshots)"); } let datastore_name = datastore.name(); @@ -459,8 +438,7 @@ fn backup_worker( .collect(); if snapshot_list.is_empty() { - task_log!( - worker, + info!( "{}, group {} was empty", print_store_and_ns(datastore_name, group.backup_ns()), group.group() @@ -480,7 +458,7 @@ fn backup_worker( info.backup_dir.backup_ns(), info.backup_dir.as_ref(), ) { - task_log!(worker, "skip snapshot {}", rel_path); + info!("skip snapshot {rel_path}"); continue; } @@ -493,7 +471,7 @@ fn backup_worker( SnapshotBackupResult::Ignored => {} } progress.done_snapshots = 1; - task_log!(worker, "percentage done: {}", progress); + info!("percentage done: {progress}"); } } else { progress.group_snapshots = snapshot_list.len() as u64; @@ -506,7 +484,7 @@ fn backup_worker( info.backup_dir.backup_ns(), info.backup_dir.as_ref(), ) { - task_log!(worker, "skip snapshot {}", rel_path); + info!("skip snapshot {rel_path}"); continue; } @@ -519,7 +497,7 @@ fn backup_worker( SnapshotBackupResult::Ignored => {} } progress.done_snapshots = snapshot_number as u64 + 1; - task_log!(worker, "percentage done: {}", progress); + info!("percentage done: {progress}"); } } } @@ -527,18 +505,15 @@ fn backup_worker( pool_writer.commit()?; if need_catalog { - task_log!(worker, "append media catalog"); + info!("append media catalog"); let uuid = pool_writer.load_writable_media(worker)?; - let done = pool_writer.append_catalog_archive(worker)?; + let done = pool_writer.append_catalog_archive()?; if !done { - task_log!( - worker, - "catalog does not fit on tape, writing to next volume" - ); + info!("catalog does not fit on tape, writing to next volume"); pool_writer.set_media_status_full(&uuid)?; pool_writer.load_writable_media(worker)?; - let done = pool_writer.append_catalog_archive(worker)?; + let done = pool_writer.append_catalog_archive()?; if !done { bail!("write_catalog_archive failed on second media"); } @@ -546,9 +521,9 @@ fn backup_worker( } if setup.export_media_set.unwrap_or(false) { - pool_writer.export_media_set(worker)?; + pool_writer.export_media_set()?; } else if setup.eject_media.unwrap_or(false) { - pool_writer.eject_media(worker)?; + pool_writer.eject_media()?; } if errors { @@ -558,7 +533,7 @@ fn backup_worker( summary.used_tapes = match pool_writer.get_used_media_labels() { Ok(tapes) => Some(tapes), Err(err) => { - task_warn!(worker, "could not collect list of used tapes: {err}"); + warn!("could not collect list of used tapes: {err}"); None } }; @@ -592,7 +567,7 @@ fn backup_snapshot( snapshot: BackupDir, ) -> Result { let snapshot_path = snapshot.relative_path(); - task_log!(worker, "backup snapshot {:?}", snapshot_path); + info!("backup snapshot {snapshot_path:?}"); let snapshot_reader = match snapshot.locked_reader() { Ok(reader) => reader, @@ -600,15 +575,10 @@ fn backup_snapshot( if !snapshot.full_path().exists() { // we got an error and the dir does not exist, // it probably just vanished, so continue - task_log!(worker, "snapshot {:?} vanished, skipping", snapshot_path); + info!("snapshot 
{snapshot_path:?} vanished, skipping"); return Ok(SnapshotBackupResult::Ignored); } - task_warn!( - worker, - "failed opening snapshot {:?}: {}", - snapshot_path, - err - ); + warn!("failed opening snapshot {snapshot_path:?}: {err}"); return Ok(SnapshotBackupResult::Error); } }; @@ -654,7 +624,7 @@ fn backup_snapshot( let snapshot_reader = snapshot_reader.lock().unwrap(); - let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?; + let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?; if !done { // does not fit on tape, so we try on next volume @@ -663,19 +633,14 @@ fn backup_snapshot( worker.check_abort()?; pool_writer.load_writable_media(worker)?; - let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?; + let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?; if !done { bail!("write_snapshot_archive failed on second media"); } } - task_log!( - worker, - "end backup {}:{:?}", - datastore.name(), - snapshot_path - ); + info!("end backup {}:{snapshot_path:?}", datastore.name()); Ok(SnapshotBackupResult::Success) } diff --git a/src/api2/tape/drive.rs b/src/api2/tape/drive.rs index 5a5d39d9e..ba9051de5 100644 --- a/src/api2/tape/drive.rs +++ b/src/api2/tape/drive.rs @@ -3,7 +3,9 @@ use std::panic::UnwindSafe; use std::sync::Arc; use anyhow::{bail, format_err, Error}; +use pbs_tape::sg_tape::SgTape; use serde_json::Value; +use tracing::{info, warn}; use proxmox_router::{ list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap, @@ -11,7 +13,6 @@ use proxmox_router::{ use proxmox_schema::api; use proxmox_section_config::SectionConfigData; use proxmox_sortable_macro::sortable; -use proxmox_sys::{task_log, task_warn}; use proxmox_uuid::Uuid; use pbs_api_types::{ @@ -130,13 +131,8 @@ pub fn load_media( drive.clone(), "load-media", Some(job_id), - move |worker, config| { - task_log!( - worker, - "loading media '{}' into drive '{}'", - label_text, - drive - ); + move |_worker, config| { + info!("loading media '{label_text}' into drive '{drive}'"); let (mut changer, _) = required_media_changer(&config, &drive)?; changer.load_media(&label_text)?; Ok(()) @@ -249,8 +245,8 @@ pub fn unload( drive.clone(), "unload-media", Some(drive.clone()), - move |worker, config| { - task_log!(worker, "unloading media from drive '{}'", drive); + move |_worker, config| { + info!("unloading media from drive '{drive}'"); let (mut changer, _) = required_media_changer(&config, &drive)?; changer.unload_media(target_slot)?; @@ -298,9 +294,9 @@ pub fn format_media( drive.clone(), "format-media", Some(drive.clone()), - move |worker, config| { + move |_worker, config| { if let Some(ref label) = label_text { - task_log!(worker, "try to load media '{}'", label); + info!("try to load media '{label}'"); if let Some((mut changer, _)) = media_changer(&config, &drive)? { changer.load_media(label)?; } @@ -314,11 +310,8 @@ pub fn format_media( let mut handle = LtoTapeHandle::new(file)?; if let Ok(status) = handle.get_drive_and_media_status() { if status.density >= TapeDensity::LTO9 { - task_log!(worker, "Slow formatting LTO9+ media."); - task_log!( - worker, - "This can take a very long time due to media optimization." 
- ); + info!("Slow formatting LTO9+ media."); + info!("This can take a very long time due to media optimization."); } } } @@ -329,15 +322,15 @@ pub fn format_media( bail!("expected label '{}', found unrelated data", label); } /* assume drive contains no or unrelated data */ - task_log!(worker, "unable to read media label: {}", err); - task_log!(worker, "format anyways"); + info!("unable to read media label: {err}"); + info!("format anyways"); handle.format_media(fast.unwrap_or(true))?; } Ok((None, _)) => { if let Some(label) = label_text { bail!("expected label '{}', found empty tape", label); } - task_log!(worker, "found empty media - format anyways"); + info!("found empty media - format anyways"); handle.format_media(fast.unwrap_or(true))?; } Ok((Some(media_id), _key_config)) => { @@ -351,11 +344,9 @@ pub fn format_media( } } - task_log!( - worker, + info!( "found media '{}' with uuid '{}'", - media_id.label.label_text, - media_id.label.uuid, + media_id.label.label_text, media_id.label.uuid, ); let mut inventory = Inventory::new(TAPE_STATUS_DIR); @@ -494,7 +485,7 @@ pub fn label_media( if let Some(ref pool) = pool { let (pool_config, _digest) = pbs_config::media_pool::config()?; - if pool_config.sections.get(pool).is_none() { + if !pool_config.sections.contains_key(pool) { bail!("no such pool ('{}')", pool); } } @@ -503,7 +494,7 @@ pub fn label_media( drive.clone(), "label-media", Some(drive.clone()), - move |worker, config| { + move |_worker, config| { let mut drive = open_drive(&config, &drive)?; drive.rewind()?; @@ -525,7 +516,7 @@ pub fn label_media( pool: pool.clone(), }; - write_media_label(worker, &mut drive, label, pool) + write_media_label(&mut drive, label, pool) }, )?; @@ -533,7 +524,6 @@ pub fn label_media( } fn write_media_label( - worker: Arc, drive: &mut Box, label: MediaLabel, pool: Option, @@ -548,18 +538,9 @@ fn write_media_label( } drive.label_tape(&label)?; if let Some(ref pool) = pool { - task_log!( - worker, - "Label media '{}' for pool '{}'", - label.label_text, - pool - ); + info!("Label media '{}' for pool '{pool}'", label.label_text); } else { - task_log!( - worker, - "Label media '{}' (no pool assignment)", - label.label_text - ); + info!("Label media '{}' (no pool assignment)", label.label_text); } let media_id = MediaId { @@ -605,6 +586,8 @@ fn write_media_label( drive.rewind()?; + drive.write_additional_attributes(Some(media_id.label.label_text), pool); + Ok(()) } @@ -746,10 +729,10 @@ pub fn clean_drive(drive: String, rpcenv: &mut dyn RpcEnvironment) -> Result Result Result { - task_warn!(worker, "error getting media by unique label: {err}"); + warn!("error getting media by unique label: {err}"); // we can't be sure which uuid it is continue; } @@ -947,37 +930,28 @@ pub fn update_inventory( } if let Err(err) = changer.load_media(&label_text) { - task_warn!(worker, "unable to load media '{}' - {}", label_text, err); + warn!("unable to load media '{label_text}' - {err}"); continue; } let mut drive = open_drive(&config, &drive)?; match drive.read_label() { Err(err) => { - task_warn!( - worker, - "unable to read label form media '{}' - {}", - label_text, - err - ); + warn!("unable to read label form media '{label_text}' - {err}"); } Ok((None, _)) => { - task_log!(worker, "media '{}' is empty", label_text); + info!("media '{label_text}' is empty"); } Ok((Some(media_id), _key_config)) => { if label_text != media_id.label.label_text { - task_warn!( - worker, - "label text mismatch ({} != {})", - label_text, + warn!( + "label text mismatch ({label_text} != {})", 
media_id.label.label_text ); continue; } - task_log!( - worker, - "inventorize media '{}' with uuid '{}'", - label_text, + info!( + "inventorize media '{label_text}' with uuid '{}'", media_id.label.uuid ); @@ -999,15 +973,11 @@ pub fn update_inventory( if catalog { let media_set = inventory.compute_media_set_members(&set.uuid)?; if let Err(err) = fast_catalog_restore( - &worker, &mut drive, &media_set, &media_id.label.uuid, ) { - task_warn!( - worker, - "could not restore catalog for {label_text}: {err}" - ); + warn!("could not restore catalog for {label_text}: {err}"); } } } else { @@ -1053,7 +1023,7 @@ pub fn barcode_label_media( if let Some(ref pool) = pool { let (pool_config, _digest) = pbs_config::media_pool::config()?; - if pool_config.sections.get(pool).is_none() { + if !pool_config.sections.contains_key(pool) { bail!("no such pool ('{}')", pool); } } @@ -1063,14 +1033,13 @@ pub fn barcode_label_media( drive.clone(), "barcode-label-media", Some(drive.clone()), - move |worker, config| barcode_label_media_worker(worker, drive, &config, pool), + move |_worker, config| barcode_label_media_worker(drive, &config, pool), )?; Ok(upid_str.into()) } fn barcode_label_media_worker( - worker: Arc, drive: String, drive_config: &SectionConfigData, pool: Option, @@ -1103,24 +1072,20 @@ fn barcode_label_media_worker( inventory.reload()?; match inventory.find_media_by_label_text(&label_text) { Ok(Some(_)) => { - task_log!( - worker, - "media '{}' already inventoried (already labeled)", - label_text - ); + info!("media '{label_text}' already inventoried (already labeled)"); continue; } Err(err) => { - task_warn!(worker, "error getting media by unique label: {err}",); + warn!("error getting media by unique label: {err}",); continue; } Ok(None) => {} // ok to label } - task_log!(worker, "checking/loading media '{}'", label_text); + info!("checking/loading media '{label_text}'"); if let Err(err) = changer.load_media(&label_text) { - task_warn!(worker, "unable to load media '{}' - {}", label_text, err); + warn!("unable to load media '{label_text}' - {err}"); continue; } @@ -1129,21 +1094,13 @@ fn barcode_label_media_worker( match drive.read_next_file() { Ok(_reader) => { - task_log!( - worker, - "media '{}' is not empty (format it first)", - label_text - ); + info!("media '{label_text}' is not empty (format it first)"); continue; } Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ } Err(BlockReadError::EndOfStream) => { /* tape is empty */ } Err(_err) => { - task_warn!( - worker, - "media '{}' read error (maybe not empty - format it first)", - label_text - ); + warn!("media '{label_text}' read error (maybe not empty - format it first)"); continue; } } @@ -1156,7 +1113,7 @@ fn barcode_label_media_worker( pool: pool.clone(), }; - write_media_label(worker.clone(), &mut drive, label, pool.clone())? + write_media_label(&mut drive, label, pool.clone())? } Ok(()) @@ -1315,14 +1272,12 @@ pub fn catalog_media( let media_id = match drive.read_label()? { (Some(media_id), key_config) => { - task_log!( - worker, + info!( "found media label: {}", serde_json::to_string_pretty(&serde_json::to_value(&media_id)?)? ); if key_config.is_some() { - task_log!( - worker, + info!( "encryption key config: {}", serde_json::to_string_pretty(&serde_json::to_value(&key_config)?)? 
); @@ -1336,7 +1291,7 @@ pub fn catalog_media( let (_media_set_lock, media_set_uuid) = match media_id.media_set_label { None => { - task_log!(worker, "media is empty"); + info!("media is empty"); let _pool_lock = if let Some(pool) = media_id.pool() { lock_media_pool(TAPE_STATUS_DIR, &pool)? } else { @@ -1349,7 +1304,7 @@ pub fn catalog_media( Some(ref set) => { if set.unassigned() { // media is empty - task_log!(worker, "media is empty"); + info!("media is empty"); let _lock = lock_unassigned_media_pool(TAPE_STATUS_DIR)?; MediaCatalog::destroy(TAPE_STATUS_DIR, &media_id.label.uuid)?; inventory.store(media_id.clone(), false)?; @@ -1374,14 +1329,14 @@ pub fn catalog_media( if !scan { let media_set = inventory.compute_media_set_members(media_set_uuid)?; - if fast_catalog_restore(&worker, &mut drive, &media_set, &media_id.label.uuid)? { + if fast_catalog_restore(&mut drive, &media_set, &media_id.label.uuid)? { return Ok(()); } - task_log!(worker, "no catalog found"); + info!("no catalog found"); } - task_log!(worker, "scanning entire media to reconstruct catalog"); + info!("scanning entire media to reconstruct catalog"); drive.rewind()?; drive.read_label()?; // skip over labels - we already read them above @@ -1411,6 +1366,12 @@ pub fn catalog_media( schema: CHANGER_NAME_SCHEMA, optional: true, }, + "query-activity": { + type: bool, + description: "If true, queries and returns the drive activity for each drive.", + optional: true, + default: false, + }, }, }, returns: { @@ -1428,6 +1389,7 @@ pub fn catalog_media( /// List drives pub fn list_drives( changer: Option, + query_activity: bool, _param: Value, rpcenv: &mut dyn RpcEnvironment, ) -> Result, Error> { @@ -1454,10 +1416,16 @@ pub fn list_drives( let info = lookup_device_identification(<o_drives, &drive.path); let state = get_tape_device_state(&config, &drive.name)?; + let activity = if query_activity { + SgTape::device_activity(&drive).ok() + } else { + None + }; let entry = DriveListEntry { config: drive, info, state, + activity, }; list.push(entry); } diff --git a/src/api2/tape/media.rs b/src/api2/tape/media.rs index 07bed86a2..a7c8483ae 100644 --- a/src/api2/tape/media.rs +++ b/src/api2/tape/media.rs @@ -204,6 +204,7 @@ pub async fn list_media( media_set_uuid, media_set_name, seq_nr, + bytes_used: media.bytes_used(), }); } } @@ -232,6 +233,7 @@ pub async fn list_media( media_set_ctime: None, seq_nr: None, pool: None, + bytes_used: inventory.get_media_bytes_used(&media_id.label.uuid), }); } } @@ -242,7 +244,7 @@ pub async fn list_media( let media_id = inventory.lookup_media(uuid).unwrap(); if let Some(pool) = media_id.pool() { - if config.sections.get(&pool).is_some() { + if config.sections.contains_key(&pool) { continue; } @@ -279,6 +281,7 @@ pub async fn list_media( media_set_uuid, media_set_name, seq_nr, + bytes_used: inventory.get_media_bytes_used(&media_id.label.uuid), }); } diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs index 8273c867a..b28db6e39 100644 --- a/src/api2/tape/restore.rs +++ b/src/api2/tape/restore.rs @@ -6,36 +6,39 @@ use std::sync::Arc; use anyhow::{bail, format_err, Error}; use serde_json::Value; +use tracing::{info, warn}; use proxmox_human_byte::HumanByte; use proxmox_io::ReadExt; +use proxmox_rest_server::WorkerTask; use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType}; use proxmox_schema::{api, ApiType}; use proxmox_section_config::SectionConfigData; use proxmox_sys::fs::{replace_file, CreateOptions}; -use proxmox_sys::{task_log, task_warn, WorkerTaskContext}; use 
proxmox_uuid::Uuid; +use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ parse_ns_and_snapshot, print_ns_and_snapshot, Authid, BackupDir, BackupNamespace, CryptMode, - Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, - DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, - PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA, + NotificationMode, Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, + DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, + TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA, }; +use pbs_client::tools::handle_root_with_optional_format_version_prelude; use pbs_config::CachedUserInfo; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME}; use pbs_datastore::{DataBlob, DataStore}; use pbs_tape::{ BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, }; -use proxmox_rest_server::WorkerTask; use crate::backup::check_ns_modification_privs; +use crate::tape::TapeNotificationMode; use crate::{ - server::lookup_user_email, tape::{ drive::{lock_tape_device, request_and_load_media, set_tape_device_state, TapeDriver}, file_formats::{ @@ -289,6 +292,10 @@ pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE); type: Userid, optional: true, }, + "notification-mode": { + type: NotificationMode, + optional: true, + }, "snapshots": { description: "List of snapshots.", type: Array, @@ -322,6 +329,7 @@ pub fn restore( namespaces: Option>, media_set: String, notify_user: Option, + notification_mode: Option, snapshots: Option>, owner: Option, rpcenv: &mut dyn RpcEnvironment, @@ -329,6 +337,8 @@ pub fn restore( let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let user_info = CachedUserInfo::new()?; + let notification_mode = TapeNotificationMode::from((notify_user, notification_mode)); + let mut store_map = DataStoreMap::try_from(store) .map_err(|err| format_err!("cannot parse store mapping: {err}"))?; let namespaces = if let Some(maps) = namespaces { @@ -394,17 +404,12 @@ pub fn restore( let restore_owner = owner.as_ref().unwrap_or(&auth_id); - let email = notify_user - .as_ref() - .and_then(lookup_user_email) - .or_else(|| lookup_user_email(&auth_id.clone().into())); - - task_log!(worker, "Mediaset '{media_set}'"); - task_log!(worker, "Pool: {pool}"); + info!("Mediaset '{media_set}'"); + info!("Pool: {pool}"); let res = if snapshots.is_some() || namespaces { restore_list_worker( - worker.clone(), + worker, snapshots.unwrap_or_default(), inventory, media_set_uuid, @@ -412,28 +417,28 @@ pub fn restore( &drive, store_map, restore_owner, - email, + ¬ification_mode, user_info, &auth_id, ) } else { restore_full_worker( - worker.clone(), + worker, inventory, media_set_uuid, drive_config, &drive, store_map, restore_owner, - email, + ¬ification_mode, &auth_id, ) }; if res.is_ok() { - task_log!(worker, "Restore mediaset '{media_set}' done"); + info!("Restore mediaset '{media_set}' done"); } if let Err(err) = set_tape_device_state(&drive, "") { - task_log!(worker, "could not unset drive state for {drive}: {err}"); + info!("could not unset 
drive state for {drive}: {err}"); } res @@ -452,7 +457,7 @@ fn restore_full_worker( drive_name: &str, store_map: DataStoreMap, restore_owner: &Authid, - email: Option, + notification_mode: &TapeNotificationMode, auth_id: &Authid, ) -> Result<(), Error> { let members = inventory.compute_media_set_members(&media_set_uuid)?; @@ -475,7 +480,7 @@ fn restore_full_worker( if encryption_key_fingerprint.is_none() && set.encryption_key_fingerprint.is_some() { - encryption_key_fingerprint = set.encryption_key_fingerprint.clone(); + encryption_key_fingerprint.clone_from(&set.encryption_key_fingerprint); } } media_id_list.push(media_id); @@ -484,7 +489,7 @@ fn restore_full_worker( } if let Some(fingerprint) = encryption_key_fingerprint { - task_log!(worker, "Encryption key fingerprint: {fingerprint}"); + info!("Encryption key fingerprint: {fingerprint}"); } let used_datastores = store_map.used_datastores(); @@ -493,13 +498,9 @@ fn restore_full_worker( .map(|(t, _)| String::from(t.name())) .collect::>() .join(", "); - task_log!(worker, "Datastore(s): {datastore_list}",); - task_log!(worker, "Drive: {drive_name}"); - log_required_tapes( - &worker, - &inventory, - media_id_list.iter().map(|id| &id.label.uuid), - ); + info!("Datastore(s): {datastore_list}",); + info!("Drive: {drive_name}"); + log_required_tapes(&inventory, media_id_list.iter().map(|id| &id.label.uuid)); let mut datastore_locks = Vec::new(); for (target, _) in used_datastores.values() { @@ -519,7 +520,7 @@ fn restore_full_worker( &store_map, &mut checked_chunks_map, restore_owner, - &email, + notification_mode, auth_id, )?; } @@ -529,7 +530,6 @@ fn restore_full_worker( #[allow(clippy::too_many_arguments)] fn check_snapshot_restorable( - worker: &WorkerTask, store_map: &DataStoreMap, store: &str, snapshot: &str, @@ -570,7 +570,7 @@ fn check_snapshot_restorable( auth_id, Some(restore_owner), ) { - task_warn!(worker, "cannot restore {store}:{snapshot} to {ns}: '{err}'"); + warn!("cannot restore {store}:{snapshot} to {ns}: '{err}'"); continue; } @@ -578,8 +578,7 @@ fn check_snapshot_restorable( if let Ok(owner) = datastore.get_owner(&ns, dir.as_ref()) { if restore_owner != &owner { // only the owner is allowed to create additional snapshots - task_warn!( - worker, + warn!( "restore of '{snapshot}' to {ns} failed, owner check failed ({restore_owner} \ != {owner})", ); @@ -590,10 +589,7 @@ fn check_snapshot_restorable( have_some_permissions = true; if datastore.snapshot_path(&ns, dir).exists() { - task_warn!( - worker, - "found snapshot {snapshot} on target datastore/namespace, skipping...", - ); + warn!("found snapshot {snapshot} on target datastore/namespace, skipping...",); continue; } can_restore_some = true; @@ -606,11 +602,7 @@ fn check_snapshot_restorable( Ok(can_restore_some) } -fn log_required_tapes<'a>( - worker: &WorkerTask, - inventory: &Inventory, - list: impl Iterator, -) { +fn log_required_tapes<'a>(inventory: &Inventory, list: impl Iterator) { let mut tape_list = list .map(|uuid| { inventory @@ -622,7 +614,7 @@ fn log_required_tapes<'a>( }) .collect::>(); tape_list.sort_unstable(); - task_log!(worker, "Required media list: {}", tape_list.join(";")); + info!("Required media list: {}", tape_list.join(";")); } #[allow(clippy::too_many_arguments)] @@ -635,7 +627,7 @@ fn restore_list_worker( drive_name: &str, store_map: DataStoreMap, restore_owner: &Authid, - email: Option, + notification_mode: &TapeNotificationMode, user_info: Arc, auth_id: &Authid, ) -> Result<(), Error> { @@ -654,14 +646,13 @@ fn restore_list_worker( let (ns, dir) = 
match parse_ns_and_snapshot(snapshot) { Ok((ns, dir)) if store_map.has_full_mapping(store, &ns) => (ns, dir), Err(err) => { - task_warn!(worker, "couldn't parse snapshot {snapshot} - {err}"); + warn!("couldn't parse snapshot {snapshot} - {err}"); continue; } _ => continue, }; let snapshot = print_ns_and_snapshot(&ns, &dir); match check_snapshot_restorable( - &worker, &store_map, store, &snapshot, @@ -675,7 +666,7 @@ fn restore_list_worker( Ok(true) => restorable.push((store.to_string(), snapshot.to_string(), ns, dir)), Ok(false) => {} Err(err) => { - task_warn!(worker, "{err}"); + warn!("{err}"); skipped.push(format!("{store}:{snapshot}")); } } @@ -693,7 +684,6 @@ fn restore_list_worker( match parse_ns_and_snapshot(snapshot) { Ok((ns, dir)) => { match check_snapshot_restorable( - &worker, &store_map, store, snapshot, @@ -709,14 +699,14 @@ fn restore_list_worker( } Ok(false) => None, Err(err) => { - task_warn!(worker, "{err}"); + warn!("{err}"); skipped.push(format!("{store}:{snapshot}")); None } } } Err(err) => { - task_warn!(worker, "could not restore {store_snapshot}: {err}"); + warn!("could not restore {store_snapshot}: {err}"); skipped.push(store_snapshot); None } @@ -734,10 +724,7 @@ fn restore_list_worker( let media_id = inventory.lookup_media(media_uuid).unwrap(); (media_id, file_num) } else { - task_warn!( - worker, - "did not find snapshot '{store}:{snapshot}' in media set", - ); + warn!("did not find snapshot '{store}:{snapshot}' in media set",); skipped.push(format!("{store}:{snapshot}")); continue; }; @@ -750,26 +737,25 @@ fn restore_list_worker( .or_default(); file_list.push(file_num); - task_log!( - worker, + info!( "found snapshot {snapshot} on {}: file {file_num}", media_id.label.label_text, ); } if snapshot_file_hash.is_empty() { - task_log!(worker, "nothing to restore, skipping remaining phases..."); + info!("nothing to restore, skipping remaining phases..."); if !skipped.is_empty() { - task_log!(worker, "skipped the following snapshots:"); + info!("skipped the following snapshots:"); for snap in skipped { - task_log!(worker, " {snap}"); + info!(" {snap}"); } } return Ok(()); } - task_log!(worker, "Phase 1: temporarily restore snapshots to temp dir"); - log_required_tapes(&worker, &inventory, snapshot_file_hash.keys()); + info!("Phase 1: temporarily restore snapshots to temp dir"); + log_required_tapes(&inventory, snapshot_file_hash.keys()); let mut datastore_chunk_map: HashMap> = HashMap::new(); let mut tmp_paths = Vec::new(); for (media_uuid, file_list) in snapshot_file_hash.iter_mut() { @@ -779,7 +765,7 @@ fn restore_list_worker( &drive_config, drive_name, &media_id.label, - &email, + notification_mode, )?; file_list.sort_unstable(); @@ -820,10 +806,10 @@ fn restore_list_worker( drop(catalog); if !media_file_chunk_map.is_empty() { - task_log!(worker, "Phase 2: restore chunks to datastores"); - log_required_tapes(&worker, &inventory, media_file_chunk_map.keys()); + info!("Phase 2: restore chunks to datastores"); + log_required_tapes(&inventory, media_file_chunk_map.keys()); } else { - task_log!(worker, "All chunks are already present, skip phase 2..."); + info!("All chunks are already present, skip phase 2..."); } for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() { @@ -833,15 +819,12 @@ fn restore_list_worker( &drive_config, drive_name, &media_id.label, - &email, + notification_mode, )?; restore_file_chunk_map(worker.clone(), &mut drive, &store_map, file_chunk_map)?; } - task_log!( - worker, - "Phase 3: copy snapshots from temp dir to datastores" - ); 
+ info!("Phase 3: copy snapshots from temp dir to datastores"); let mut errors = false; for (source_datastore, snapshot, source_ns, backup_dir) in snapshots.into_iter() { if let Err(err) = proxmox_lang::try_block!({ @@ -898,20 +881,14 @@ fn restore_list_worker( Ok(()) }) { - task_warn!( - worker, - "could not restore {source_datastore}:{snapshot}: '{err}'" - ); + warn!("could not restore {source_datastore}:{snapshot}: '{err}'"); skipped.push(format!("{source_datastore}:{snapshot}")); } } - task_log!(worker, "Restore snapshot '{}' done", snapshot); + info!("Restore snapshot '{snapshot}' done"); Ok::<_, Error>(()) }) { - task_warn!( - worker, - "could not copy {source_datastore}:{snapshot}: {err}" - ); + warn!("could not copy {source_datastore}:{snapshot}: {err}"); errors = true; } } @@ -921,7 +898,7 @@ fn restore_list_worker( std::fs::remove_dir_all(&tmp_path) .map_err(|err| format_err!("remove_dir_all failed - {err}")) }) { - task_warn!(worker, "could not clean up temp dir {tmp_path:?}: {err}"); + warn!("could not clean up temp dir {tmp_path:?}: {err}"); errors = true; }; } @@ -930,19 +907,16 @@ fn restore_list_worker( bail!("errors during copy occurred"); } if !skipped.is_empty() { - task_log!(worker, "(partially) skipped the following snapshots:"); + info!("(partially) skipped the following snapshots:"); for snap in skipped { - task_log!(worker, " {snap}"); + info!(" {snap}"); } } Ok(()) }); if res.is_err() { - task_warn!( - worker, - "Error during restore, partially restored snapshots will NOT be cleaned up" - ); + warn!("Error during restore, partially restored snapshots will NOT be cleaned up"); } for (datastore, _) in store_map.used_datastores().values() { @@ -950,7 +924,7 @@ fn restore_list_worker( match std::fs::remove_dir_all(tmp_path) { Ok(()) => {} Err(err) if err.kind() == std::io::ErrorKind::NotFound => {} - Err(err) => task_warn!(worker, "error cleaning up: {}", err), + Err(err) => warn!("error cleaning up: {err}"), } } @@ -1033,13 +1007,10 @@ fn restore_snapshots_to_tmpdir( for file_num in file_list { let current_file_number = drive.current_file_number()?; if current_file_number != *file_num { - task_log!( - worker, - "was at file {current_file_number}, moving to {file_num}" - ); + info!("was at file {current_file_number}, moving to {file_num}"); drive.move_to_file(*file_num)?; let current_file_number = drive.current_file_number()?; - task_log!(worker, "now at file {}", current_file_number); + info!("now at file {current_file_number}"); } let mut reader = drive.read_next_file()?; @@ -1061,20 +1032,15 @@ fn restore_snapshots_to_tmpdir( let source_datastore = archive_header.store; let snapshot = archive_header.snapshot; - task_log!( - worker, - "File {file_num}: snapshot archive {source_datastore}:{snapshot}", - ); + info!("File {file_num}: snapshot archive {source_datastore}:{snapshot}",); - let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; + let mut decoder = + pxar::decoder::sync::Decoder::from_std(pxar::PxarVariant::Unified(reader))?; let target_datastore = match store_map.target_store(&source_datastore) { Some(datastore) => datastore, None => { - task_warn!( - worker, - "could not find target datastore for {source_datastore}:{snapshot}", - ); + warn!("could not find target datastore for {source_datastore}:{snapshot}",); continue; } }; @@ -1095,7 +1061,7 @@ fn restore_snapshots_to_tmpdir( let mut archive_path = tmp_path.to_owned(); archive_path.push(&item.filename); - let index: Box = match archive_type(&item.filename)? 
{ + let index: Box = match ArchiveType::from_path(&item.filename)? { ArchiveType::DynamicIndex => { Box::new(DynamicIndexReader::open(&archive_path)?) } @@ -1126,10 +1092,10 @@ fn restore_file_chunk_map( for (nr, chunk_map) in file_chunk_map.iter_mut() { let current_file_number = drive.current_file_number()?; if current_file_number != *nr { - task_log!(worker, "was at file {current_file_number}, moving to {nr}"); + info!("was at file {current_file_number}, moving to {nr}"); drive.move_to_file(*nr)?; let current_file_number = drive.current_file_number()?; - task_log!(worker, "now at file {}", current_file_number); + info!("now at file {current_file_number}"); } let mut reader = drive.read_next_file()?; let header: MediaContentHeader = unsafe { reader.read_le_value()? }; @@ -1146,10 +1112,7 @@ fn restore_file_chunk_map( let source_datastore = archive_header.store; - task_log!( - worker, - "File {nr}: chunk archive for datastore '{source_datastore}'", - ); + info!("File {nr}: chunk archive for datastore '{source_datastore}'",); let datastore = store_map.target_store(&source_datastore).ok_or_else(|| { format_err!("unexpected chunk archive for store: {source_datastore}") @@ -1161,7 +1124,7 @@ fn restore_file_chunk_map( datastore.clone(), chunk_map, )?; - task_log!(worker, "restored {count} chunks"); + info!("restored {count} chunks"); } _ => bail!("unexpected content magic {:?}", header.content_magic), } @@ -1221,8 +1184,7 @@ fn restore_partial_chunk_archive<'a>( let elapsed = start_time.elapsed()?.as_secs_f64(); let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64; - task_log!( - worker, + info!( "restored {} ({:.2}/s)", HumanByte::new_decimal(bytes), HumanByte::new_decimal(bytes / elapsed), @@ -1241,7 +1203,7 @@ pub fn request_and_restore_media( store_map: &DataStoreMap, checked_chunks_map: &mut HashMap>, restore_owner: &Authid, - email: &Option, + notification_mode: &TapeNotificationMode, auth_id: &Authid, ) -> Result<(), Error> { let media_set_uuid = match media_id.media_set_label { @@ -1249,8 +1211,13 @@ pub fn request_and_restore_media( Some(ref set) => &set.uuid, }; - let (mut drive, info) = - request_and_load_media(&worker, drive_config, drive_name, &media_id.label, email)?; + let (mut drive, info) = request_and_load_media( + &worker, + drive_config, + drive_name, + &media_id.label, + notification_mode, + )?; match info.media_set_label { None => { @@ -1301,15 +1268,11 @@ pub fn restore_media( let current_file_number = drive.current_file_number()?; let reader = match drive.read_next_file() { Err(BlockReadError::EndOfFile) => { - task_log!( - worker, - "skip unexpected filemark at pos {}", - current_file_number - ); + info!("skip unexpected filemark at pos {current_file_number}"); continue; } Err(BlockReadError::EndOfStream) => { - task_log!(worker, "detected EOT after {} files", current_file_number); + info!("detected EOT after {current_file_number} files"); break; } Err(BlockReadError::Error(err)) => { @@ -1373,13 +1336,7 @@ fn restore_archive<'a>( let datastore_name = archive_header.store; let snapshot = archive_header.snapshot; - task_log!( - worker, - "File {}: snapshot archive {}:{}", - current_file_number, - datastore_name, - snapshot - ); + info!("File {current_file_number}: snapshot archive {datastore_name}:{snapshot}"); let (backup_ns, backup_dir) = parse_ns_and_snapshot(&snapshot)?; @@ -1413,16 +1370,16 @@ fn restore_archive<'a>( path.push(rel_path); if is_new { - task_log!(worker, "restore snapshot {}", backup_dir); + info!("restore snapshot {backup_dir}"); - 
match restore_snapshot_archive(worker.clone(), reader, &path) { + match restore_snapshot_archive(worker, reader, &path) { Err(err) => { std::fs::remove_dir_all(&path)?; bail!("restore snapshot {} failed - {}", backup_dir, err); } Ok(false) => { std::fs::remove_dir_all(&path)?; - task_log!(worker, "skip incomplete snapshot {}", backup_dir); + info!("skip incomplete snapshot {backup_dir}"); } Ok(true) => { catalog.register_snapshot( @@ -1438,7 +1395,7 @@ fn restore_archive<'a>( return Ok(()); } } else { - task_log!(worker, "skipping..."); + info!("skipping..."); } } @@ -1465,12 +1422,7 @@ fn restore_archive<'a>( let source_datastore = archive_header.store; - task_log!( - worker, - "File {}: chunk archive for datastore '{}'", - current_file_number, - source_datastore - ); + info!("File {current_file_number}: chunk archive for datastore '{source_datastore}'"); let datastore = target .as_ref() .and_then(|t| t.0.target_store(&source_datastore)); @@ -1487,15 +1439,9 @@ fn restore_archive<'a>( .or_default(); let chunks = if let Some(datastore) = datastore { - restore_chunk_archive( - worker.clone(), - reader, - datastore, - checked_chunks, - verbose, - )? + restore_chunk_archive(worker, reader, datastore, checked_chunks, verbose)? } else { - scan_chunk_archive(worker.clone(), reader, verbose)? + scan_chunk_archive(worker, reader, verbose)? }; if let Some(chunks) = chunks { @@ -1505,12 +1451,12 @@ fn restore_archive<'a>( &source_datastore, &chunks[..], )?; - task_log!(worker, "register {} chunks", chunks.len()); + info!("register {} chunks", chunks.len()); catalog.commit_if_large()?; } return Ok(()); } else if target.is_some() { - task_log!(worker, "skipping..."); + info!("skipping..."); } reader.skip_data()?; // read all data @@ -1521,10 +1467,8 @@ fn restore_archive<'a>( let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data) .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?; - task_log!( - worker, - "File {}: skip catalog '{}'", - current_file_number, + info!( + "File {current_file_number}: skip catalog '{}'", archive_header.uuid ); @@ -1560,7 +1504,7 @@ fn scan_chunk_archive<'a>( // check if this is an aborted stream without end marker if let Ok(false) = reader.has_end_marker() { - task_log!(worker, "missing stream end marker"); + info!("missing stream end marker"); return Ok(None); } @@ -1572,7 +1516,7 @@ fn scan_chunk_archive<'a>( worker.check_abort()?; if verbose { - task_log!(worker, "Found chunk: {}", hex::encode(digest)); + info!("Found chunk: {}", hex::encode(digest)); } chunks.push(digest); @@ -1596,8 +1540,6 @@ fn restore_chunk_archive<'a>( let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0)); let bytes2 = bytes.clone(); - let worker2 = worker.clone(); - let writer_pool = ParallelHandler::new( "tape restore chunk writer", 4, @@ -1605,7 +1547,7 @@ fn restore_chunk_archive<'a>( let chunk_exists = datastore.cond_touch_chunk(&digest, false)?; if !chunk_exists { if verbose { - task_log!(worker2, "Insert chunk: {}", hex::encode(digest)); + info!("Insert chunk: {}", hex::encode(digest)); } bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst); // println!("verify and write {}", hex::encode(&digest)); @@ -1616,7 +1558,7 @@ fn restore_chunk_archive<'a>( datastore.insert_chunk(&chunk, &digest)?; } else if verbose { - task_log!(worker2, "Found existing chunk: {}", hex::encode(digest)); + info!("Found existing chunk: {}", hex::encode(digest)); } Ok(()) }, @@ -1638,7 +1580,7 @@ fn restore_chunk_archive<'a>( // check if 
this is an aborted stream without end marker if let Ok(false) = reader.has_end_marker() { - task_log!(worker, "missing stream end marker"); + info!("missing stream end marker"); return Ok(None); } @@ -1662,8 +1604,7 @@ fn restore_chunk_archive<'a>( let elapsed = start_time.elapsed()?.as_secs_f64(); let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64; - task_log!( - worker, + info!( "restored {} ({:.2}/s)", HumanByte::new_decimal(bytes), HumanByte::new_decimal(bytes / elapsed), @@ -1677,7 +1618,7 @@ fn restore_snapshot_archive<'a>( reader: Box, snapshot_path: &Path, ) -> Result { - let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?; + let mut decoder = pxar::decoder::sync::Decoder::from_std(pxar::PxarVariant::Unified(reader))?; match try_restore_snapshot_archive(worker, &mut decoder, snapshot_path) { Ok(_) => Ok(true), Err(err) => { @@ -1704,17 +1645,11 @@ fn try_restore_snapshot_archive( decoder: &mut pxar::decoder::sync::Decoder, snapshot_path: &Path, ) -> Result { - let _root = match decoder.next() { - None => bail!("missing root entry"), - Some(root) => { - let root = root?; - match root.kind() { - pxar::EntryKind::Directory => { /* Ok */ } - _ => bail!("wrong root entry type"), - } - root - } - }; + let (root, _) = handle_root_with_optional_format_version_prelude(decoder)?; + match root.kind() { + pxar::EntryKind::Directory => { /* Ok */ } + _ => bail!("wrong root entry type"), + } let root_path = Path::new("/"); let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME); @@ -1744,7 +1679,7 @@ fn try_restore_snapshot_archive( } let filename = entry.file_name(); - let mut contents = match decoder.contents() { + let mut contents = match decoder.contents()? { None => bail!("missing file content"), Some(contents) => contents, }; @@ -1814,7 +1749,6 @@ fn try_restore_snapshot_archive( /// Try to restore media catalogs (form catalog_archives) pub fn fast_catalog_restore( - worker: &WorkerTask, drive: &mut Box, media_set: &MediaSet, uuid: &Uuid, // current media Uuid @@ -1835,14 +1769,11 @@ pub fn fast_catalog_restore( // limit reader scope let mut reader = match drive.read_next_file() { Err(BlockReadError::EndOfFile) => { - task_log!( - worker, - "skip unexpected filemark at pos {current_file_number}" - ); + info!("skip unexpected filemark at pos {current_file_number}"); continue; } Err(BlockReadError::EndOfStream) => { - task_log!(worker, "detected EOT after {current_file_number} files"); + info!("detected EOT after {current_file_number} files"); break; } Err(BlockReadError::Error(err)) => { @@ -1859,7 +1790,7 @@ pub fn fast_catalog_restore( if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 || header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1 { - task_log!(worker, "found catalog at pos {}", current_file_number); + info!("found catalog at pos {current_file_number}"); let header_data = reader.read_exact_allocated(header.size as usize)?; @@ -1869,11 +1800,7 @@ pub fn fast_catalog_restore( })?; if &archive_header.media_set_uuid != media_set.uuid() { - task_log!( - worker, - "skipping unrelated catalog at pos {}", - current_file_number - ); + info!("skipping unrelated catalog at pos {current_file_number}"); reader.skip_data()?; // read all data continue; } @@ -1886,11 +1813,7 @@ pub fn fast_catalog_restore( }); if !wanted { - task_log!( - worker, - "skip catalog because media '{}' not inventarized", - catalog_uuid - ); + info!("skip catalog because media '{catalog_uuid}' not inventarized"); reader.skip_data()?; // read all data continue; 
} @@ -1900,11 +1823,7 @@ pub fn fast_catalog_restore( } else { // only restore if catalog does not exist if MediaCatalog::exists(TAPE_STATUS_DIR, catalog_uuid) { - task_log!( - worker, - "catalog for media '{}' already exists", - catalog_uuid - ); + info!("catalog for media '{catalog_uuid}' already exists"); reader.skip_data()?; // read all data continue; } @@ -1920,19 +1839,11 @@ pub fn fast_catalog_restore( match MediaCatalog::parse_catalog_header(&mut file)? { (true, Some(media_uuid), Some(media_set_uuid)) => { if &media_uuid != catalog_uuid { - task_log!( - worker, - "catalog uuid mismatch at pos {}", - current_file_number - ); + info!("catalog uuid mismatch at pos {current_file_number}"); continue; } if media_set_uuid != archive_header.media_set_uuid { - task_log!( - worker, - "catalog media_set mismatch at pos {}", - current_file_number - ); + info!("catalog media_set mismatch at pos {current_file_number}"); continue; } @@ -1943,18 +1854,14 @@ pub fn fast_catalog_restore( )?; if catalog_uuid == uuid { - task_log!(worker, "successfully restored catalog"); + info!("successfully restored catalog"); found_catalog = true } else { - task_log!( - worker, - "successfully restored related catalog {}", - media_uuid - ); + info!("successfully restored related catalog {media_uuid}"); } } _ => { - task_warn!(worker, "got incomplete catalog header - skip file"); + warn!("got incomplete catalog header - skip file"); continue; } } @@ -1968,7 +1875,7 @@ pub fn fast_catalog_restore( } moved_to_eom = true; - task_log!(worker, "searching for catalog at EOT (moving to EOT)"); + info!("searching for catalog at EOT (moving to EOT)"); drive.move_to_last_file()?; let new_file_number = drive.current_file_number()?; diff --git a/src/auth.rs b/src/auth.rs index 04fb3a1d7..d0fb0a9f4 100644 --- a/src/auth.rs +++ b/src/auth.rs @@ -9,17 +9,20 @@ use std::pin::Pin; use anyhow::{bail, Error}; use futures::Future; use once_cell::sync::{Lazy, OnceCell}; +use pbs_config::open_backup_lockfile; use proxmox_router::http_bail; use serde_json::json; use proxmox_auth_api::api::{Authenticator, LockedTfaConfig}; use proxmox_auth_api::ticket::{Empty, Ticket}; use proxmox_auth_api::types::Authid; -use proxmox_auth_api::Keyring; +use proxmox_auth_api::{HMACKey, Keyring}; use proxmox_ldap::{Config, Connection, ConnectionMode}; use proxmox_tfa::api::{OpenUserChallengeData, TfaConfig}; -use pbs_api_types::{LdapMode, LdapRealmConfig, OpenIdRealmConfig, RealmRef, Userid, UsernameRef}; +use pbs_api_types::{ + AdRealmConfig, LdapMode, LdapRealmConfig, OpenIdRealmConfig, RealmRef, Userid, UsernameRef, +}; use pbs_buildcfg::configdir; use crate::auth_helpers; @@ -28,20 +31,33 @@ pub const TERM_PREFIX: &str = "PBSTERM"; struct PbsAuthenticator; -const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json"); +pub(crate) const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json"); +pub(crate) const SHADOW_LOCK_FILENAME: &str = configdir!("/shadow.json.lock"); impl Authenticator for PbsAuthenticator { fn authenticate_user<'a>( - &self, + &'a self, username: &'a UsernameRef, password: &'a str, - _client_ip: Option<&'a IpAddr>, + client_ip: Option<&'a IpAddr>, ) -> Pin> + Send + 'a>> { Box::pin(async move { let data = proxmox_sys::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?; match data[username.as_str()].as_str() { None => bail!("no password set"), - Some(enc_password) => proxmox_sys::crypt::verify_crypt_pw(password, enc_password)?, + Some(enc_password) => { + proxmox_sys::crypt::verify_crypt_pw(password, enc_password)?; + 
+ + // if the password hash is not based on the current hashing function (as + // identified by its prefix), rehash the password. + if !enc_password.starts_with(proxmox_sys::crypt::HASH_PREFIX) { + // only log that we could not upgrade a password, we already know that the + // user has a valid password, no reason to deny the login attempt. + if let Err(e) = self.store_password(username, password, client_ip) { + log::warn!("could not upgrade a user's password! - {e}"); + } + } + } } Ok(()) }) @@ -54,6 +70,8 @@ impl Authenticator for PbsAuthenticator { _client_ip: Option<&IpAddr>, ) -> Result<(), Error> { let enc_password = proxmox_sys::crypt::encrypt_pw(password)?; + + let _guard = open_backup_lockfile(SHADOW_LOCK_FILENAME, None, true); let mut data = proxmox_sys::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?; data[username.as_str()] = enc_password.into(); @@ -70,6 +88,8 @@ impl Authenticator for PbsAuthenticator { } fn remove_password(&self, username: &UsernameRef) -> Result<(), Error> { + let _guard = open_backup_lockfile(SHADOW_LOCK_FILENAME, None, true); + let mut data = proxmox_sys::fs::file_get_json(SHADOW_CONFIG_FILENAME, Some(json!({})))?; if let Some(map) = data.as_object_mut() { map.remove(username.as_str()); @@ -202,6 +222,80 @@ impl LdapAuthenticator { } } +pub struct AdAuthenticator { + config: AdRealmConfig, +} + +impl AdAuthenticator { + pub fn api_type_to_config(config: &AdRealmConfig) -> Result<Config, Error> { + Self::api_type_to_config_with_password( + config, + auth_helpers::get_ldap_bind_password(&config.realm)?, + ) + } + + pub fn api_type_to_config_with_password( + config: &AdRealmConfig, + password: Option<String>, + ) -> Result<Config, Error> { + let mut servers = vec![config.server1.clone()]; + if let Some(server) = &config.server2 { + servers.push(server.clone()); + } + + let (ca_store, trusted_cert) = lookup_ca_store_or_cert_path(config.capath.as_deref()); + + Ok(Config { + servers, + port: config.port, + user_attr: "sAMAccountName".to_owned(), + base_dn: config.base_dn.clone().unwrap_or_default(), + bind_dn: config.bind_dn.clone(), + bind_password: password, + tls_mode: ldap_to_conn_mode(config.mode.unwrap_or_default()), + verify_certificate: config.verify.unwrap_or_default(), + additional_trusted_certificates: trusted_cert, + certificate_store_path: ca_store, + }) + } +} + +impl Authenticator for AdAuthenticator { + /// Authenticate user in AD realm + fn authenticate_user<'a>( + &'a self, + username: &'a UsernameRef, + password: &'a str, + _client_ip: Option<&'a IpAddr>, + ) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send + 'a>> { + Box::pin(async move { + let ldap_config = Self::api_type_to_config(&self.config)?; + let ldap = Connection::new(ldap_config); + ldap.authenticate_user(username.as_str(), password).await?; + Ok(()) + }) + } + + fn store_password( + &self, + _username: &UsernameRef, + _password: &str, + _client_ip: Option<&IpAddr>, + ) -> Result<(), Error> { + http_bail!( + NOT_IMPLEMENTED, + "storing passwords is not implemented for Active Directory realms" + ); + } + + fn remove_password(&self, _username: &UsernameRef) -> Result<(), Error> { + http_bail!( + NOT_IMPLEMENTED, + "removing passwords is not implemented for Active Directory realms" + ); + } +} + fn ldap_to_conn_mode(mode: LdapMode) -> ConnectionMode { match mode { LdapMode::Ldap => ConnectionMode::Ldap, @@ -234,6 +328,8 @@ pub(crate) fn lookup_authenticator( let (domains, _digest) = pbs_config::domains::config()?; if let Ok(config) = domains.lookup::<LdapRealmConfig>("ldap", realm) { Ok(Box::new(LdapAuthenticator { config })) + } else if let Ok(config) =
domains.lookup::<AdRealmConfig>("ad", realm) { + Ok(Box::new(AdAuthenticator { config })) } else if domains.lookup::<OpenIdRealmConfig>("openid", realm).is_ok() { Ok(Box::new(OpenIdAuthenticator())) } else { @@ -258,9 +354,9 @@ pub(crate) fn authenticate_user<'a>( } static PRIVATE_KEYRING: Lazy<Keyring> = - Lazy::new(|| Keyring::with_private_key(crate::auth_helpers::private_auth_key().clone().into())); + Lazy::new(|| Keyring::with_private_key(crate::auth_helpers::private_auth_key().clone())); static PUBLIC_KEYRING: Lazy<Keyring> = - Lazy::new(|| Keyring::with_public_key(crate::auth_helpers::public_auth_key().clone().into())); + Lazy::new(|| Keyring::with_public_key(crate::auth_helpers::public_auth_key().clone())); static AUTH_CONTEXT: OnceCell<PbsAuthContext> = OnceCell::new(); pub fn setup_auth_context(use_private_key: bool) { @@ -273,7 +369,7 @@ pub fn setup_auth_context(use_private_key: bool) { AUTH_CONTEXT .set(PbsAuthContext { keyring, - csrf_secret: crate::auth_helpers::csrf_secret().to_vec(), + csrf_secret: crate::auth_helpers::csrf_secret(), }) .map_err(drop) .expect("auth context setup twice"); @@ -291,7 +387,7 @@ pub(crate) fn public_auth_keyring() -> &'static Keyring { struct PbsAuthContext { keyring: &'static Keyring, - csrf_secret: Vec<u8>, + csrf_secret: &'static HMACKey, } impl proxmox_auth_api::api::AuthContext for PbsAuthContext { @@ -327,14 +423,14 @@ impl proxmox_auth_api::api::AuthContext for PbsAuthContext { /// Access the TFA config with an exclusive lock. fn tfa_config_write_lock(&self) -> Result<Box<dyn LockedTfaConfig>, Error> { Ok(Box::new(PbsLockedTfaConfig { - _lock: crate::config::tfa::read_lock()?, + _lock: crate::config::tfa::write_lock()?, config: crate::config::tfa::read()?, })) } /// CSRF prevention token secret data. - fn csrf_secret(&self) -> &[u8] { - &self.csrf_secret + fn csrf_secret(&self) -> &'static HMACKey { + self.csrf_secret } /// Verify a token secret.
diff --git a/src/auth_helpers.rs b/src/auth_helpers.rs index c2eaaef10..cb745eebd 100644 --- a/src/auth_helpers.rs +++ b/src/auth_helpers.rs @@ -1,81 +1,17 @@ use std::path::PathBuf; +use std::sync::OnceLock; -use anyhow::{bail, format_err, Error}; -use lazy_static::lazy_static; -use openssl::pkey::{PKey, Private, Public}; -use openssl::rsa::Rsa; -use openssl::sha; +use anyhow::Error; use pbs_config::BackupLockGuard; -use proxmox_lang::try_block; +use proxmox_auth_api::{HMACKey, PrivateKey, PublicKey}; use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions}; -use pbs_api_types::Userid; use pbs_buildcfg::configdir; use serde_json::json; pub use crate::auth::setup_auth_context; - -fn compute_csrf_secret_digest(timestamp: i64, secret: &[u8], userid: &Userid) -> String { - let mut hasher = sha::Sha256::new(); - let data = format!("{:08X}:{}:", timestamp, userid); - hasher.update(data.as_bytes()); - hasher.update(secret); - - base64::encode_config(hasher.finish(), base64::STANDARD_NO_PAD) -} - -pub fn assemble_csrf_prevention_token(secret: &[u8], userid: &Userid) -> String { - let epoch = proxmox_time::epoch_i64(); - - let digest = compute_csrf_secret_digest(epoch, secret, userid); - - format!("{:08X}:{}", epoch, digest) -} - -pub fn verify_csrf_prevention_token( - secret: &[u8], - userid: &Userid, - token: &str, - min_age: i64, - max_age: i64, -) -> Result { - use std::collections::VecDeque; - - let mut parts: VecDeque<&str> = token.split(':').collect(); - - try_block!({ - if parts.len() != 2 { - bail!("format error - wrong number of parts."); - } - - let timestamp = parts.pop_front().unwrap(); - let sig = parts.pop_front().unwrap(); - - let ttime = i64::from_str_radix(timestamp, 16) - .map_err(|err| format_err!("timestamp format error - {}", err))?; - - let digest = compute_csrf_secret_digest(ttime, secret, userid); - - if digest != sig { - bail!("invalid signature."); - } - - let now = proxmox_time::epoch_i64(); - - let age = now - ttime; - if age < min_age { - bail!("timestamp newer than expected."); - } - - if age > max_age { - bail!("timestamp too old."); - } - - Ok(age) - }) - .map_err(|err| format_err!("invalid csrf token - {}", err)) -} +pub use proxmox_auth_api::api::assemble_csrf_prevention_token; pub fn generate_csrf_key() -> Result<(), Error> { let path = PathBuf::from(configdir!("/csrf.key")); @@ -84,17 +20,14 @@ pub fn generate_csrf_key() -> Result<(), Error> { return Ok(()); } - let rsa = Rsa::generate(2048).unwrap(); - - let pem = rsa.private_key_to_pem()?; + let key = HMACKey::generate()?.to_base64()?; use nix::sys::stat::Mode; - let backup_user = pbs_config::backup_user()?; replace_file( &path, - &pem, + key.as_bytes(), CreateOptions::new() .perm(Mode::from_bits_truncate(0o0640)) .owner(nix::unistd::ROOT) @@ -115,26 +48,22 @@ pub fn generate_auth_key() -> Result<(), Error> { return Ok(()); } - let rsa = Rsa::generate(4096).unwrap(); - - let priv_pem = rsa.private_key_to_pem()?; + let key = proxmox_auth_api::PrivateKey::generate_ec()?; use nix::sys::stat::Mode; replace_file( &priv_path, - &priv_pem, + &key.private_key_to_pem()?, CreateOptions::new().perm(Mode::from_bits_truncate(0o0600)), true, )?; - let public_pem = rsa.public_key_to_pem()?; - let backup_user = pbs_config::backup_user()?; replace_file( &public_path, - &public_pem, + &key.public_key_to_pem()?, CreateOptions::new() .perm(Mode::from_bits_truncate(0o0640)) .owner(nix::unistd::ROOT) @@ -145,44 +74,39 @@ pub fn generate_auth_key() -> Result<(), Error> { Ok(()) } -pub fn csrf_secret() -> &'static [u8] 
{ - lazy_static! { - static ref SECRET: Vec = file_get_contents(configdir!("/csrf.key")).unwrap(); - } +pub fn csrf_secret() -> &'static HMACKey { + static SECRET: OnceLock = OnceLock::new(); - &SECRET + SECRET.get_or_init(|| { + let bytes = file_get_contents(configdir!("/csrf.key")).unwrap(); + std::str::from_utf8(&bytes) + .map_err(anyhow::Error::new) + .and_then(HMACKey::from_base64) + // legacy fall back to load legacy csrf secrets + // TODO: remove once we move away from legacy token verification + .unwrap_or_else(|_| { + let key_as_b64 = base64::encode_config(bytes, base64::STANDARD_NO_PAD); + HMACKey::from_base64(&key_as_b64).unwrap() + }) + }) } -fn load_public_auth_key() -> Result, Error> { - let pem = file_get_contents(configdir!("/authkey.pub"))?; - let rsa = Rsa::public_key_from_pem(&pem)?; - let key = PKey::from_rsa(rsa)?; +pub fn public_auth_key() -> &'static PublicKey { + static KEY: OnceLock = OnceLock::new(); - Ok(key) + KEY.get_or_init(|| { + let pem = file_get_contents(configdir!("/authkey.pub")).unwrap(); + PublicKey::from_pem(&pem).unwrap() + }) } -pub fn public_auth_key() -> &'static PKey { - lazy_static! { - static ref KEY: PKey = load_public_auth_key().unwrap(); - } +pub fn private_auth_key() -> &'static PrivateKey { + static KEY: OnceLock = OnceLock::new(); - &KEY -} - -fn load_private_auth_key() -> Result, Error> { - let pem = file_get_contents(configdir!("/authkey.key"))?; - let rsa = Rsa::private_key_from_pem(&pem)?; - let key = PKey::from_rsa(rsa)?; - - Ok(key) -} - -pub fn private_auth_key() -> &'static PKey { - lazy_static! { - static ref KEY: PKey = load_private_auth_key().unwrap(); - } - - &KEY + KEY.get_or_init(|| { + let pem = file_get_contents(configdir!("/authkey.key")).unwrap(); + PrivateKey::from_pem(&pem).unwrap() + }) } const LDAP_PASSWORDS_FILENAME: &str = configdir!("/ldap_passwords.json"); diff --git a/src/backup/verify.rs b/src/backup/verify.rs index c972e5328..6ef7e8eb3 100644 --- a/src/backup/verify.rs +++ b/src/backup/verify.rs @@ -1,12 +1,14 @@ -use nix::dir::Dir; use std::collections::HashSet; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use std::time::Instant; use anyhow::{bail, format_err, Error}; +use nix::dir::Dir; +use tracing::{error, info}; -use proxmox_sys::{task_log, WorkerTaskContext}; +use proxmox_sys::fs::lock_dir_noblock_shared; +use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ print_ns_and_snapshot, print_store_and_ns, Authid, BackupNamespace, BackupType, CryptMode, @@ -14,9 +16,8 @@ use pbs_api_types::{ }; use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo}; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo}; +use pbs_datastore::manifest::{ArchiveType, BackupManifest, FileInfo}; use pbs_datastore::{DataBlob, DataStore, StoreProgress}; -use proxmox_sys::fs::lock_dir_noblock_shared; use crate::tools::parallel_handler::ParallelHandler; @@ -69,11 +70,7 @@ fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> { } } -fn rename_corrupted_chunk( - datastore: Arc, - digest: &[u8; 32], - worker: &dyn WorkerTaskContext, -) { +fn rename_corrupted_chunk(datastore: Arc, digest: &[u8; 32]) { let (path, digest_str) = datastore.chunk_path(digest); let mut counter = 0; @@ -89,17 +86,12 @@ fn rename_corrupted_chunk( match std::fs::rename(&path, &new_path) { Ok(_) => { - task_log!(worker, "corrupted chunk renamed to {:?}", &new_path); + info!("corrupted chunk renamed to {:?}", &new_path); } 
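// A rename may fail if the chunk file is already gone (for example it was renamed
// or removed by an earlier run); NotFound is therefore ignored below, and any other
// error is only logged so that a single bad chunk does not abort the verification task.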
Err(err) => { match err.kind() { std::io::ErrorKind::NotFound => { /* ignored */ } - _ => task_log!( - worker, - "could not rename corrupted chunk {:?} - {}", - &path, - err - ), + _ => info!("could not rename corrupted chunk {:?} - {err}", &path), } } }; @@ -117,7 +109,6 @@ fn verify_index_chunks( let mut read_bytes = 0; let mut decoded_bytes = 0; - let worker2 = Arc::clone(&verify_worker.worker); let datastore2 = Arc::clone(&verify_worker.datastore); let corrupt_chunks2 = Arc::clone(&verify_worker.corrupt_chunks); let verified_chunks2 = Arc::clone(&verify_worker.verified_chunks); @@ -130,7 +121,7 @@ fn verify_index_chunks( let chunk_crypt_mode = match chunk.crypt_mode() { Err(err) => { corrupt_chunks2.lock().unwrap().insert(digest); - task_log!(worker2, "can't verify chunk, unknown CryptMode - {}", err); + info!("can't verify chunk, unknown CryptMode - {err}"); errors2.fetch_add(1, Ordering::SeqCst); return Ok(()); } @@ -138,20 +129,17 @@ fn verify_index_chunks( }; if chunk_crypt_mode != crypt_mode { - task_log!( - worker2, - "chunk CryptMode {:?} does not match index CryptMode {:?}", - chunk_crypt_mode, - crypt_mode + info!( + "chunk CryptMode {chunk_crypt_mode:?} does not match index CryptMode {crypt_mode:?}" ); errors2.fetch_add(1, Ordering::SeqCst); } if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) { corrupt_chunks2.lock().unwrap().insert(digest); - task_log!(worker2, "{}", err); + info!("{err}"); errors2.fetch_add(1, Ordering::SeqCst); - rename_corrupted_chunk(datastore2.clone(), &digest, &worker2); + rename_corrupted_chunk(datastore2.clone(), &digest); } else { verified_chunks2.lock().unwrap().insert(digest); } @@ -175,11 +163,7 @@ fn verify_index_chunks( .contains(digest) { let digest_str = hex::encode(digest); - task_log!( - verify_worker.worker, - "chunk {} was marked as corrupt", - digest_str - ); + info!("chunk {digest_str} was marked as corrupt"); errors.fetch_add(1, Ordering::SeqCst); true } else { @@ -218,17 +202,9 @@ fn verify_index_chunks( .lock() .unwrap() .insert(info.digest); - task_log!( - verify_worker.worker, - "can't verify chunk, load failed - {}", - err - ); + error!("can't verify chunk, load failed - {err}"); errors.fetch_add(1, Ordering::SeqCst); - rename_corrupted_chunk( - verify_worker.datastore.clone(), - &info.digest, - &verify_worker.worker, - ); + rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest); } Ok(chunk) => { let size = info.size(); @@ -251,15 +227,8 @@ fn verify_index_chunks( let error_count = errors.load(Ordering::SeqCst); - task_log!( - verify_worker.worker, - " verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)", - read_bytes_mib, - decoded_bytes_mib, - elapsed, - read_speed, - decode_speed, - error_count, + info!( + " verified {read_bytes_mib:.2}/{decoded_bytes_mib:.2} MiB in {elapsed:.2} seconds, speed {read_speed:.2}/{decode_speed:.2} MiB/s ({error_count} errors)" ); if errors.load(Ordering::SeqCst) > 0 { @@ -329,8 +298,7 @@ pub fn verify_backup_dir( filter: Option<&dyn Fn(&BackupManifest) -> bool>, ) -> Result { if !backup_dir.full_path().exists() { - task_log!( - verify_worker.worker, + info!( "SKIPPED: verify {}:{} - snapshot does not exist (anymore).", verify_worker.datastore.name(), backup_dir.dir(), @@ -348,8 +316,7 @@ pub fn verify_backup_dir( verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock) } Err(err) => { - task_log!( - verify_worker.worker, + info!( "SKIPPED: verify {}:{} - could not acquire snapshot lock: {}", 
verify_worker.datastore.name(), backup_dir.dir(), @@ -371,8 +338,7 @@ pub fn verify_backup_dir_with_lock( let manifest = match backup_dir.load_manifest() { Ok((manifest, _)) => manifest, Err(err) => { - task_log!( - verify_worker.worker, + info!( "verify {}:{} - manifest load error: {}", verify_worker.datastore.name(), backup_dir.dir(), @@ -384,8 +350,7 @@ pub fn verify_backup_dir_with_lock( if let Some(filter) = filter { if !filter(&manifest) { - task_log!( - verify_worker.worker, + info!( "SKIPPED: verify {}:{} (recently verified)", verify_worker.datastore.name(), backup_dir.dir(), @@ -394,8 +359,7 @@ pub fn verify_backup_dir_with_lock( } } - task_log!( - verify_worker.worker, + info!( "verify {}:{}", verify_worker.datastore.name(), backup_dir.dir() @@ -406,8 +370,8 @@ pub fn verify_backup_dir_with_lock( let mut verify_result = VerifyState::Ok; for info in manifest.files() { let result = proxmox_lang::try_block!({ - task_log!(verify_worker.worker, " check {}", info.filename); - match archive_type(&info.filename)? { + info!(" check {}", info.filename); + match ArchiveType::from_path(&info.filename)? { ArchiveType::FixedIndex => verify_fixed_index(verify_worker, backup_dir, info), ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, backup_dir, info), ArchiveType::Blob => verify_blob(backup_dir, info), @@ -418,8 +382,7 @@ pub fn verify_backup_dir_with_lock( verify_worker.worker.fail_on_shutdown()?; if let Err(err) = result { - task_log!( - verify_worker.worker, + info!( "verify {}:{}/{} failed: {}", verify_worker.datastore.name(), backup_dir.dir(), @@ -463,8 +426,7 @@ pub fn verify_backup_group( let mut list = match group.list_backups() { Ok(list) => list, Err(err) => { - task_log!( - verify_worker.worker, + info!( "verify {}, group {} - unable to list backups: {}", print_store_and_ns(verify_worker.datastore.name(), group.backup_ns()), group.group(), @@ -475,8 +437,7 @@ pub fn verify_backup_group( }; let snapshot_count = list.len(); - task_log!( - verify_worker.worker, + info!( "verify group {}:{} ({} snapshots)", verify_worker.datastore.name(), group.group(), @@ -494,9 +455,8 @@ pub fn verify_backup_group( )); } progress.done_snapshots = pos as u64 + 1; - task_log!(verify_worker.worker, "percentage done: {}", progress); + info!("percentage done: {progress}"); } - Ok(errors) } @@ -516,16 +476,11 @@ pub fn verify_all_backups( filter: Option<&dyn Fn(&BackupManifest) -> bool>, ) -> Result, Error> { let mut errors = Vec::new(); - let worker = Arc::clone(&verify_worker.worker); - task_log!( - worker, - "verify datastore {}", - verify_worker.datastore.name() - ); + info!("verify datastore {}", verify_worker.datastore.name()); let owner_filtered = if let Some(owner) = &owner { - task_log!(worker, "limiting to backups owned by {}", owner); + info!("limiting to backups owned by {owner}"); true } else { false @@ -553,7 +508,7 @@ pub fn verify_all_backups( } Err(err) => { // we don't filter by owner, but we want to log the error - task_log!(worker, "error on iterating groups in ns '{ns}' - {err}"); + info!("error on iterating groups in ns '{ns}' - {err}"); errors.push(err.to_string()); None } @@ -563,7 +518,7 @@ pub fn verify_all_backups( }) .collect::>(), Err(err) => { - task_log!(worker, "unable to list backups: {}", err,); + info!("unable to list backups: {err}"); return Ok(errors); } }; @@ -571,7 +526,7 @@ pub fn verify_all_backups( list.sort_unstable_by(|a, b| a.group().cmp(b.group())); let group_count = list.len(); - task_log!(worker, "found {} groups", group_count); + info!("found 
{group_count} groups"); let mut progress = StoreProgress::new(group_count as u64); diff --git a/src/bin/docgen.rs b/src/bin/docgen.rs index 7ee991a0c..66d71423a 100644 --- a/src/bin/docgen.rs +++ b/src/bin/docgen.rs @@ -31,6 +31,10 @@ fn main() -> Result<(), Error> { "apidata.js" => generate_api_tree(), "datastore.cfg" => dump_section_config(&pbs_config::datastore::CONFIG), "domains.cfg" => dump_section_config(&pbs_config::domains::CONFIG), + "notifications.cfg" => dump_section_config(proxmox_notify::config::config_parser()), + "notifications-priv.cfg" => { + dump_section_config(proxmox_notify::config::private_config_parser()) + } "tape.cfg" => dump_section_config(&pbs_config::drive::CONFIG), "tape-job.cfg" => dump_section_config(&pbs_config::tape_job::CONFIG), "user.cfg" => dump_section_config(&pbs_config::user::CONFIG), diff --git a/src/bin/pbs2to3.rs b/src/bin/pbs2to3.rs index 3b3714f6d..1f895abd0 100644 --- a/src/bin/pbs2to3.rs +++ b/src/bin/pbs2to3.rs @@ -5,7 +5,8 @@ use anyhow::{format_err, Error}; use regex::Regex; use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor}; -use proxmox_apt::repositories::{self, APTRepositoryFile, APTRepositoryPackageType}; +use proxmox_apt::repositories; +use proxmox_apt_api_types::{APTRepositoryFile, APTRepositoryPackageType}; use proxmox_backup::api2::node::apt; const OLD_SUITE: &str = "bullseye"; @@ -50,19 +51,18 @@ impl Checker { fn check_upgradable_packages(&mut self) -> Result<(), Error> { self.output.log_info("Checking for package updates..")?; - let result = Self::get_upgradable_packages(); + let result = apt::apt_update_available(); match result { Err(err) => { self.output.log_warn(format!("{err}"))?; self.output .log_fail("unable to retrieve list of package updates!")?; } - Ok(cache) => { - if cache.package_status.is_empty() { + Ok(package_status) => { + if package_status.is_empty() { self.output.log_pass("all packages up-to-date")?; } else { - let pkgs = cache - .package_status + let pkgs = package_status .iter() .map(|pkg| pkg.package.clone()) .collect::>() @@ -452,20 +452,6 @@ impl Checker { } Ok(()) } - - fn get_upgradable_packages() -> Result { - let cache = if let Ok(false) = proxmox_backup::tools::apt::pkg_cache_expired() { - if let Ok(Some(cache)) = proxmox_backup::tools::apt::read_pkg_state() { - cache - } else { - proxmox_backup::tools::apt::update_cache()? - } - } else { - proxmox_backup::tools::apt::update_cache()? 
- }; - - Ok(cache) - } } #[derive(PartialEq)] diff --git a/src/bin/proxmox-backup-api.rs b/src/bin/proxmox-backup-api.rs index e46557a0f..7a72d49a4 100644 --- a/src/bin/proxmox-backup-api.rs +++ b/src/bin/proxmox-backup-api.rs @@ -1,17 +1,18 @@ use std::future::Future; -use std::pin::Pin; +use std::pin::{pin, Pin}; use anyhow::{bail, Error}; use futures::*; use http::Response; use hyper::{Body, StatusCode}; +use tracing::level_filters::LevelFilter; use proxmox_lang::try_block; +use proxmox_log::init_logger; +use proxmox_rest_server::{ApiConfig, RestServer}; use proxmox_router::RpcEnvironmentType; use proxmox_sys::fs::CreateOptions; -use proxmox_rest_server::{daemon, ApiConfig, RestServer}; - use proxmox_backup::auth_helpers::*; use proxmox_backup::config; use proxmox_backup::server::auth::check_pbs_auth; @@ -40,13 +41,7 @@ fn get_index() -> Pin> + Send>> { } async fn run() -> Result<(), Error> { - if let Err(err) = syslog::init( - syslog::Facility::LOG_DAEMON, - log::LevelFilter::Info, - Some("proxmox-backup-api"), - ) { - bail!("unable to inititialize syslog - {}", err); - } + init_logger("PBS_LOG", LevelFilter::INFO)?; config::create_configdir()?; @@ -56,6 +51,7 @@ async fn run() -> Result<(), Error> { proxmox_backup::server::create_state_dir()?; proxmox_backup::server::create_active_operations_dir()?; proxmox_backup::server::jobstate::create_jobstate_dir()?; + proxmox_backup::server::notifications::create_spool_dir()?; proxmox_backup::tape::create_tape_status_dir()?; proxmox_backup::tape::create_drive_state_dir()?; proxmox_backup::tape::create_changer_state_dir()?; @@ -72,12 +68,10 @@ async fn run() -> Result<(), Error> { let _ = csrf_secret(); // load with lazy_static proxmox_backup::auth_helpers::setup_auth_context(true); + proxmox_backup::server::notifications::init()?; let backup_user = pbs_config::backup_user()?; - let mut command_sock = proxmox_rest_server::CommandSocket::new( - proxmox_rest_server::our_ctrl_sock(), - backup_user.gid, - ); + let mut command_sock = proxmox_daemon::command_socket::CommandSocket::new(backup_user.gid); let dir_opts = CreateOptions::new() .owner(backup_user.uid) @@ -110,17 +104,17 @@ async fn run() -> Result<(), Error> { )?; // http server future: - let server = daemon::create_daemon( + let server = proxmox_daemon::server::create_daemon( ([127, 0, 0, 1], 82).into(), move |listener| { let incoming = hyper::server::conn::AddrIncoming::from_listener(listener)?; Ok(async { - daemon::systemd_notify(daemon::SystemdNotify::Ready)?; + proxmox_systemd::notify::SystemdNotify::Ready.notify()?; hyper::Server::builder(incoming) .serve(rest_server) - .with_graceful_shutdown(proxmox_rest_server::shutdown_future()) + .with_graceful_shutdown(proxmox_daemon::shutdown_future()) .map_err(Error::from) .await }) @@ -132,9 +126,9 @@ async fn run() -> Result<(), Error> { let init_result: Result<(), Error> = try_block!({ proxmox_rest_server::register_task_control_commands(&mut command_sock)?; - command_sock.spawn()?; - proxmox_rest_server::catch_shutdown_signal()?; - proxmox_rest_server::catch_reload_signal()?; + command_sock.spawn(proxmox_rest_server::last_worker_future())?; + proxmox_daemon::catch_shutdown_signal(proxmox_rest_server::last_worker_future())?; + proxmox_daemon::catch_reload_signal(proxmox_rest_server::last_worker_future())?; Ok(()) }); @@ -153,11 +147,23 @@ async fn run() -> Result<(), Error> { std::thread::sleep(std::time::Duration::from_secs(3)); }); + start_notification_worker(); + server.await?; log::info!("server shutting down, waiting for active workers 
to complete"); - proxmox_rest_server::last_worker_future().await?; + proxmox_rest_server::last_worker_future().await; log::info!("done - exit server"); Ok(()) } + +fn start_notification_worker() { + let future = proxmox_backup::server::notifications::notification_worker(); + let abort_future = proxmox_daemon::shutdown_future(); + tokio::spawn(async move { + let future = pin!(future); + let abort_future = pin!(abort_future); + futures::future::select(future, abort_future).await; + }); +} diff --git a/src/bin/proxmox-backup-debug.rs b/src/bin/proxmox-backup-debug.rs index a3589c167..35ad11c70 100644 --- a/src/bin/proxmox-backup-debug.rs +++ b/src/bin/proxmox-backup-debug.rs @@ -1,5 +1,6 @@ +use proxmox_log::init_cli_logger; use proxmox_router::{ - cli::{init_cli_logger, run_cli_command, CliCommandMap, CliEnvironment}, + cli::{run_cli_command, CliCommandMap, CliEnvironment}, RpcEnvironment, }; @@ -7,7 +8,7 @@ mod proxmox_backup_debug; use proxmox_backup_debug::*; fn main() { - init_cli_logger("PBS_LOG", "info"); + init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger"); let cmd_def = CliCommandMap::new() .insert("inspect", inspect::inspect_commands()) diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs index e5142174b..08d327d3c 100644 --- a/src/bin/proxmox-backup-manager.rs +++ b/src/bin/proxmox-backup-manager.rs @@ -3,6 +3,7 @@ use std::io::{self, Write}; use std::str::FromStr; use anyhow::{format_err, Error}; +use proxmox_log::init_cli_logger; use serde_json::{json, Value}; use proxmox_router::{cli::*, RpcEnvironment}; @@ -93,6 +94,64 @@ async fn garbage_collection_status(param: Value) -> Result { Ok(Value::Null) } +#[api( + input: { + properties: { + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// List garbage collection job status for all datastores, including datastores without gc jobs. 
+async fn garbage_collection_list_jobs(param: Value) -> Result { + let output_format = get_output_format(¶m); + + let client = connect_to_localhost()?; + + let path = "api2/json/admin/gc"; + + let mut result = client.get(path, None).await?; + let mut data = result["data"].take(); + let return_type = &api2::admin::gc::API_METHOD_LIST_ALL_GC_JOBS.returns; + + use pbs_tools::format::{render_bytes_human_readable, render_duration, render_epoch}; + let options = default_table_format_options() + .column(ColumnConfig::new("store")) + .column( + ColumnConfig::new("last-run-endtime") + .right_align(false) + .renderer(render_epoch), + ) + .column( + ColumnConfig::new("duration") + .right_align(false) + .renderer(render_duration), + ) + .column( + ColumnConfig::new("removed-bytes") + .right_align(false) + .renderer(render_bytes_human_readable), + ) + .column( + ColumnConfig::new("pending-bytes") + .right_align(false) + .renderer(render_bytes_human_readable), + ) + .column(ColumnConfig::new("last-run-state")) + .column(ColumnConfig::new("schedule")) + .column( + ColumnConfig::new("next-run") + .right_align(false) + .renderer(render_epoch), + ); + + format_and_print_result_full(&mut data, return_type, &output_format, &options); + + Ok(Value::Null) +} + fn garbage_collection_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert( @@ -106,6 +165,10 @@ fn garbage_collection_commands() -> CommandLineInterface { CliCommand::new(&API_METHOD_START_GARBAGE_COLLECTION) .arg_param(&["store"]) .completion_cb("store", pbs_config::datastore::complete_datastore_name), + ) + .insert( + "list", + CliCommand::new(&API_METHOD_GARBAGE_COLLECTION_LIST_JOBS), ); cmd_def.into() @@ -430,7 +493,8 @@ async fn get_versions(verbose: bool, param: Value) -> Result { } async fn run() -> Result<(), Error> { - init_cli_logger("PBS_LOG", "info"); + init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?; + proxmox_backup::server::notifications::init()?; let cmd_def = CliCommandMap::new() .insert("acl", acl_commands()) @@ -438,8 +502,10 @@ async fn run() -> Result<(), Error> { .insert("disk", disk_commands()) .insert("dns", dns_commands()) .insert("ldap", ldap_commands()) + .insert("ad", ad_commands()) .insert("network", network_commands()) .insert("node", node_commands()) + .insert("notification", notification_commands()) .insert("user", user_commands()) .insert("openid", openid_commands()) .insert("remote", remote_commands()) @@ -489,12 +555,9 @@ async fn run() -> Result<(), Error> { file_opts, )?; - let mut command_sock = proxmox_rest_server::CommandSocket::new( - proxmox_rest_server::our_ctrl_sock(), - backup_user.gid, - ); + let mut command_sock = proxmox_daemon::command_socket::CommandSocket::new(backup_user.gid); proxmox_rest_server::register_task_control_commands(&mut command_sock)?; - command_sock.spawn()?; + command_sock.spawn(proxmox_rest_server::last_worker_future())?; } let mut rpcenv = CliEnvironment::new(); diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs index f79ec2f52..859f5b0f8 100644 --- a/src/bin/proxmox-backup-proxy.rs +++ b/src/bin/proxmox-backup-proxy.rs @@ -1,4 +1,5 @@ use std::path::{Path, PathBuf}; +use std::pin::pin; use std::sync::{Arc, Mutex}; use anyhow::{bail, format_err, Context, Error}; @@ -7,18 +8,18 @@ use http::request::Parts; use http::Response; use hyper::header; use hyper::{Body, StatusCode}; +use tracing::level_filters::LevelFilter; +use tracing::{info, warn}; use url::form_urlencoded; use openssl::ssl::SslAcceptor; use serde_json::{json, 
Value}; use proxmox_lang::try_block; -use proxmox_metrics::MetricsData; +use proxmox_log::init_logger; use proxmox_router::{RpcEnvironment, RpcEnvironmentType}; -use proxmox_sys::fs::{CreateOptions, FileSystemInformation}; -use proxmox_sys::linux::procfs::{Loadavg, ProcFsMemInfo, ProcFsNetDev, ProcFsStat}; +use proxmox_sys::fs::CreateOptions; use proxmox_sys::logrotate::LogRotate; -use proxmox_sys::{task_log, task_warn}; use pbs_datastore::DataStore; @@ -27,15 +28,11 @@ use proxmox_rest_server::{ RestEnvironment, RestServer, WorkerTask, }; -use proxmox_backup::rrd_cache::{ - initialize_rrd_cache, rrd_sync_journal, rrd_update_derive, rrd_update_gauge, -}; use proxmox_backup::{ server::{ auth::check_pbs_auth, jobstate::{self, Job}, }, - tools::disks::BlockDevStat, traffic_control_cache::{SharedRateLimit, TRAFFIC_CONTROL_CACHE}, }; @@ -47,14 +44,9 @@ use pbs_api_types::{ VerificationJobConfig, }; -use proxmox_rest_server::daemon; - use proxmox_backup::auth_helpers::*; -use proxmox_backup::server; -use proxmox_backup::tools::{ - disks::{zfs_dataset_stats, DiskManage}, - PROXMOX_BACKUP_TCP_KEEPALIVE_TIME, -}; +use proxmox_backup::server::{self, metric_collection}; +use proxmox_backup::tools::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME; use proxmox_backup::api2::pull::do_sync_job; use proxmox_backup::api2::tape::backup::do_tape_backup_job; @@ -181,26 +173,11 @@ async fn get_index_future(env: RestEnvironment, parts: Parts) -> Response } async fn run() -> Result<(), Error> { - // Note: To debug early connection error use - // PROXMOX_DEBUG=1 ./target/release/proxmox-backup-proxy - let debug = std::env::var("PROXMOX_DEBUG").is_ok(); - - if let Err(err) = syslog::init( - syslog::Facility::LOG_DAEMON, - if debug { - log::LevelFilter::Debug - } else { - log::LevelFilter::Info - }, - Some("proxmox-backup-proxy"), - ) { - bail!("unable to inititialize syslog - {err}"); - } + init_logger("PBS_LOG", LevelFilter::INFO)?; proxmox_backup::auth_helpers::setup_auth_context(false); - - let rrd_cache = initialize_rrd_cache()?; - rrd_cache.apply_journal()?; + proxmox_backup::server::notifications::init()?; + metric_collection::init()?; let mut indexpath = PathBuf::from(pbs_buildcfg::JS_DIR); indexpath.push("index.hbs"); @@ -226,10 +203,7 @@ async fn run() -> Result<(), Error> { ]); let backup_user = pbs_config::backup_user()?; - let mut command_sock = proxmox_rest_server::CommandSocket::new( - proxmox_rest_server::our_ctrl_sock(), - backup_user.gid, - ); + let mut command_sock = proxmox_daemon::command_socket::CommandSocket::new(backup_user.gid); let dir_opts = CreateOptions::new() .owner(backup_user.uid) @@ -300,27 +274,27 @@ async fn run() -> Result<(), Error> { })?; let connections = proxmox_rest_server::connection::AcceptBuilder::new() - .debug(debug) + .debug(tracing::enabled!(tracing::Level::DEBUG)) .rate_limiter_lookup(Arc::new(lookup_rate_limiter)) .tcp_keepalive_time(PROXMOX_BACKUP_TCP_KEEPALIVE_TIME); - let server = daemon::create_daemon( + let server = proxmox_daemon::server::create_daemon( ([0, 0, 0, 0, 0, 0, 0, 0], 8007).into(), move |listener| { let (secure_connections, insecure_connections) = connections.accept_tls_optional(listener, acceptor); Ok(async { - daemon::systemd_notify(daemon::SystemdNotify::Ready)?; + proxmox_systemd::notify::SystemdNotify::Ready.notify()?; let secure_server = hyper::Server::builder(secure_connections) .serve(rest_server) - .with_graceful_shutdown(proxmox_rest_server::shutdown_future()) + .with_graceful_shutdown(proxmox_daemon::shutdown_future()) .map_err(Error::from); let 
insecure_server = hyper::Server::builder(insecure_connections) .serve(redirector) - .with_graceful_shutdown(proxmox_rest_server::shutdown_future()) + .with_graceful_shutdown(proxmox_daemon::shutdown_future()) .map_err(Error::from); let (secure_res, insecure_res) = @@ -349,9 +323,9 @@ async fn run() -> Result<(), Error> { let init_result: Result<(), Error> = try_block!({ proxmox_rest_server::register_task_control_commands(&mut command_sock)?; - command_sock.spawn()?; - proxmox_rest_server::catch_shutdown_signal()?; - proxmox_rest_server::catch_reload_signal()?; + command_sock.spawn(proxmox_rest_server::last_worker_future())?; + proxmox_daemon::catch_shutdown_signal(proxmox_rest_server::last_worker_future())?; + proxmox_daemon::catch_reload_signal(proxmox_rest_server::last_worker_future())?; Ok(()) }); @@ -371,12 +345,12 @@ async fn run() -> Result<(), Error> { }); start_task_scheduler(); - start_stat_generator(); + metric_collection::start_collection_task(); start_traffic_control_updater(); server.await?; log::info!("server shutting down, waiting for active workers to complete"); - proxmox_rest_server::last_worker_future().await?; + proxmox_rest_server::last_worker_future().await; log::info!("done - exit server"); Ok(()) @@ -404,25 +378,20 @@ fn make_tls_acceptor() -> Result { acceptor.build() } -fn start_stat_generator() { - let abort_future = proxmox_rest_server::shutdown_future(); - let future = Box::pin(run_stat_generator()); - let task = futures::future::select(future, abort_future); - tokio::spawn(task.map(|_| ())); -} - fn start_task_scheduler() { - let abort_future = proxmox_rest_server::shutdown_future(); - let future = Box::pin(run_task_scheduler()); - let task = futures::future::select(future, abort_future); - tokio::spawn(task.map(|_| ())); + tokio::spawn(async { + let abort_future = pin!(proxmox_daemon::shutdown_future()); + let future = pin!(run_task_scheduler()); + futures::future::select(future, abort_future).await; + }); } fn start_traffic_control_updater() { - let abort_future = proxmox_rest_server::shutdown_future(); - let future = Box::pin(run_traffic_control_updater()); - let task = futures::future::select(future, abort_future); - tokio::spawn(task.map(|_| ())); + tokio::spawn(async { + let abort_future = pin!(proxmox_daemon::shutdown_future()); + let future = pin!(run_traffic_control_updater()); + futures::future::select(future, abort_future).await; + }); } use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; @@ -749,7 +718,7 @@ async fn schedule_task_log_rotate() { false, move |worker| { job.start(&worker.upid().to_string())?; - task_log!(worker, "starting task log rotation"); + info!("starting task log rotation"); let result = try_block!({ let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file @@ -774,9 +743,9 @@ async fn schedule_task_log_rotate() { )?; if has_rotated { - task_log!(worker, "task log archive was rotated"); + info!("task log archive was rotated"); } else { - task_log!(worker, "task log archive was not rotated"); + info!("task log archive was not rotated"); } let max_size = 32 * 1024 * 1024 - 1; @@ -792,9 +761,9 @@ async fn schedule_task_log_rotate() { if logrotate.rotate(max_size)? 
{ println!("rotated access log, telling daemons to re-open log file"); proxmox_async::runtime::block_on(command_reopen_access_logfiles())?; - task_log!(worker, "API access log was rotated"); + info!("API access log was rotated"); } else { - task_log!(worker, "API access log was not rotated"); + info!("API access log was not rotated"); } let mut logrotate = LogRotate::new( @@ -807,15 +776,15 @@ async fn schedule_task_log_rotate() { if logrotate.rotate(max_size)? { println!("rotated auth log, telling daemons to re-open log file"); proxmox_async::runtime::block_on(command_reopen_auth_logfiles())?; - task_log!(worker, "API authentication log was rotated"); + info!("API authentication log was rotated"); } else { - task_log!(worker, "API authentication log was not rotated"); + info!("API authentication log was not rotated"); } if has_rotated { - task_log!(worker, "cleaning up old task logs"); - if let Err(err) = cleanup_old_tasks(&worker, true) { - task_warn!(worker, "could not completely cleanup old tasks: {err}"); + info!("cleaning up old task logs"); + if let Err(err) = cleanup_old_tasks(true) { + warn!("could not completely cleanup old tasks: {err}"); } } @@ -838,14 +807,14 @@ async fn schedule_task_log_rotate() { async fn command_reopen_access_logfiles() -> Result<(), Error> { // only care about the most recent daemon instance for each, proxy & api, as other older ones // should not respond to new requests anyway, but only finish their current one and then exit. - let sock = proxmox_rest_server::our_ctrl_sock(); + let sock = proxmox_daemon::command_socket::this_path(); let f1 = - proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-access-log-reopen\"}\n"); + proxmox_daemon::command_socket::send_raw(sock, "{\"command\":\"api-access-log-reopen\"}\n"); let pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?; - let sock = proxmox_rest_server::ctrl_sock_from_pid(pid); + let sock = proxmox_daemon::command_socket::path_from_pid(pid); let f2 = - proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-access-log-reopen\"}\n"); + proxmox_daemon::command_socket::send_raw(sock, "{\"command\":\"api-access-log-reopen\"}\n"); match futures::join!(f1, f2) { (Err(e1), Err(e2)) => Err(format_err!( @@ -860,12 +829,14 @@ async fn command_reopen_access_logfiles() -> Result<(), Error> { async fn command_reopen_auth_logfiles() -> Result<(), Error> { // only care about the most recent daemon instance for each, proxy & api, as other older ones // should not respond to new requests anyway, but only finish their current one and then exit. 
- let sock = proxmox_rest_server::our_ctrl_sock(); - let f1 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-auth-log-reopen\"}\n"); + let sock = proxmox_daemon::command_socket::this_path(); + let f1 = + proxmox_daemon::command_socket::send_raw(sock, "{\"command\":\"api-auth-log-reopen\"}\n"); let pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?; - let sock = proxmox_rest_server::ctrl_sock_from_pid(pid); - let f2 = proxmox_rest_server::send_raw_command(sock, "{\"command\":\"api-auth-log-reopen\"}\n"); + let sock = proxmox_daemon::command_socket::path_from_pid(pid); + let f2 = + proxmox_daemon::command_socket::send_raw(sock, "{\"command\":\"api-auth-log-reopen\"}\n"); match futures::join!(f1, f2) { (Err(e1), Err(e2)) => Err(format_err!( @@ -877,349 +848,6 @@ async fn command_reopen_auth_logfiles() -> Result<(), Error> { } } -async fn run_stat_generator() { - loop { - let delay_target = Instant::now() + Duration::from_secs(10); - - let stats_future = tokio::task::spawn_blocking(|| { - let hoststats = collect_host_stats_sync(); - let (hostdisk, datastores) = collect_disk_stats_sync(); - Arc::new((hoststats, hostdisk, datastores)) - }); - let stats = match stats_future.await { - Ok(res) => res, - Err(err) => { - log::error!("collecting host stats panicked: {err}"); - tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await; - continue; - } - }; - - let rrd_future = tokio::task::spawn_blocking({ - let stats = Arc::clone(&stats); - move || { - rrd_update_host_stats_sync(&stats.0, &stats.1, &stats.2); - rrd_sync_journal(); - } - }); - - let metrics_future = send_data_to_metric_servers(stats); - - let (rrd_res, metrics_res) = join!(rrd_future, metrics_future); - if let Err(err) = rrd_res { - log::error!("rrd update panicked: {err}"); - } - if let Err(err) = metrics_res { - log::error!("error during metrics sending: {err}"); - } - - tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await; - } -} - -async fn send_data_to_metric_servers( - stats: Arc<(HostStats, DiskStat, Vec)>, -) -> Result<(), Error> { - let (config, _digest) = pbs_config::metrics::config()?; - let channel_list = get_metric_server_connections(config)?; - - if channel_list.is_empty() { - return Ok(()); - } - - let ctime = proxmox_time::epoch_i64(); - let nodename = proxmox_sys::nodename(); - - let mut values = Vec::new(); - - let mut cpuvalue = match &stats.0.proc { - Some(stat) => serde_json::to_value(stat)?, - None => json!({}), - }; - - if let Some(loadavg) = &stats.0.load { - cpuvalue["avg1"] = Value::from(loadavg.0); - cpuvalue["avg5"] = Value::from(loadavg.1); - cpuvalue["avg15"] = Value::from(loadavg.2); - } - - values.push(Arc::new( - MetricsData::new("cpustat", ctime, cpuvalue)? - .tag("object", "host") - .tag("host", nodename), - )); - - if let Some(stat) = &stats.0.meminfo { - values.push(Arc::new( - MetricsData::new("memory", ctime, stat)? - .tag("object", "host") - .tag("host", nodename), - )); - } - - if let Some(netdev) = &stats.0.net { - for item in netdev { - values.push(Arc::new( - MetricsData::new("nics", ctime, item)? - .tag("object", "host") - .tag("host", nodename) - .tag("instance", item.device.clone()), - )); - } - } - - values.push(Arc::new( - MetricsData::new("blockstat", ctime, stats.1.to_value())? - .tag("object", "host") - .tag("host", nodename), - )); - - for datastore in stats.2.iter() { - values.push(Arc::new( - MetricsData::new("blockstat", ctime, datastore.to_value())? 
- .tag("object", "host") - .tag("host", nodename) - .tag("datastore", datastore.name.clone()), - )); - } - - // we must have a concrete functions, because the inferred lifetime from a - // closure is not general enough for the tokio::spawn call we are in here... - fn map_fn(item: &(proxmox_metrics::Metrics, String)) -> &proxmox_metrics::Metrics { - &item.0 - } - - let results = - proxmox_metrics::send_data_to_channels(&values, channel_list.iter().map(map_fn)).await; - for (res, name) in results - .into_iter() - .zip(channel_list.iter().map(|(_, name)| name)) - { - if let Err(err) = res { - log::error!("error sending into channel of {name}: {err}"); - } - } - - futures::future::join_all(channel_list.into_iter().map(|(channel, name)| async move { - if let Err(err) = channel.join().await { - log::error!("error sending to metric server {name}: {err}"); - } - })) - .await; - - Ok(()) -} - -/// Get the metric server connections from a config -pub fn get_metric_server_connections( - metric_config: proxmox_section_config::SectionConfigData, -) -> Result, Error> { - let mut res = Vec::new(); - - for config in - metric_config.convert_to_typed_array::("influxdb-udp")? - { - if !config.enable { - continue; - } - let future = proxmox_metrics::influxdb_udp(&config.host, config.mtu); - res.push((future, config.name)); - } - - for config in - metric_config.convert_to_typed_array::("influxdb-http")? - { - if !config.enable { - continue; - } - let future = proxmox_metrics::influxdb_http( - &config.url, - config.organization.as_deref().unwrap_or("proxmox"), - config.bucket.as_deref().unwrap_or("proxmox"), - config.token.as_deref(), - config.verify_tls.unwrap_or(true), - config.max_body_size.unwrap_or(25_000_000), - )?; - res.push((future, config.name)); - } - Ok(res) -} - -struct HostStats { - proc: Option, - meminfo: Option, - net: Option>, - load: Option, -} - -struct DiskStat { - name: String, - usage: Option, - dev: Option, -} - -impl DiskStat { - fn to_value(&self) -> Value { - let mut value = json!({}); - if let Some(usage) = &self.usage { - value["total"] = Value::from(usage.total); - value["used"] = Value::from(usage.used); - value["avail"] = Value::from(usage.available); - } - - if let Some(dev) = &self.dev { - value["read_ios"] = Value::from(dev.read_ios); - value["read_bytes"] = Value::from(dev.read_sectors * 512); - value["write_ios"] = Value::from(dev.write_ios); - value["write_bytes"] = Value::from(dev.write_sectors * 512); - value["io_ticks"] = Value::from(dev.io_ticks / 1000); - } - value - } -} - -fn collect_host_stats_sync() -> HostStats { - use proxmox_sys::linux::procfs::{ - read_loadavg, read_meminfo, read_proc_net_dev, read_proc_stat, - }; - - let proc = match read_proc_stat() { - Ok(stat) => Some(stat), - Err(err) => { - eprintln!("read_proc_stat failed - {err}"); - None - } - }; - - let meminfo = match read_meminfo() { - Ok(stat) => Some(stat), - Err(err) => { - eprintln!("read_meminfo failed - {err}"); - None - } - }; - - let net = match read_proc_net_dev() { - Ok(netdev) => Some(netdev), - Err(err) => { - eprintln!("read_prox_net_dev failed - {err}"); - None - } - }; - - let load = match read_loadavg() { - Ok(loadavg) => Some(loadavg), - Err(err) => { - eprintln!("read_loadavg failed - {err}"); - None - } - }; - - HostStats { - proc, - meminfo, - net, - load, - } -} - -fn collect_disk_stats_sync() -> (DiskStat, Vec) { - let disk_manager = DiskManage::new(); - - let root = gather_disk_stats(disk_manager.clone(), Path::new("/"), "host"); - - let mut datastores = Vec::new(); - match 
pbs_config::datastore::config() { - Ok((config, _)) => { - let datastore_list: Vec = config - .convert_to_typed_array("datastore") - .unwrap_or_default(); - - for config in datastore_list { - if config - .get_maintenance_mode() - .map_or(false, |mode| mode.check(Some(Operation::Read)).is_err()) - { - continue; - } - let path = std::path::Path::new(&config.path); - datastores.push(gather_disk_stats(disk_manager.clone(), path, &config.name)); - } - } - Err(err) => { - eprintln!("read datastore config failed - {err}"); - } - } - - (root, datastores) -} - -fn rrd_update_host_stats_sync(host: &HostStats, hostdisk: &DiskStat, datastores: &[DiskStat]) { - if let Some(stat) = &host.proc { - rrd_update_gauge("host/cpu", stat.cpu); - rrd_update_gauge("host/iowait", stat.iowait_percent); - } - - if let Some(meminfo) = &host.meminfo { - rrd_update_gauge("host/memtotal", meminfo.memtotal as f64); - rrd_update_gauge("host/memused", meminfo.memused as f64); - rrd_update_gauge("host/swaptotal", meminfo.swaptotal as f64); - rrd_update_gauge("host/swapused", meminfo.swapused as f64); - } - - if let Some(netdev) = &host.net { - use pbs_config::network::is_physical_nic; - let mut netin = 0; - let mut netout = 0; - for item in netdev { - if !is_physical_nic(&item.device) { - continue; - } - netin += item.receive; - netout += item.send; - } - rrd_update_derive("host/netin", netin as f64); - rrd_update_derive("host/netout", netout as f64); - } - - if let Some(loadavg) = &host.load { - rrd_update_gauge("host/loadavg", loadavg.0); - } - - rrd_update_disk_stat(hostdisk, "host"); - - for stat in datastores { - let rrd_prefix = format!("datastore/{}", stat.name); - rrd_update_disk_stat(stat, &rrd_prefix); - } -} - -fn rrd_update_disk_stat(disk: &DiskStat, rrd_prefix: &str) { - if let Some(status) = &disk.usage { - let rrd_key = format!("{}/total", rrd_prefix); - rrd_update_gauge(&rrd_key, status.total as f64); - let rrd_key = format!("{}/used", rrd_prefix); - rrd_update_gauge(&rrd_key, status.used as f64); - let rrd_key = format!("{}/available", rrd_prefix); - rrd_update_gauge(&rrd_key, status.available as f64); - } - - if let Some(stat) = &disk.dev { - let rrd_key = format!("{}/read_ios", rrd_prefix); - rrd_update_derive(&rrd_key, stat.read_ios as f64); - let rrd_key = format!("{}/read_bytes", rrd_prefix); - rrd_update_derive(&rrd_key, (stat.read_sectors * 512) as f64); - - let rrd_key = format!("{}/write_ios", rrd_prefix); - rrd_update_derive(&rrd_key, stat.write_ios as f64); - let rrd_key = format!("{}/write_bytes", rrd_prefix); - rrd_update_derive(&rrd_key, (stat.write_sectors * 512) as f64); - - let rrd_key = format!("{}/io_ticks", rrd_prefix); - rrd_update_derive(&rrd_key, (stat.io_ticks as f64) / 1000.0); - } -} - fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool { let event: CalendarEvent = match event_str.parse() { Ok(event) => event, @@ -1250,53 +878,6 @@ fn check_schedule(worker_type: &str, event_str: &str, id: &str) -> bool { next <= now } -fn gather_disk_stats(disk_manager: Arc, path: &Path, name: &str) -> DiskStat { - let usage = match proxmox_sys::fs::fs_info(path) { - Ok(status) => Some(status), - Err(err) => { - eprintln!("read fs info on {path:?} failed - {err}"); - None - } - }; - - let dev = match disk_manager.find_mounted_device(path) { - Ok(None) => None, - Ok(Some((fs_type, device, source))) => { - let mut device_stat = None; - match (fs_type.as_str(), source) { - ("zfs", Some(source)) => match source.into_string() { - Ok(dataset) => match zfs_dataset_stats(&dataset) { - 
Ok(stat) => device_stat = Some(stat), - Err(err) => eprintln!("zfs_dataset_stats({dataset:?}) failed - {err}"), - }, - Err(source) => { - eprintln!("zfs_pool_stats({source:?}) failed - invalid characters") - } - }, - _ => { - if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) { - match disk.read_stat() { - Ok(stat) => device_stat = stat, - Err(err) => eprintln!("disk.read_stat {path:?} failed - {err}"), - } - } - } - } - device_stat - } - Err(err) => { - eprintln!("find_mounted_device failed - {err}"); - None - } - }; - - DiskStat { - name: name.to_string(), - usage, - dev, - } -} - // Rate Limiter lookup async fn run_traffic_control_updater() { loop { diff --git a/src/bin/proxmox-daily-update.rs b/src/bin/proxmox-daily-update.rs index 4e2fc85b8..929af3fdf 100644 --- a/src/bin/proxmox-daily-update.rs +++ b/src/bin/proxmox-daily-update.rs @@ -1,6 +1,7 @@ use anyhow::Error; use serde_json::json; +use proxmox_notify::context::pbs::PBS_CONTEXT; use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; // use proxmox_subscription::SubscriptionStatus; use proxmox_sys::fs::CreateOptions; @@ -98,12 +99,11 @@ async fn run(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { file_opts.clone(), )?; - let mut command_sock = proxmox_rest_server::CommandSocket::new( - proxmox_rest_server::our_ctrl_sock(), - backup_user.gid, - ); + let mut command_sock = proxmox_daemon::command_socket::CommandSocket::new(backup_user.gid); proxmox_rest_server::register_task_control_commands(&mut command_sock)?; - command_sock.spawn()?; + command_sock.spawn(proxmox_rest_server::last_worker_future())?; + + proxmox_notify::context::set_context(&PBS_CONTEXT); do_update(rpcenv).await } diff --git a/src/bin/proxmox-tape.rs b/src/bin/proxmox-tape.rs index 83793c346..8e8584b35 100644 --- a/src/bin/proxmox-tape.rs +++ b/src/bin/proxmox-tape.rs @@ -5,6 +5,7 @@ use serde_json::{json, Value}; use proxmox_human_byte::HumanByte; use proxmox_io::ReadExt; +use proxmox_log::init_cli_logger; use proxmox_router::cli::*; use proxmox_router::RpcEnvironment; use proxmox_schema::api; @@ -997,7 +998,7 @@ async fn catalog_media(mut param: Value) -> Result<(), Error> { } fn main() { - init_cli_logger("PBS_LOG", "info"); + init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO).expect("failed to initiate logger"); let cmd_def = CliCommandMap::new() .insert( diff --git a/src/bin/proxmox_backup_debug/api.rs b/src/bin/proxmox_backup_debug/api.rs index 42fa51f41..139910e7e 100644 --- a/src/bin/proxmox_backup_debug/api.rs +++ b/src/bin/proxmox_backup_debug/api.rs @@ -79,7 +79,7 @@ async fn complete_api_path_do(mut complete_me: &str, capability: Option<&str>) - if list.len() == 1 && old_len != 1 && list[0].ends_with('/') { // we added only one match and it was a directory, lookup again - lookup_path = list[0].clone(); + lookup_path.clone_from(&list[0]); filter = ""; continue; } @@ -226,14 +226,18 @@ async fn call_api_code( nix::unistd::setuid(backup_user.uid)?; } match method.handler { - ApiHandler::StreamingSync(handler) => { + ApiHandler::SerializingSync(handler) => { let res = (handler)(params, method, rpcenv)?.to_value()?; Ok(res) } - ApiHandler::StreamingAsync(handler) => { + ApiHandler::SerializingAsync(handler) => { let res = (handler)(params, method, rpcenv).await?.to_value()?; Ok(res) } + ApiHandler::StreamSync(handler) => (handler)(params, method, rpcenv)?.try_collect(), + ApiHandler::StreamAsync(handler) => { + (handler)(params, method, rpcenv).await?.try_collect().await + } ApiHandler::AsyncHttp(_handler) => { 
bail!("not implemented"); } diff --git a/src/bin/proxmox_backup_debug/diff.rs b/src/bin/proxmox_backup_debug/diff.rs index 5b68941a4..b0436d048 100644 --- a/src/bin/proxmox_backup_debug/diff.rs +++ b/src/bin/proxmox_backup_debug/diff.rs @@ -277,7 +277,7 @@ async fn open_dynamic_index( let reader = BufferedDynamicReader::new(index, chunk_reader); let archive_size = reader.archive_size(); let reader: Arc = Arc::new(LocalDynamicReadAt::new(reader)); - let accessor = Accessor::new(reader, archive_size).await?; + let accessor = Accessor::new(pxar::PxarVariant::Unified(reader), archive_size).await?; Ok((lookup_index, accessor)) } @@ -787,7 +787,7 @@ impl FileEntryPrinter { Ok(()) } - fn write_column_seperator(&mut self) -> Result<(), Error> { + fn write_column_separator(&mut self) -> Result<(), Error> { write!(self.stream, " ")?; Ok(()) } @@ -800,25 +800,25 @@ impl FileEntryPrinter { operation: FileOperation, ) -> Result<(), Error> { self.write_operation(operation)?; - self.write_column_seperator()?; + self.write_column_separator()?; self.write_entry_type(entry, changed.entry_type)?; - self.write_column_seperator()?; + self.write_column_separator()?; self.write_uid(entry, changed.uid)?; - self.write_column_seperator()?; + self.write_column_separator()?; self.write_gid(entry, changed.gid)?; - self.write_column_seperator()?; + self.write_column_separator()?; self.write_mode(entry, changed.mode)?; - self.write_column_seperator()?; + self.write_column_separator()?; self.write_filesize(entry, changed.size)?; - self.write_column_seperator()?; + self.write_column_separator()?; self.write_mtime(entry, changed.mtime)?; - self.write_column_seperator()?; + self.write_column_separator()?; self.write_file_name(entry, changed.content)?; writeln!(self.stream)?; diff --git a/src/bin/proxmox_backup_manager/acme.rs b/src/bin/proxmox_backup_manager/acme.rs index f3e62115b..e98402a9f 100644 --- a/src/bin/proxmox_backup_manager/acme.rs +++ b/src/bin/proxmox_backup_manager/acme.rs @@ -126,6 +126,8 @@ async fn register_account( } Ok(n) if n == KNOWN_ACME_DIRECTORIES.len() => { input.clear(); + print!("Enter custom directory URI: "); + std::io::stdout().flush()?; std::io::stdin().read_line(&mut input)?; break (input.trim().to_owned(), true); } diff --git a/src/bin/proxmox_backup_manager/ad.rs b/src/bin/proxmox_backup_manager/ad.rs new file mode 100644 index 000000000..90b341436 --- /dev/null +++ b/src/bin/proxmox_backup_manager/ad.rs @@ -0,0 +1,105 @@ +use anyhow::Error; +use serde_json::Value; + +use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; +use proxmox_schema::api; + +use pbs_api_types::REALM_ID_SCHEMA; + +use crate::api2; + +#[api( + input: { + properties: { + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// List configured AD realms +fn list_ad_realms(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::access::ad::API_METHOD_LIST_AD_REALMS; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options() + .column(ColumnConfig::new("realm")) + .column(ColumnConfig::new("server1")) + .column(ColumnConfig::new("comment")); + + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +#[api( + input: { + properties: { + realm: { + schema: REALM_ID_SCHEMA, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + 
} + } +)] +/// Show AD realm configuration +pub fn show_ad_realm(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::access::ad::API_METHOD_READ_AD_REALM; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options(); + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +pub fn ad_commands() -> CommandLineInterface { + let cmd_def = CliCommandMap::new() + .insert("list", CliCommand::new(&API_METHOD_LIST_AD_REALMS)) + .insert( + "show", + CliCommand::new(&crate::API_METHOD_SHOW_AD_REALM) + .arg_param(&["realm"]) + .completion_cb("realm", pbs_config::domains::complete_ad_realm_name), + ) + .insert( + "create", + CliCommand::new(&api2::config::access::ad::API_METHOD_CREATE_AD_REALM) + .arg_param(&["realm"]) + .completion_cb("realm", pbs_config::domains::complete_ad_realm_name), + ) + .insert( + "update", + CliCommand::new(&api2::config::access::ad::API_METHOD_UPDATE_AD_REALM) + .arg_param(&["realm"]) + .completion_cb("realm", pbs_config::domains::complete_ad_realm_name), + ) + .insert( + "delete", + CliCommand::new(&api2::config::access::ldap::API_METHOD_DELETE_LDAP_REALM) + .arg_param(&["realm"]) + .completion_cb("realm", pbs_config::domains::complete_ad_realm_name), + ) + .insert( + "sync", + CliCommand::new(&crate::API_METHOD_SYNC_LDAP_REALM) + .arg_param(&["realm"]) + .completion_cb("realm", pbs_config::domains::complete_ad_realm_name), + ); + + cmd_def.into() +} diff --git a/src/bin/proxmox_backup_manager/disk.rs b/src/bin/proxmox_backup_manager/disk.rs index 9c55a989b..cd7a0b7aa 100644 --- a/src/bin/proxmox_backup_manager/disk.rs +++ b/src/bin/proxmox_backup_manager/disk.rs @@ -3,7 +3,7 @@ use serde_json::Value; use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; use proxmox_schema::api; -use std::io::{IsTerminal, Write}; +use std::io::IsTerminal; use pbs_api_types::{ ZfsCompressionType, ZfsRaidLevel, BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, @@ -155,17 +155,16 @@ async fn wipe_disk(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result< // If we're on a TTY, query the user if std::io::stdin().is_terminal() { - println!("You are about to wipe block device {}.", param["disk"]); - print!("Are you sure you want to continue? 
(y/N): "); - let _ = std::io::stdout().flush(); - use std::io::{BufRead, BufReader}; - let mut line = String::new(); - match BufReader::new(std::io::stdin()).read_line(&mut line) { - Ok(_) => match line.trim() { - "y" | "Y" => (), // continue - _ => bail!("Aborting."), - }, - Err(err) => bail!("Failed to read line - {err}."), + let confirmation = Confirmation::query_with_default( + format!( + "You are about to wipe block device {}.\nAre you sure you want to continue?", + param["disk"] + ) + .as_str(), + Confirmation::No, + )?; + if confirmation.is_no() { + bail!("Aborting."); } } diff --git a/src/bin/proxmox_backup_manager/ldap.rs b/src/bin/proxmox_backup_manager/ldap.rs index 7ff4ad1d8..196825a6f 100644 --- a/src/bin/proxmox_backup_manager/ldap.rs +++ b/src/bin/proxmox_backup_manager/ldap.rs @@ -98,7 +98,7 @@ fn show_ldap_realm(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result Result { +pub async fn sync_ldap_realm(param: Value) -> Result { let realm = required_string_param(¶m, "realm")?; let client = connect_to_localhost()?; diff --git a/src/bin/proxmox_backup_manager/mod.rs b/src/bin/proxmox_backup_manager/mod.rs index af6444a81..218ae4fbe 100644 --- a/src/bin/proxmox_backup_manager/mod.rs +++ b/src/bin/proxmox_backup_manager/mod.rs @@ -2,6 +2,8 @@ mod acl; pub use acl::*; mod acme; pub use acme::*; +mod ad; +pub use ad::*; mod cert; pub use cert::*; mod datastore; @@ -28,6 +30,8 @@ mod disk; pub use disk::*; mod node; pub use node::*; +mod notifications; +pub use notifications::*; mod openid; pub use openid::*; mod traffic_control; diff --git a/src/bin/proxmox_backup_manager/network.rs b/src/bin/proxmox_backup_manager/network.rs index deac5b1b2..0f0a50a89 100644 --- a/src/bin/proxmox_backup_manager/network.rs +++ b/src/bin/proxmox_backup_manager/network.rs @@ -129,6 +129,24 @@ fn pending_network_changes( Ok(Value::Null) } +#[api()] +/// Reload network changes +async fn reload_network_changes( + mut param: Value, + rpcenv: &mut dyn RpcEnvironment, +) -> Result { + param["node"] = "localhost".into(); + + let info = &api2::node::network::API_METHOD_RELOAD_NETWORK_CONFIG; + let result = match info.handler { + ApiHandler::Async(handler) => (handler)(param, info, rpcenv).await?, + _ => unreachable!(), + }; + crate::wait_for_local_worker(result.as_str().unwrap()).await?; + + Ok(Value::Null) +} + pub fn network_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert("list", CliCommand::new(&API_METHOD_LIST_NETWORK_DEVICES)) @@ -168,8 +186,7 @@ pub fn network_commands() -> CommandLineInterface { ) .insert( "reload", - CliCommand::new(&api2::node::network::API_METHOD_RELOAD_NETWORK_CONFIG) - .fixed_param("node", String::from("localhost")), + CliCommand::new(&API_METHOD_RELOAD_NETWORK_CHANGES), ); cmd_def.into() diff --git a/src/bin/proxmox_backup_manager/notifications/gotify.rs b/src/bin/proxmox_backup_manager/notifications/gotify.rs new file mode 100644 index 000000000..541df82e7 --- /dev/null +++ b/src/bin/proxmox_backup_manager/notifications/gotify.rs @@ -0,0 +1,93 @@ +use anyhow::Error; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use serde_json::Value; + +use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; +use proxmox_schema::api; + +use proxmox_backup::api2; + +#[api( + input: { + properties: { + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// List all endpoints. 
+fn list_endpoints(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::gotify::API_METHOD_LIST_ENDPOINTS; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options() + .column(ColumnConfig::new("disable")) + .column(ColumnConfig::new("name")) + .column(ColumnConfig::new("server")) + .column(ColumnConfig::new("comment")); + + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +#[api( + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// Show a single endpoint. +fn show_endpoint(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::gotify::API_METHOD_GET_ENDPOINT; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options(); + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +pub fn commands() -> CommandLineInterface { + let cmd_def = CliCommandMap::new() + .insert("list", CliCommand::new(&API_METHOD_LIST_ENDPOINTS)) + .insert( + "show", + CliCommand::new(&API_METHOD_SHOW_ENDPOINT).arg_param(&["name"]), + ) + .insert( + "create", + CliCommand::new(&api2::config::notifications::gotify::API_METHOD_ADD_ENDPOINT) + .arg_param(&["name"]), + ) + .insert( + "update", + CliCommand::new(&api2::config::notifications::gotify::API_METHOD_UPDATE_ENDPOINT) + .arg_param(&["name"]), + ) + .insert( + "delete", + CliCommand::new(&api2::config::notifications::gotify::API_METHOD_DELETE_ENDPOINT) + .arg_param(&["name"]), + ); + cmd_def.into() +} diff --git a/src/bin/proxmox_backup_manager/notifications/matchers.rs b/src/bin/proxmox_backup_manager/notifications/matchers.rs new file mode 100644 index 000000000..cfa3aaa40 --- /dev/null +++ b/src/bin/proxmox_backup_manager/notifications/matchers.rs @@ -0,0 +1,93 @@ +use anyhow::Error; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use serde_json::Value; + +use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; +use proxmox_schema::api; + +use proxmox_backup::api2; + +#[api( + input: { + properties: { + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// List notification matchers. +fn list_matchers(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::matchers::API_METHOD_LIST_MATCHERS; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options() + .column(ColumnConfig::new("disable")) + .column(ColumnConfig::new("name")) + .column(ColumnConfig::new("origin")) + .column(ColumnConfig::new("comment")); + + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +#[api( + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// Show a single matcher. 
+fn show_matcher(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::matchers::API_METHOD_GET_MATCHER; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options(); + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +pub fn commands() -> CommandLineInterface { + let cmd_def = CliCommandMap::new() + .insert("list", CliCommand::new(&API_METHOD_LIST_MATCHERS)) + .insert( + "show", + CliCommand::new(&API_METHOD_SHOW_MATCHER).arg_param(&["name"]), + ) + .insert( + "create", + CliCommand::new(&api2::config::notifications::matchers::API_METHOD_ADD_MATCHER) + .arg_param(&["name"]), + ) + .insert( + "update", + CliCommand::new(&api2::config::notifications::matchers::API_METHOD_UPDATE_MATCHER) + .arg_param(&["name"]), + ) + .insert( + "delete", + CliCommand::new(&api2::config::notifications::matchers::API_METHOD_DELETE_MATCHER) + .arg_param(&["name"]), + ); + cmd_def.into() +} diff --git a/src/bin/proxmox_backup_manager/notifications/mod.rs b/src/bin/proxmox_backup_manager/notifications/mod.rs new file mode 100644 index 000000000..678f9c54d --- /dev/null +++ b/src/bin/proxmox_backup_manager/notifications/mod.rs @@ -0,0 +1,21 @@ +use proxmox_router::cli::{CliCommandMap, CommandLineInterface}; + +mod gotify; +mod matchers; +mod sendmail; +mod smtp; +mod targets; + +pub fn notification_commands() -> CommandLineInterface { + let endpoint_def = CliCommandMap::new() + .insert("gotify", gotify::commands()) + .insert("sendmail", sendmail::commands()) + .insert("smtp", smtp::commands()); + + let cmd_def = CliCommandMap::new() + .insert("endpoint", endpoint_def) + .insert("matcher", matchers::commands()) + .insert("target", targets::commands()); + + cmd_def.into() +} diff --git a/src/bin/proxmox_backup_manager/notifications/sendmail.rs b/src/bin/proxmox_backup_manager/notifications/sendmail.rs new file mode 100644 index 000000000..202e15b00 --- /dev/null +++ b/src/bin/proxmox_backup_manager/notifications/sendmail.rs @@ -0,0 +1,94 @@ +use anyhow::Error; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use serde_json::Value; + +use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; +use proxmox_schema::api; + +use proxmox_backup::api2; + +#[api( + input: { + properties: { + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// List all endpoints. +fn list_endpoints(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::sendmail::API_METHOD_LIST_ENDPOINTS; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options() + .column(ColumnConfig::new("disable")) + .column(ColumnConfig::new("name")) + .column(ColumnConfig::new("mailto")) + .column(ColumnConfig::new("mailto-user")) + .column(ColumnConfig::new("comment")); + + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +#[api( + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// Show a single endpoint. 
+fn show_endpoint(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::sendmail::API_METHOD_GET_ENDPOINT; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options(); + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +pub fn commands() -> CommandLineInterface { + let cmd_def = CliCommandMap::new() + .insert("list", CliCommand::new(&API_METHOD_LIST_ENDPOINTS)) + .insert( + "show", + CliCommand::new(&API_METHOD_SHOW_ENDPOINT).arg_param(&["name"]), + ) + .insert( + "create", + CliCommand::new(&api2::config::notifications::sendmail::API_METHOD_ADD_ENDPOINT) + .arg_param(&["name"]), + ) + .insert( + "update", + CliCommand::new(&api2::config::notifications::sendmail::API_METHOD_UPDATE_ENDPOINT) + .arg_param(&["name"]), + ) + .insert( + "delete", + CliCommand::new(&api2::config::notifications::sendmail::API_METHOD_DELETE_ENDPOINT) + .arg_param(&["name"]), + ); + cmd_def.into() +} diff --git a/src/bin/proxmox_backup_manager/notifications/smtp.rs b/src/bin/proxmox_backup_manager/notifications/smtp.rs new file mode 100644 index 000000000..3a2b15ef6 --- /dev/null +++ b/src/bin/proxmox_backup_manager/notifications/smtp.rs @@ -0,0 +1,96 @@ +use anyhow::Error; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use serde_json::Value; + +use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; +use proxmox_schema::api; + +use proxmox_backup::api2; + +#[api( + input: { + properties: { + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// List all endpoints. +fn list_endpoints(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::smtp::API_METHOD_LIST_ENDPOINTS; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options() + .column(ColumnConfig::new("disable")) + .column(ColumnConfig::new("name")) + .column(ColumnConfig::new("server")) + .column(ColumnConfig::new("from-address")) + .column(ColumnConfig::new("mailto")) + .column(ColumnConfig::new("mailto-user")) + .column(ColumnConfig::new("comment")); + + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +#[api( + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// Show a single endpoint. 
+fn show_endpoint(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::smtp::API_METHOD_GET_ENDPOINT; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options(); + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +pub fn commands() -> CommandLineInterface { + let cmd_def = CliCommandMap::new() + .insert("list", CliCommand::new(&API_METHOD_LIST_ENDPOINTS)) + .insert( + "show", + CliCommand::new(&API_METHOD_SHOW_ENDPOINT).arg_param(&["name"]), + ) + .insert( + "create", + CliCommand::new(&api2::config::notifications::smtp::API_METHOD_ADD_ENDPOINT) + .arg_param(&["name"]), + ) + .insert( + "update", + CliCommand::new(&api2::config::notifications::smtp::API_METHOD_UPDATE_ENDPOINT) + .arg_param(&["name"]), + ) + .insert( + "delete", + CliCommand::new(&api2::config::notifications::smtp::API_METHOD_DELETE_ENDPOINT) + .arg_param(&["name"]), + ); + cmd_def.into() +} diff --git a/src/bin/proxmox_backup_manager/notifications/targets.rs b/src/bin/proxmox_backup_manager/notifications/targets.rs new file mode 100644 index 000000000..37603a927 --- /dev/null +++ b/src/bin/proxmox_backup_manager/notifications/targets.rs @@ -0,0 +1,51 @@ +use anyhow::Error; +use serde_json::Value; + +use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; +use proxmox_schema::api; + +use proxmox_backup::api2; + +#[api( + input: { + properties: { + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// List targets. +fn list_targets(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::targets::API_METHOD_LIST_TARGETS; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options() + .column(ColumnConfig::new("disable")) + .column(ColumnConfig::new("name")) + .column(ColumnConfig::new("type")) + .column(ColumnConfig::new("origin")) + .column(ColumnConfig::new("comment")); + + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +pub fn commands() -> CommandLineInterface { + let cmd_def = CliCommandMap::new() + .insert("list", CliCommand::new(&API_METHOD_LIST_TARGETS)) + .insert( + "test", + CliCommand::new(&api2::config::notifications::targets::API_METHOD_TEST_TARGET) + .arg_param(&["name"]), + ); + + cmd_def.into() +} diff --git a/src/bin/sg-tape-cmd.rs b/src/bin/sg-tape-cmd.rs index 56399044d..cd14b660a 100644 --- a/src/bin/sg-tape-cmd.rs +++ b/src/bin/sg-tape-cmd.rs @@ -10,6 +10,7 @@ use pbs_tape::sg_tape::SgTape; use proxmox_backup::tape::encryption_keys::load_key; use serde_json::Value; +use proxmox_log::init_cli_logger; use proxmox_router::{cli::*, RpcEnvironment}; use proxmox_schema::api; use proxmox_uuid::Uuid; @@ -124,7 +125,7 @@ fn set_encryption( } fn main() -> Result<(), Error> { - init_cli_logger("PBS_LOG", "info"); + init_cli_logger("PBS_LOG", proxmox_log::LevelFilter::INFO)?; // check if we are user root or backup let backup_uid = pbs_config::backup_user()?.uid; diff --git a/src/config/acme/mod.rs b/src/config/acme/mod.rs index 3ef620fc8..e1d2241f9 100644 --- a/src/config/acme/mod.rs +++ b/src/config/acme/mod.rs @@ -27,19 +27,15 @@ fn root_only() -> 
CreateOptions {
        .perm(nix::sys::stat::Mode::from_bits_truncate(0o700))
}

-fn create_acme_subdir(dir: &str) -> nix::Result<()> {
-    match proxmox_sys::fs::create_dir(dir, root_only()) {
-        Ok(()) => Ok(()),
-        Err(err) if err.already_exists() => Ok(()),
-        Err(err) => Err(err),
-    }
+fn create_acme_subdir(dir: &str) -> Result<(), Error> {
+    proxmox_sys::fs::ensure_dir_exists(dir, &root_only(), false)
}

-pub(crate) fn make_acme_dir() -> nix::Result<()> {
+pub(crate) fn make_acme_dir() -> Result<(), Error> {
    create_acme_subdir(ACME_DIR)
}

-pub(crate) fn make_acme_account_dir() -> nix::Result<()> {
+pub(crate) fn make_acme_account_dir() -> Result<(), Error> {
    make_acme_dir()?;
    create_acme_subdir(ACME_ACCOUNT_DIR)
}
diff --git a/src/config/acme/plugin.rs b/src/config/acme/plugin.rs
index d3b2189d3..74ef04592 100644
--- a/src/config/acme/plugin.rs
+++ b/src/config/acme/plugin.rs
@@ -1,5 +1,6 @@
+use std::sync::LazyLock;
+
use anyhow::Error;
-use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use serde_json::Value;

@@ -15,9 +16,7 @@ pub const PLUGIN_ID_SCHEMA: Schema = StringSchema::new("ACME Challenge Plugin ID
    .max_length(32)
    .schema();

-lazy_static! {
-    pub static ref CONFIG: SectionConfig = init();
-}
+pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);

#[api(
    properties: {
@@ -147,7 +146,7 @@ pub fn config() -> Result<(PluginData, [u8; 32]), Error> {
    let digest = openssl::sha::sha256(content.as_bytes());
    let mut data = CONFIG.parse(ACME_PLUGIN_CFG_FILENAME, &content)?;

-    if data.sections.get("standalone").is_none() {
+    if !data.sections.contains_key("standalone") {
        let standalone = StandalonePlugin::default();
        data.set_data("standalone", "standalone", &standalone)
            .unwrap();
diff --git a/src/lib.rs b/src/lib.rs
index c89884c87..8633378ca 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -31,8 +31,6 @@ pub mod acme;

pub mod client_helpers;

-pub mod rrd_cache;
-
pub mod traffic_control_cache;

/// Get the server's certificate info (from `proxy.pem`).
diff --git a/src/rrd_cache.rs b/src/rrd_cache.rs
deleted file mode 100644
index 02bb93723..000000000
--- a/src/rrd_cache.rs
+++ /dev/null
@@ -1,131 +0,0 @@
-//! Round Robin Database cache
-//!
-//! RRD files are stored under `/var/lib/proxmox-backup/rrdb/`. Only a
-//! single process may access and update those files, so we initialize
-//! and update RRD data inside `proxmox-backup-proxy`.
- -use std::path::Path; - -use anyhow::{format_err, Error}; -use once_cell::sync::OnceCell; - -use proxmox_rrd::rrd::{AggregationFn, DataSourceType, Database}; -use proxmox_rrd::Cache; -use proxmox_sys::fs::CreateOptions; - -use pbs_api_types::{RRDMode, RRDTimeFrame}; -use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M; - -const RRD_CACHE_BASEDIR: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/rrdb"); - -static RRD_CACHE: OnceCell = OnceCell::new(); - -/// Get the RRD cache instance -pub fn get_rrd_cache() -> Result<&'static Cache, Error> { - RRD_CACHE - .get() - .ok_or_else(|| format_err!("RRD cache not initialized!")) -} - -/// Initialize the RRD cache instance -/// -/// Note: Only a single process must do this (proxmox-backup-proxy) -pub fn initialize_rrd_cache() -> Result<&'static Cache, Error> { - let backup_user = pbs_config::backup_user()?; - - let file_options = CreateOptions::new() - .owner(backup_user.uid) - .group(backup_user.gid); - - let dir_options = CreateOptions::new() - .owner(backup_user.uid) - .group(backup_user.gid); - - let apply_interval = 30.0 * 60.0; // 30 minutes - - let cache = Cache::new( - RRD_CACHE_BASEDIR, - Some(file_options), - Some(dir_options), - apply_interval, - load_callback, - )?; - - RRD_CACHE - .set(cache) - .map_err(|_| format_err!("RRD cache already initialized!"))?; - - Ok(RRD_CACHE.get().unwrap()) -} - -fn load_callback(path: &Path, _rel_path: &str, dst: DataSourceType) -> Database { - match Database::load(path, true) { - Ok(rrd) => rrd, - Err(err) => { - if err.kind() != std::io::ErrorKind::NotFound { - log::warn!( - "overwriting RRD file {:?}, because of load error: {}", - path, - err - ); - } - Cache::create_proxmox_backup_default_rrd(dst) - } - } -} - -/// Extracts data for the specified time frame from from RRD cache -pub fn extract_rrd_data( - basedir: &str, - name: &str, - timeframe: RRDTimeFrame, - mode: RRDMode, -) -> Result, Error> { - let end = proxmox_time::epoch_f64() as u64; - - let (start, resolution) = match timeframe { - RRDTimeFrame::Hour => (end - 3600, 60), - RRDTimeFrame::Day => (end - 3600 * 24, 60), - RRDTimeFrame::Week => (end - 3600 * 24 * 7, 30 * 60), - RRDTimeFrame::Month => (end - 3600 * 24 * 30, 30 * 60), - RRDTimeFrame::Year => (end - 3600 * 24 * 365, 6 * 60 * 60), - RRDTimeFrame::Decade => (end - 10 * 3600 * 24 * 366, 7 * 86400), - }; - - let cf = match mode { - RRDMode::Max => AggregationFn::Maximum, - RRDMode::Average => AggregationFn::Average, - }; - - let rrd_cache = get_rrd_cache()?; - - rrd_cache.extract_cached_data(basedir, name, cf, resolution, Some(start), Some(end)) -} - -/// Sync/Flush the RRD journal -pub fn rrd_sync_journal() { - if let Ok(rrd_cache) = get_rrd_cache() { - if let Err(err) = rrd_cache.sync_journal() { - log::error!("rrd_sync_journal failed - {}", err); - } - } -} -/// Update RRD Gauge values -pub fn rrd_update_gauge(name: &str, value: f64) { - if let Ok(rrd_cache) = get_rrd_cache() { - let now = proxmox_time::epoch_f64(); - if let Err(err) = rrd_cache.update_value(name, now, value, DataSourceType::Gauge) { - log::error!("rrd::update_value '{}' failed - {}", name, err); - } - } -} - -/// Update RRD Derive values -pub fn rrd_update_derive(name: &str, value: f64) { - if let Ok(rrd_cache) = get_rrd_cache() { - let now = proxmox_time::epoch_f64(); - if let Err(err) = rrd_cache.update_value(name, now, value, DataSourceType::Derive) { - log::error!("rrd::update_value '{}' failed - {}", name, err); - } - } -} diff --git a/src/server/email_notifications.rs b/src/server/email_notifications.rs deleted 
file mode 100644 index 43b556563..000000000 --- a/src/server/email_notifications.rs +++ /dev/null @@ -1,763 +0,0 @@ -use anyhow::Error; -use serde_json::json; - -use handlebars::{ - Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, TemplateError, -}; - -use proxmox_human_byte::HumanByte; -use proxmox_lang::try_block; -use proxmox_schema::ApiType; -use proxmox_sys::email::sendmail; - -use pbs_api_types::{ - APTUpdateInfo, DataStoreConfig, DatastoreNotify, GarbageCollectionStatus, Notify, - SyncJobConfig, TapeBackupJobSetup, User, Userid, VerificationJobConfig, -}; - -const GC_OK_TEMPLATE: &str = r###" - -Datastore: {{datastore}} -Task ID: {{status.upid}} -Index file count: {{status.index-file-count}} - -Removed garbage: {{human-bytes status.removed-bytes}} -Removed chunks: {{status.removed-chunks}} -Removed bad chunks: {{status.removed-bad}} - -Leftover bad chunks: {{status.still-bad}} -Pending removals: {{human-bytes status.pending-bytes}} (in {{status.pending-chunks}} chunks) - -Original Data usage: {{human-bytes status.index-data-bytes}} -On-Disk usage: {{human-bytes status.disk-bytes}} ({{relative-percentage status.disk-bytes status.index-data-bytes}}) -On-Disk chunks: {{status.disk-chunks}} - -Deduplication Factor: {{deduplication-factor}} - -Garbage collection successful. - - -Please visit the web interface for further details: - - - -"###; - -const GC_ERR_TEMPLATE: &str = r###" - -Datastore: {{datastore}} - -Garbage collection failed: {{error}} - - -Please visit the web interface for further details: - - - -"###; - -const VERIFY_OK_TEMPLATE: &str = r###" - -Job ID: {{job.id}} -Datastore: {{job.store}} - -Verification successful. - - -Please visit the web interface for further details: - - - -"###; - -const VERIFY_ERR_TEMPLATE: &str = r###" - -Job ID: {{job.id}} -Datastore: {{job.store}} - -Verification failed on these snapshots/groups: - -{{#each errors}} - {{this~}} -{{/each}} - - -Please visit the web interface for further details: - - - -"###; - -const SYNC_OK_TEMPLATE: &str = r###" - -Job ID: {{job.id}} -Datastore: {{job.store}} -{{#if job.remote~}} -Remote: {{job.remote}} -Remote Store: {{job.remote-store}} -{{else~}} -Local Source Store: {{job.remote-store}} -{{/if}} -Synchronization successful. - - -Please visit the web interface for further details: - - - -"###; - -const SYNC_ERR_TEMPLATE: &str = r###" - -Job ID: {{job.id}} -Datastore: {{job.store}} -{{#if job.remote~}} -Remote: {{job.remote}} -Remote Store: {{job.remote-store}} -{{else~}} -Local Source Store: {{job.remote-store}} -{{/if}} -Synchronization failed: {{error}} - - -Please visit the web interface for further details: - - - -"###; - -const PRUNE_OK_TEMPLATE: &str = r###" - -Job ID: {{jobname}} -Datastore: {{store}} - -Pruning successful. 
- - -Please visit the web interface for further details: - - - -"###; - -const PRUNE_ERR_TEMPLATE: &str = r###" - -Job ID: {{jobname}} -Datastore: {{store}} - -Pruning failed: {{error}} - - -Please visit the web interface for further details: - - - -"###; - -const PACKAGE_UPDATES_TEMPLATE: &str = r###" -Proxmox Backup Server has the following updates available: -{{#each updates }} - {{Package}}: {{OldVersion}} -> {{Version~}} -{{/each }} - -To upgrade visit the web interface: - - - -"###; - -const TAPE_BACKUP_OK_TEMPLATE: &str = r###" - -{{#if id ~}} -Job ID: {{id}} -{{/if~}} -Datastore: {{job.store}} -Tape Pool: {{job.pool}} -Tape Drive: {{job.drive}} - -{{#if snapshot-list ~}} -Snapshots included: - -{{#each snapshot-list~}} -{{this}} -{{/each~}} -{{/if}} -Duration: {{duration}} -{{#if used-tapes }} -Used Tapes: -{{#each used-tapes~}} -{{this}} -{{/each~}} -{{/if}} -Tape Backup successful. - - -Please visit the web interface for further details: - - - -"###; - -const TAPE_BACKUP_ERR_TEMPLATE: &str = r###" - -{{#if id ~}} -Job ID: {{id}} -{{/if~}} -Datastore: {{job.store}} -Tape Pool: {{job.pool}} -Tape Drive: {{job.drive}} - -{{#if snapshot-list ~}} -Snapshots included: - -{{#each snapshot-list~}} -{{this}} -{{/each~}} -{{/if}} -{{#if used-tapes }} -Used Tapes: -{{#each used-tapes~}} -{{this}} -{{/each~}} -{{/if}} -Tape Backup failed: {{error}} - - -Please visit the web interface for further details: - - - -"###; - -const ACME_CERTIFICATE_ERR_RENEWAL: &str = r###" - -Proxmox Backup Server was not able to renew a TLS certificate. - -Error: {{error}} - -Please visit the web interface for further details: - - - -"###; - -lazy_static::lazy_static! { - - static ref HANDLEBARS: Handlebars<'static> = { - let mut hb = Handlebars::new(); - let result: Result<(), TemplateError> = try_block!({ - - hb.set_strict_mode(true); - hb.register_escape_fn(handlebars::no_escape); - - hb.register_helper("human-bytes", Box::new(handlebars_humam_bytes_helper)); - hb.register_helper("relative-percentage", Box::new(handlebars_relative_percentage_helper)); - - hb.register_template_string("gc_ok_template", GC_OK_TEMPLATE)?; - hb.register_template_string("gc_err_template", GC_ERR_TEMPLATE)?; - - hb.register_template_string("verify_ok_template", VERIFY_OK_TEMPLATE)?; - hb.register_template_string("verify_err_template", VERIFY_ERR_TEMPLATE)?; - - hb.register_template_string("sync_ok_template", SYNC_OK_TEMPLATE)?; - hb.register_template_string("sync_err_template", SYNC_ERR_TEMPLATE)?; - - hb.register_template_string("prune_ok_template", PRUNE_OK_TEMPLATE)?; - hb.register_template_string("prune_err_template", PRUNE_ERR_TEMPLATE)?; - - hb.register_template_string("tape_backup_ok_template", TAPE_BACKUP_OK_TEMPLATE)?; - hb.register_template_string("tape_backup_err_template", TAPE_BACKUP_ERR_TEMPLATE)?; - - hb.register_template_string("package_update_template", PACKAGE_UPDATES_TEMPLATE)?; - - hb.register_template_string("certificate_renewal_err_template", ACME_CERTIFICATE_ERR_RENEWAL)?; - - Ok(()) - }); - - if let Err(err) = result { - eprintln!("error during template registration: {err}"); - } - - hb - }; -} - -/// Summary of a successful Tape Job -#[derive(Default)] -pub struct TapeBackupJobSummary { - /// The list of snaphots backed up - pub snapshot_list: Vec, - /// The total time of the backup job - pub duration: std::time::Duration, - /// The labels of the used tapes of the backup job - pub used_tapes: Option>, -} - -fn send_job_status_mail(email: &str, subject: &str, text: &str) -> Result<(), Error> { - let (config, 
_) = crate::config::node::config()?; - let from = config.email_from; - - // NOTE: some (web)mailers have big problems displaying text mails, so include html as well - let escaped_text = handlebars::html_escape(text); - let html = format!("

\n{escaped_text}\n
");
-
-    let nodename = proxmox_sys::nodename();
-
-    let author = format!("Proxmox Backup Server - {nodename}");
-
-    sendmail(
-        &[email],
-        subject,
-        Some(text),
-        Some(&html),
-        from.as_deref(),
-        Some(&author),
-    )?;
-
-    Ok(())
-}
-
-pub fn send_gc_status(
-    email: &str,
-    notify: DatastoreNotify,
-    datastore: &str,
-    status: &GarbageCollectionStatus,
-    result: &Result<(), Error>,
-) -> Result<(), Error> {
-    match notify.gc {
-        None => { /* send notifications by default */ }
-        Some(notify) => {
-            if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
-                return Ok(());
-            }
-        }
-    }
-
-    let (fqdn, port) = get_server_url();
-    let mut data = json!({
-        "datastore": datastore,
-        "fqdn": fqdn,
-        "port": port,
-    });
-
-    let text = match result {
-        Ok(()) => {
-            let deduplication_factor = if status.disk_bytes > 0 {
-                (status.index_data_bytes as f64) / (status.disk_bytes as f64)
-            } else {
-                1.0
-            };
-
-            data["status"] = json!(status);
-            data["deduplication-factor"] = format!("{:.2}", deduplication_factor).into();
-
-            HANDLEBARS.render("gc_ok_template", &data)?
-        }
-        Err(err) => {
-            data["error"] = err.to_string().into();
-            HANDLEBARS.render("gc_err_template", &data)?
-        }
-    };
-
-    let subject = match result {
-        Ok(()) => format!("Garbage Collect Datastore '{datastore}' successful"),
-        Err(_) => format!("Garbage Collect Datastore '{datastore}' failed"),
-    };
-
-    send_job_status_mail(email, &subject, &text)?;
-
-    Ok(())
-}
-
-pub fn send_verify_status(
-    email: &str,
-    notify: DatastoreNotify,
-    job: VerificationJobConfig,
-    result: &Result<Vec<String>, Error>,
-) -> Result<(), Error> {
-    let (fqdn, port) = get_server_url();
-    let mut data = json!({
-        "job": job,
-        "fqdn": fqdn,
-        "port": port,
-    });
-
-    let mut result_is_ok = false;
-
-    let text = match result {
-        Ok(errors) if errors.is_empty() => {
-            result_is_ok = true;
-            HANDLEBARS.render("verify_ok_template", &data)?
-        }
-        Ok(errors) => {
-            data["errors"] = json!(errors);
-            HANDLEBARS.render("verify_err_template", &data)?
-        }
-        Err(_) => {
-            // aborted job - do not send any email
-            return Ok(());
-        }
-    };
-
-    match notify.verify {
-        None => { /* send notifications by default */ }
-        Some(notify) => {
-            if notify == Notify::Never || (result_is_ok && notify == Notify::Error) {
-                return Ok(());
-            }
-        }
-    }
-
-    let subject = match result {
-        Ok(errors) if errors.is_empty() => format!("Verify Datastore '{}' successful", job.store),
-        _ => format!("Verify Datastore '{}' failed", job.store),
-    };
-
-    send_job_status_mail(email, &subject, &text)?;
-
-    Ok(())
-}
-
-pub fn send_prune_status(
-    store: &str,
-    jobname: &str,
-    result: &Result<(), Error>,
-) -> Result<(), Error> {
-    let (email, notify) = match lookup_datastore_notify_settings(store) {
-        (Some(email), notify) => (email, notify),
-        (None, _) => return Ok(()),
-    };
-
-    let notify_prune = notify.prune.unwrap_or(Notify::Error);
-    if notify_prune == Notify::Never || (result.is_ok() && notify_prune == Notify::Error) {
-        return Ok(());
-    }
-
-    let (fqdn, port) = get_server_url();
-    let mut data = json!({
-        "jobname": jobname,
-        "store": store,
-        "fqdn": fqdn,
-        "port": port,
-    });
-
-    let text = match result {
-        Ok(()) => HANDLEBARS.render("prune_ok_template", &data)?,
-        Err(err) => {
-            data["error"] = err.to_string().into();
-            HANDLEBARS.render("prune_err_template", &data)?
-        }
-    };
-
-    let subject = match result {
-        Ok(()) => format!("Pruning datastore '{store}' successful"),
-        Err(_) => format!("Pruning datastore '{store}' failed"),
-    };
-
-    send_job_status_mail(&email, &subject, &text)?;
-
-    Ok(())
-}
-
-pub fn send_sync_status(
-    email: &str,
-    notify: DatastoreNotify,
-    job: &SyncJobConfig,
-    result: &Result<(), Error>,
-) -> Result<(), Error> {
-    match notify.sync {
-        None => { /* send notifications by default */ }
-        Some(notify) => {
-            if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
-                return Ok(());
-            }
-        }
-    }
-
-    let (fqdn, port) = get_server_url();
-    let mut data = json!({
-        "job": job,
-        "fqdn": fqdn,
-        "port": port,
-    });
-
-    let text = match result {
-        Ok(()) => HANDLEBARS.render("sync_ok_template", &data)?,
-        Err(err) => {
-            data["error"] = err.to_string().into();
-            HANDLEBARS.render("sync_err_template", &data)?
-        }
-    };
-
-    let tmp_src_string;
-    let source_str = if let Some(remote) = &job.remote {
-        tmp_src_string = format!("Sync remote '{}'", remote);
-        &tmp_src_string
-    } else {
-        "Sync local"
-    };
-
-    let subject = match result {
-        Ok(()) => format!("{} datastore '{}' successful", source_str, job.remote_store,),
-        Err(_) => format!("{} datastore '{}' failed", source_str, job.remote_store,),
-    };
-
-    send_job_status_mail(email, &subject, &text)?;
-
-    Ok(())
-}
-
-pub fn send_tape_backup_status(
-    email: &str,
-    id: Option<&str>,
-    job: &TapeBackupJobSetup,
-    result: &Result<(), Error>,
-    summary: TapeBackupJobSummary,
-) -> Result<(), Error> {
-    let (fqdn, port) = get_server_url();
-    let duration: proxmox_time::TimeSpan = summary.duration.into();
-    let mut data = json!({
-        "job": job,
-        "fqdn": fqdn,
-        "port": port,
-        "id": id,
-        "snapshot-list": summary.snapshot_list,
-        "used-tapes": summary.used_tapes,
-        "duration": duration.to_string(),
-    });
-
-    let text = match result {
-        Ok(()) => HANDLEBARS.render("tape_backup_ok_template", &data)?,
-        Err(err) => {
-            data["error"] = err.to_string().into();
-            HANDLEBARS.render("tape_backup_err_template", &data)?
-        }
-    };
-
-    let subject = match (result, id) {
-        (Ok(()), Some(id)) => format!("Tape Backup '{id}' datastore '{}' successful", job.store,),
-        (Ok(()), None) => format!("Tape Backup datastore '{}' successful", job.store,),
-        (Err(_), Some(id)) => format!("Tape Backup '{id}' datastore '{}' failed", job.store,),
-        (Err(_), None) => format!("Tape Backup datastore '{}' failed", job.store,),
-    };
-
-    send_job_status_mail(email, &subject, &text)?;
-
-    Ok(())
-}
-
-/// Send email to a person to request a manual media change
-pub fn send_load_media_email(
-    changer: bool,
-    device: &str,
-    label_text: &str,
-    to: &str,
-    reason: Option<String>,
-) -> Result<(), Error> {
-    use std::fmt::Write as _;
-
-    let device_type = if changer { "changer" } else { "drive" };
-
-    let subject = format!("Load Media '{label_text}' request for {device_type} '{device}'");
-
-    let mut text = String::new();
-
-    if let Some(reason) = reason {
-        let _ = write!(
-            text,
-            "The {device_type} has the wrong or no tape(s) inserted. Error:\n{reason}\n\n"
-        );
-    }
-
-    if changer {
-        text.push_str("Please insert the requested media into the changer.\n\n");
-        let _ = writeln!(text, "Changer: {device}");
-    } else {
-        text.push_str("Please insert the requested media into the backup drive.\n\n");
-        let _ = writeln!(text, "Drive: {device}");
-    }
-    let _ = writeln!(text, "Media: {label_text}");
-
-    send_job_status_mail(to, &subject, &text)
-}
-
-fn get_server_url() -> (String, usize) {
-    // user will surely request that they can change this
-
-    let nodename = proxmox_sys::nodename();
-    let mut fqdn = nodename.to_owned();
-
-    if let Ok(resolv_conf) = crate::api2::node::dns::read_etc_resolv_conf() {
-        if let Some(search) = resolv_conf["search"].as_str() {
-            fqdn.push('.');
-            fqdn.push_str(search);
-        }
-    }
-
-    let port = 8007;
-
-    (fqdn, port)
-}
-
-pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> {
-    // update mails always go to the root@pam configured email..
-    if let Some(email) = lookup_user_email(Userid::root_userid()) {
-        let nodename = proxmox_sys::nodename();
-        let subject = format!("New software packages available ({nodename})");
-
-        let (fqdn, port) = get_server_url();
-
-        let text = HANDLEBARS.render(
-            "package_update_template",
-            &json!({
-                "fqdn": fqdn,
-                "port": port,
-                "updates": updates,
-            }),
-        )?;
-
-        send_job_status_mail(&email, &subject, &text)?;
-    }
-    Ok(())
-}
-
-/// send email on certificate renewal failure.
-pub fn send_certificate_renewal_mail(result: &Result<(), Error>) -> Result<(), Error> {
-    let error: String = match result {
-        Err(e) => e.to_string(),
-        _ => return Ok(()),
-    };
-
-    if let Some(email) = lookup_user_email(Userid::root_userid()) {
-        let (fqdn, port) = get_server_url();
-
-        let text = HANDLEBARS.render(
-            "certificate_renewal_err_template",
-            &json!({
-                "fqdn": fqdn,
-                "port": port,
-                "error": error,
-            }),
-        )?;
-
-        let subject = "Could not renew certificate";
-
-        send_job_status_mail(&email, subject, &text)?;
-    }
-
-    Ok(())
-}
-
-/// Lookup users email address
-pub fn lookup_user_email(userid: &Userid) -> Option<String> {
-    if let Ok(user_config) = pbs_config::user::cached_config() {
-        if let Ok(user) = user_config.lookup::<User>("user", userid.as_str()) {
-            return user.email;
-        }
-    }
-
-    None
-}
-
-/// Lookup Datastore notify settings
-pub fn lookup_datastore_notify_settings(store: &str) -> (Option<String>, DatastoreNotify) {
-    let mut email = None;
-
-    let notify = DatastoreNotify {
-        gc: None,
-        verify: None,
-        sync: None,
-        prune: None,
-    };
-
-    let (config, _digest) = match pbs_config::datastore::config() {
-        Ok(result) => result,
-        Err(_) => return (email, notify),
-    };
-
-    let config: DataStoreConfig = match config.lookup("datastore", store) {
-        Ok(result) => result,
-        Err(_) => return (email, notify),
-    };
-
-    email = match config.notify_user {
-        Some(ref userid) => lookup_user_email(userid),
-        None => lookup_user_email(Userid::root_userid()),
-    };
-
-    let notify_str = config.notify.unwrap_or_default();
-
-    if let Ok(value) = DatastoreNotify::API_SCHEMA.parse_property_string(&notify_str) {
-        if let Ok(notify) = serde_json::from_value(value) {
-            return (email, notify);
-        }
-    }
-
-    (email, notify)
-}
-
-// Handlerbar helper functions
-
-fn handlebars_humam_bytes_helper(
-    h: &Helper,
-    _: &Handlebars,
-    _: &Context,
-    _rc: &mut RenderContext,
-    out: &mut dyn Output,
-) -> HelperResult {
-    let param = h
-        .param(0)
-        .and_then(|v| v.value().as_u64())
-        .ok_or_else(|| RenderError::new("human-bytes: param not found"))?;
-
-    out.write(&HumanByte::from(param).to_string())?;
-
-    Ok(())
-}
-
-fn handlebars_relative_percentage_helper(
-    h: &Helper,
-    _: &Handlebars,
-    _: &Context,
-    _rc: &mut RenderContext,
-    out: &mut dyn Output,
-) -> HelperResult {
-    let param0 = h
-        .param(0)
-        .and_then(|v| v.value().as_f64())
-        .ok_or_else(|| RenderError::new("relative-percentage: param0 not found"))?;
-    let param1 = h
-        .param(1)
-        .and_then(|v| v.value().as_f64())
-        .ok_or_else(|| RenderError::new("relative-percentage: param1 not found"))?;
-
-    if param1 == 0.0 {
-        out.write("-")?;
-    } else {
-        out.write(&format!("{:.2}%", (param0 * 100.0) / param1))?;
-    }
-    Ok(())
-}
-
-#[test]
-fn test_template_register() {
-    HANDLEBARS.get_helper("human-bytes").unwrap();
-    HANDLEBARS.get_helper("relative-percentage").unwrap();
-
-    assert!(HANDLEBARS.has_template("gc_ok_template"));
-    assert!(HANDLEBARS.has_template("gc_err_template"));
-
-    assert!(HANDLEBARS.has_template("verify_ok_template"));
-    assert!(HANDLEBARS.has_template("verify_err_template"));
-
-    assert!(HANDLEBARS.has_template("sync_ok_template"));
-    assert!(HANDLEBARS.has_template("sync_err_template"));
-
-    assert!(HANDLEBARS.has_template("tape_backup_ok_template"));
-    assert!(HANDLEBARS.has_template("tape_backup_err_template"));
-
-    assert!(HANDLEBARS.has_template("package_update_template"));
-
-    assert!(HANDLEBARS.has_template("certificate_renewal_err_template"));
-}
diff --git a/src/server/gc_job.rs b/src/server/gc_job.rs
index 41375d72c..648350286 100644
--- a/src/server/gc_job.rs
+++ b/src/server/gc_job.rs
@@ -1,7 +1,7 @@
 use anyhow::Error;
 use std::sync::Arc;
 
-use proxmox_sys::task_log;
+use tracing::info;
 
 use pbs_api_types::Authid;
 use pbs_datastore::DataStore;
@@ -19,8 +19,6 @@ pub fn do_garbage_collection_job(
) -> Result<String, Error> {
     let store = datastore.name().to_string();
 
-    let (email, notify) = crate::server::lookup_datastore_notify_settings(&store);
-
     let worker_type = job.jobtype().to_string();
     let upid_str = WorkerTask::new_thread(
         &worker_type,
@@ -30,9 +28,9 @@ pub fn do_garbage_collection_job(
         move |worker| {
             job.start(&worker.upid().to_string())?;
 
-            task_log!(worker, "starting garbage collection on store {store}");
+            info!("starting garbage collection on store {store}");
             if let Some(event_str) = schedule {
-                task_log!(worker, "task triggered by schedule '{event_str}'");
+                info!("task triggered by schedule '{event_str}'");
             }
 
             let result = datastore.garbage_collection(&*worker, worker.upid());
@@ -43,11 +41,9 @@ pub fn do_garbage_collection_job(
                 eprintln!("could not finish job state for {}: {err}", job.jobtype());
             }
 
-            if let Some(email) = email {
-                let gc_status = datastore.last_gc_status();
-                if let Err(err) = send_gc_status(&email, notify, &store, &gc_status, &result) {
-                    eprintln!("send gc notification failed: {err}");
-                }
+            let gc_status = datastore.last_gc_status();
+            if let Err(err) = send_gc_status(&store, &gc_status, &result) {
+                eprintln!("send gc notification failed: {err}");
             }
 
             result
diff --git a/src/server/metric_collection/metric_server.rs b/src/server/metric_collection/metric_server.rs
new file mode 100644
index 000000000..ba20628a0
--- /dev/null
+++ b/src/server/metric_collection/metric_server.rs
@@ -0,0 +1,156 @@
+use std::sync::Arc;
+
+use anyhow::Error;
+use serde_json::{json, Value};
+
+use proxmox_metrics::MetricsData;
+
+use super::{DiskStat, HostStats};
+
+pub async fn send_data_to_metric_servers(
+    stats: Arc<(HostStats, DiskStat, Vec<DiskStat>)>,
+) -> Result<(), Error> {
+    let (config, _digest) = pbs_config::metrics::config()?;
+    let channel_list = get_metric_server_connections(config)?;
+
+    if channel_list.is_empty() {
+        return Ok(());
+    }
+
+    let ctime = proxmox_time::epoch_i64();
+    let nodename = proxmox_sys::nodename();
+
+    let mut values = Vec::new();
+
+    let mut cpuvalue = match &stats.0.proc {
+        Some(stat) => serde_json::to_value(stat)?,
+        None => json!({}),
+    };
+
+    if let Some(loadavg) = &stats.0.load {
+        cpuvalue["avg1"] = Value::from(loadavg.0);
+        cpuvalue["avg5"] = Value::from(loadavg.1);
+        cpuvalue["avg15"] = Value::from(loadavg.2);
+    }
+
+    values.push(Arc::new(
+        MetricsData::new("cpustat", ctime, cpuvalue)?
+            .tag("object", "host")
+            .tag("host", nodename),
+    ));
+
+    if let Some(stat) = &stats.0.meminfo {
+        values.push(Arc::new(
+            MetricsData::new("memory", ctime, stat)?
+                .tag("object", "host")
+                .tag("host", nodename),
+        ));
+    }
+
+    if let Some(netdev) = &stats.0.net {
+        for item in netdev {
+            values.push(Arc::new(
+                MetricsData::new("nics", ctime, item)?
+                    .tag("object", "host")
+                    .tag("host", nodename)
+                    .tag("instance", item.device.clone()),
+            ));
+        }
+    }
+
+    values.push(Arc::new(
+        MetricsData::new("blockstat", ctime, stats.1.to_value())?
+            .tag("object", "host")
+            .tag("host", nodename),
+    ));
+
+    for datastore in stats.2.iter() {
+        values.push(Arc::new(
+            MetricsData::new("blockstat", ctime, datastore.to_value())?
+                .tag("object", "host")
+                .tag("host", nodename)
+                .tag("datastore", datastore.name.clone()),
+        ));
+    }
+
+    // we must use a concrete function here, because the inferred lifetime of a
+    // closure is not general enough for the tokio::spawn call we are in
+    fn map_fn(item: &(proxmox_metrics::Metrics, String)) -> &proxmox_metrics::Metrics {
+        &item.0
+    }
+
+    let results =
+        proxmox_metrics::send_data_to_channels(&values, channel_list.iter().map(map_fn)).await;
+    for (res, name) in results
+        .into_iter()
+        .zip(channel_list.iter().map(|(_, name)| name))
+    {
+        if let Err(err) = res {
+            log::error!("error sending into channel of {name}: {err}");
+        }
+    }
+
+    futures::future::join_all(channel_list.into_iter().map(|(channel, name)| async move {
+        if let Err(err) = channel.join().await {
+            log::error!("error sending to metric server {name}: {err}");
+        }
+    }))
+    .await;
+
+    Ok(())
+}
+
+/// Get the metric server connections from a config
+fn get_metric_server_connections(
+    metric_config: proxmox_section_config::SectionConfigData,
+) -> Result<Vec<(proxmox_metrics::Metrics, String)>, Error> {
+    let mut res = Vec::new();
+
+    for config in
+        metric_config.convert_to_typed_array::<pbs_api_types::InfluxDbUdp>("influxdb-udp")?
+    {
+        if !config.enable {
+            continue;
+        }
+        let future = proxmox_metrics::influxdb_udp(&config.host, config.mtu);
+        res.push((future, config.name));
+    }
+
+    for config in
+        metric_config.convert_to_typed_array::<pbs_api_types::InfluxDbHttp>("influxdb-http")?
+    {
+        if !config.enable {
+            continue;
+        }
+        let future = proxmox_metrics::influxdb_http(
+            &config.url,
+            config.organization.as_deref().unwrap_or("proxmox"),
+            config.bucket.as_deref().unwrap_or("proxmox"),
+            config.token.as_deref(),
+            config.verify_tls.unwrap_or(true),
+            config.max_body_size.unwrap_or(25_000_000),
+        )?;
+        res.push((future, config.name));
+    }
+    Ok(res)
+}
+
+impl DiskStat {
+    fn to_value(&self) -> Value {
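+        // Flatten usage and device counters into the JSON layout sent to the metric
+        // servers: sector counts are converted to bytes (512-byte sectors) and
+        // io_ticks is scaled down by 1000.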
+        let mut value = json!({});
+        if let Some(usage) = &self.usage {
+            value["total"] = Value::from(usage.total);
+            value["used"] = Value::from(usage.used);
+            value["avail"] = Value::from(usage.available);
+        }
+
+        if let Some(dev) = &self.dev {
+            value["read_ios"] = Value::from(dev.read_ios);
+            value["read_bytes"] = Value::from(dev.read_sectors * 512);
+            value["write_ios"] = Value::from(dev.write_ios);
+            value["write_bytes"] = Value::from(dev.write_sectors * 512);
+            value["io_ticks"] = Value::from(dev.io_ticks / 1000);
+        }
+        value
+    }
+}
diff --git a/src/server/metric_collection/mod.rs b/src/server/metric_collection/mod.rs
new file mode 100644
index 000000000..3cbd74256
--- /dev/null
+++ b/src/server/metric_collection/mod.rs
@@ -0,0 +1,235 @@
+use std::{
+    path::Path,
+    pin::pin,
+    sync::Arc,
+    time::{Duration, Instant},
+};
+
+use anyhow::Error;
+use tokio::join;
+
+use pbs_api_types::{DataStoreConfig, Operation};
+use proxmox_sys::{
+    fs::FileSystemInformation,
+    linux::procfs::{Loadavg, ProcFsMemInfo, ProcFsNetDev, ProcFsStat},
+};
+
+use crate::tools::disks::{zfs_dataset_stats, BlockDevStat, DiskManage};
+
+mod metric_server;
+pub(crate) mod pull_metrics;
+pub(crate) mod rrd;
+
+const METRIC_COLLECTION_INTERVAL: Duration = Duration::from_secs(10);
+
+/// Initialize the metric collection subsystem.
+///
+/// Any datapoints in the RRD journal will be committed.
+pub fn init() -> Result<(), Error> {
+    let rrd_cache = rrd::init()?;
+    rrd_cache.apply_journal()?;
+
+    pull_metrics::init()?;
+
+    Ok(())
+}
+
+/// Spawns a tokio task for regular metric collection.
+///
+/// Every 10 seconds, host and disk stats will be collected and
+///   - stored in the RRD
+///   - sent to any configured metric servers
+pub fn start_collection_task() {
+    tokio::spawn(async {
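+        // Run the collection loop until proxmox_daemon signals shutdown.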
+        let abort_future = pin!(proxmox_daemon::shutdown_future());
+        let future = pin!(run_stat_generator());
+        futures::future::select(future, abort_future).await;
+    });
+}
+
+async fn run_stat_generator() {
+    loop {
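+        // One iteration: gather host and disk stats off the async executor, then
+        // feed the RRD cache, the pull-metric cache and any configured metric
+        // servers concurrently.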
+        let delay_target = Instant::now() + METRIC_COLLECTION_INTERVAL;
+
+        let stats_future = tokio::task::spawn_blocking(|| {
+            let hoststats = collect_host_stats_sync();
+            let (hostdisk, datastores) = collect_disk_stats_sync();
+            Arc::new((hoststats, hostdisk, datastores))
+        });
+        let stats = match stats_future.await {
+            Ok(res) => res,
+            Err(err) => {
+                log::error!("collecting host stats panicked: {err}");
+                tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
+                continue;
+            }
+        };
+
+        let rrd_future = tokio::task::spawn_blocking({
+            let stats = Arc::clone(&stats);
+            move || {
+                rrd::update_metrics(&stats.0, &stats.1, &stats.2);
+                rrd::sync_journal();
+            }
+        });
+        let pull_metric_future = tokio::task::spawn_blocking({
+            let stats = Arc::clone(&stats);
+            move || {
+                pull_metrics::update_metrics(&stats.0, &stats.1, &stats.2)?;
+                Ok::<(), Error>(())
+            }
+        });
+
+        let metrics_future = metric_server::send_data_to_metric_servers(stats);
+
+        let (rrd_res, metrics_res, pull_metrics_res) =
+            join!(rrd_future, metrics_future, pull_metric_future);
+        if let Err(err) = rrd_res {
+            log::error!("rrd update panicked: {err}");
+        }
+        if let Err(err) = metrics_res {
+            log::error!("error during metrics sending: {err}");
+        }
+        if let Err(err) = pull_metrics_res {
+            log::error!("error caching pull-style metrics: {err}");
+        }
+
+        tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
+    }
+}
+
+struct HostStats {
+    proc: Option<ProcFsStat>,
+    meminfo: Option<ProcFsMemInfo>,
+    net: Option<Vec<ProcFsNetDev>>,
+    load: Option<Loadavg>,
+}
+
+struct DiskStat {
+    name: String,
+    usage: Option<FileSystemInformation>,
+    dev: Option<BlockDevStat>,
+}
+
+fn collect_host_stats_sync() -> HostStats {
+    use proxmox_sys::linux::procfs::{
+        read_loadavg, read_meminfo, read_proc_net_dev, read_proc_stat,
+    };
+
+    let proc = match read_proc_stat() {
+        Ok(stat) => Some(stat),
+        Err(err) => {
+            eprintln!("read_proc_stat failed - {err}");
+            None
+        }
+    };
+
+    let meminfo = match read_meminfo() {
+        Ok(stat) => Some(stat),
+        Err(err) => {
+            eprintln!("read_meminfo failed - {err}");
+            None
+        }
+    };
+
+    let net = match read_proc_net_dev() {
+        Ok(netdev) => Some(netdev),
+        Err(err) => {
+            eprintln!("read_prox_net_dev failed - {err}");
+            None
+        }
+    };
+
+    let load = match read_loadavg() {
+        Ok(loadavg) => Some(loadavg),
+        Err(err) => {
+            eprintln!("read_loadavg failed - {err}");
+            None
+        }
+    };
+
+    HostStats {
+        proc,
+        meminfo,
+        net,
+        load,
+    }
+}
+
+fn collect_disk_stats_sync() -> (DiskStat, Vec<DiskStat>) {
+    let disk_manager = DiskManage::new();
+
+    let root = gather_disk_stats(disk_manager.clone(), Path::new("/"), "host");
+
+    let mut datastores = Vec::new();
+    match pbs_config::datastore::config() {
+        Ok((config, _)) => {
+            let datastore_list: Vec<DataStoreConfig> = config
+                .convert_to_typed_array("datastore")
+                .unwrap_or_default();
+
+            for config in datastore_list {
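+                // Skip datastores whose maintenance mode does not permit read operations.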
+                if config
+                    .get_maintenance_mode()
+                    .map_or(false, |mode| mode.check(Some(Operation::Read)).is_err())
+                {
+                    continue;
+                }
+                let path = Path::new(&config.path);
+                datastores.push(gather_disk_stats(disk_manager.clone(), path, &config.name));
+            }
+        }
+        Err(err) => {
+            eprintln!("read datastore config failed - {err}");
+        }
+    }
+
+    (root, datastores)
+}
+
+fn gather_disk_stats(disk_manager: Arc<DiskManage>, path: &Path, name: &str) -> DiskStat {
+    let usage = match proxmox_sys::fs::fs_info(path) {
+        Ok(status) => Some(status),
+        Err(err) => {
+            eprintln!("read fs info on {path:?} failed - {err}");
+            None
+        }
+    };
+
+    let dev = match disk_manager.find_mounted_device(path) {
+        Ok(None) => None,
+        Ok(Some((fs_type, device, source))) => {
+            let mut device_stat = None;
+            match (fs_type.as_str(), source) {
+                ("zfs", Some(source)) => match source.into_string() {
+                    Ok(dataset) => match zfs_dataset_stats(&dataset) {
+                        Ok(stat) => device_stat = Some(stat),
+                        Err(err) => eprintln!("zfs_dataset_stats({dataset:?}) failed - {err}"),
+                    },
+                    Err(source) => {
+                        eprintln!("zfs_pool_stats({source:?}) failed - invalid characters")
+                    }
+                },
+                _ => {
+                    if let Ok(disk) = disk_manager.clone().disk_by_dev_num(device.into_dev_t()) {
+                        match disk.read_stat() {
+                            Ok(stat) => device_stat = stat,
+                            Err(err) => eprintln!("disk.read_stat {path:?} failed - {err}"),
+                        }
+                    }
+                }
+            }
+            device_stat
+        }
+        Err(err) => {
+            eprintln!("find_mounted_device failed - {err}");
+            None
+        }
+    };
+
+    DiskStat {
+        name: name.to_string(),
+        usage,
+        dev,
+    }
+}
diff --git a/src/server/metric_collection/pull_metrics.rs b/src/server/metric_collection/pull_metrics.rs
new file mode 100644
index 000000000..1b5f37773
--- /dev/null
+++ b/src/server/metric_collection/pull_metrics.rs
@@ -0,0 +1,185 @@
+use std::{path::Path, sync::OnceLock, time::Duration};
+
+use anyhow::{format_err, Error};
+
+use nix::sys::stat::Mode;
+use pbs_api_types::{
+    MetricDataPoint,
+    MetricDataType::{self, Derive, Gauge},
+};
+use pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR;
+use proxmox_shared_cache::SharedCache;
+use proxmox_sys::fs::CreateOptions;
+use serde::{Deserialize, Serialize};
+
+use super::{DiskStat, HostStats, METRIC_COLLECTION_INTERVAL};
+
+const METRIC_CACHE_TIME: Duration = Duration::from_secs(30 * 60);
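+// Number of generations kept in the cache: 30 min cache time / 10 s collection interval = 180.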
+const STORED_METRIC_GENERATIONS: u64 =
+    METRIC_CACHE_TIME.as_secs() / METRIC_COLLECTION_INTERVAL.as_secs();
+
+static METRIC_CACHE: OnceLock<SharedCache> = OnceLock::new();
+
+/// Initialize the metric cache.
+pub(super) fn init() -> Result<(), Error> {
+    let backup_user = pbs_config::backup_user()?;
+    let file_opts = CreateOptions::new()
+        .owner(backup_user.uid)
+        .group(backup_user.gid)
+        .perm(Mode::from_bits_truncate(0o660));
+
+    let cache_location = Path::new(PROXMOX_BACKUP_RUN_DIR).join("metrics");
+
+    let cache = SharedCache::new(cache_location, file_opts, STORED_METRIC_GENERATIONS as u32)?;
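+    // The cache keeps the last STORED_METRIC_GENERATIONS snapshots under PROXMOX_BACKUP_RUN_DIR/metrics.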
+
+    METRIC_CACHE
+        .set(cache)
+        .map_err(|_e| format_err!("metric cache already initialized"))?;
+
+    Ok(())
+}
+
+/// Return most recent metrics
+///
+/// If the metric collection loop has not produced any metrics yet, an empty
+/// `Vec` is returned. Returns an error if the cache could not be accessed.
+pub fn get_most_recent_metrics() -> Result<Vec<MetricDataPoint>, Error> {
+    let cached_datapoints: Option<MetricDataPoints> = get_cache()?.get()?;
+    let mut points = cached_datapoints.map(|r| r.datapoints).unwrap_or_default();
+
+    points.sort_unstable_by_key(|p| p.timestamp);
+
+    Ok(points)
+}
+
+/// Return all cached metrics with a `timestamp > start_time`
+///
+/// If the metric collection loop has not produced any metrics yet, an empty
+/// `Vec` is returned. Returns an error if the cache could not be accessed.
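+///
+/// For example, passing `proxmox_time::epoch_i64() - 300` as `start_time` yields
+/// the datapoints recorded during the last five minutes.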
+pub fn get_all_metrics(start_time: i64) -> Result<Vec<MetricDataPoint>, Error> {
+    let now = proxmox_time::epoch_i64();
+
+    let delta = now - start_time;
+
+    if delta < 0 {
+        // start-time in the future, no metrics for you
+        return Ok(Vec::new());
+    }
+
+    let generations = delta / (METRIC_COLLECTION_INTERVAL.as_secs() as i64);
+    let generations = generations.clamp(0, STORED_METRIC_GENERATIONS as i64);
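+    // e.g. a start_time one hour ago yields 3600 s / 10 s = 360 generations,
+    // clamped to the 180 generations that are actually kept.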
+
+    let cached_datapoints: Vec<MetricDataPoints> = get_cache()?.get_last(generations as u32)?;
+
+    let mut points = Vec::new();
+
+    for gen in cached_datapoints {
+        if gen.timestamp > start_time {
+            points.extend(gen.datapoints);
+        }
+    }
+
+    points.sort_unstable_by_key(|p| p.timestamp);
+
+    Ok(points)
+}
+
+/// Convert `HostStats` and `DiskStat` values into universal metric data points and
+/// cache them for later retrieval.
+pub(super) fn update_metrics(
+    host: &HostStats,
+    hostdisk: &DiskStat,
+    datastores: &[DiskStat],
+) -> Result<(), Error> {
+    let mut points = MetricDataPoints::new(proxmox_time::epoch_i64());
+
+    // Using the same metric names as in PVE's new /cluster/metrics/export endpoint
+    if let Some(stat) = &host.proc {
+        points.add(Gauge, "host", "cpu_current", stat.cpu);
+        points.add(Gauge, "host", "cpu_iowait", stat.iowait_percent);
+    }
+
+    if let Some(loadavg) = &host.load {
+        points.add(Gauge, "host", "cpu_avg1", loadavg.0);
+        points.add(Gauge, "host", "cpu_avg5", loadavg.1);
+        points.add(Gauge, "host", "cpu_avg15", loadavg.2);
+    }
+
+    if let Some(meminfo) = &host.meminfo {
+        points.add(Gauge, "host", "mem_total", meminfo.memtotal as f64);
+        points.add(Gauge, "host", "mem_used", meminfo.memused as f64);
+        points.add(Gauge, "host", "swap_total", meminfo.swaptotal as f64);
+        points.add(Gauge, "host", "swap_used", meminfo.swapused as f64);
+    }
+
+    if let Some(netdev) = &host.net {
+        use pbs_config::network::is_physical_nic;
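+        // Only traffic on physical NICs is summed into the host net_in/net_out counters.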
+        let mut netin = 0;
+        let mut netout = 0;
+        for item in netdev {
+            if !is_physical_nic(&item.device) {
+                continue;
+            }
+            netin += item.receive;
+            netout += item.send;
+        }
+        points.add(Derive, "host", "net_in", netin as f64);
+        points.add(Derive, "host", "net_out", netout as f64);
+    }
+
+    update_disk_metrics(&mut points, hostdisk, "host");
+
+    for stat in datastores {
+        let id = format!("datastore/{}", stat.name);
+        update_disk_metrics(&mut points, stat, &id);
+    }
+
+    get_cache()?.set(&points, Duration::from_secs(2))?;
+
+    Ok(())
+}
+
+fn get_cache() -> Result<&'static SharedCache, Error> {
+    // Not using get_or_init here since initialization can fail.
+    METRIC_CACHE
+        .get()
+        .ok_or_else(|| format_err!("metric cache not initialized"))
+}
+
+fn update_disk_metrics(points: &mut MetricDataPoints, disk: &DiskStat, id: &str) {
+    if let Some(status) = &disk.usage {
+        points.add(Gauge, id, "disk_total", status.total as f64);
+        points.add(Gauge, id, "disk_used", status.used as f64);
+        points.add(Gauge, id, "disk_available", status.available as f64);
+    }
+
+    if let Some(stat) = &disk.dev {
+        points.add(Derive, id, "disk_read", (stat.read_sectors * 512) as f64);
+        points.add(Derive, id, "disk_write", (stat.write_sectors * 512) as f64);
+    }
+}
+
+#[derive(Serialize, Deserialize)]
+struct MetricDataPoints {
+    timestamp: i64,
+    datapoints: Vec<MetricDataPoint>,
+}
+
+impl MetricDataPoints {
+    fn new(timestamp: i64) -> Self {
+        Self {
+            datapoints: Vec::new(),
+            timestamp,
+        }
+    }
+
+    fn add(&mut self, ty: MetricDataType, id: &str, metric: &str, value: f64) {
+        self.datapoints.push(MetricDataPoint {
+            id: id.into(),
+            metric: metric.into(),
+            timestamp: self.timestamp,
+            ty,
+            value,
+        })
+    }
+}
diff --git a/src/server/metric_collection/rrd.rs b/src/server/metric_collection/rrd.rs
new file mode 100644
index 000000000..ed39cc94e
--- /dev/null
+++ b/src/server/metric_collection/rrd.rs
@@ -0,0 +1,215 @@
+//! Round Robin Database cache
+//!
+//! RRD files are stored under `/var/lib/proxmox-backup/rrdb/`. Only a
+//! single process may access and update those files, so we initialize
+//! and update RRD data inside `proxmox-backup-proxy`.
+
+use std::path::Path;
+
+use anyhow::{format_err, Error};
+use once_cell::sync::OnceCell;
+
+use proxmox_rrd::rrd::{AggregationFn, Archive, DataSourceType, Database};
+use proxmox_rrd::Cache;
+use proxmox_sys::fs::CreateOptions;
+
+use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M;
+use proxmox_rrd_api_types::{RrdMode, RrdTimeframe};
+
+use super::{DiskStat, HostStats};
+
+const RRD_CACHE_BASEDIR: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/rrdb");
+
+static RRD_CACHE: OnceCell<Cache> = OnceCell::new();
+
+/// Get the RRD cache instance
+fn get_cache() -> Result<&'static Cache, Error> {
+    RRD_CACHE
+        .get()
+        .ok_or_else(|| format_err!("RRD cache not initialized!"))
+}
+
+/// Initialize the RRD cache instance
+///
+/// Note: Only a single process must do this (proxmox-backup-proxy)
+pub(super) fn init() -> Result<&'static Cache, Error> {
+    let backup_user = pbs_config::backup_user()?;
+
+    let file_options = CreateOptions::new()
+        .owner(backup_user.uid)
+        .group(backup_user.gid);
+
+    let dir_options = CreateOptions::new()
+        .owner(backup_user.uid)
+        .group(backup_user.gid);
+
+    let apply_interval = 30.0 * 60.0; // 30 minutes
+
+    let cache = Cache::new(
+        RRD_CACHE_BASEDIR,
+        Some(file_options),
+        Some(dir_options),
+        apply_interval,
+        load_callback,
+        create_callback,
+    )?;
+
+    RRD_CACHE
+        .set(cache)
+        .map_err(|_| format_err!("RRD cache already initialized!"))?;
+
+    Ok(RRD_CACHE.get().unwrap())
+}
+
+fn load_callback(path: &Path, _rel_path: &str) -> Option<Database> {
+    match Database::load(path, true) {
+        Ok(rrd) => Some(rrd),
+        Err(err) => {
+            if err.kind() != std::io::ErrorKind::NotFound {
+                log::warn!("overwriting RRD file {path:?}, because of load error: {err}",);
+            }
+            None
+        }
+    }
+}
+
+fn create_callback(dst: DataSourceType) -> Database {
+    let rra_list = vec![
+        // 1 min * 1440 => 1 day
+        Archive::new(AggregationFn::Average, 60, 1440),
+        Archive::new(AggregationFn::Maximum, 60, 1440),
+        // 30 min * 1440 => 30 days ~ 1 month
+        Archive::new(AggregationFn::Average, 30 * 60, 1440),
+        Archive::new(AggregationFn::Maximum, 30 * 60, 1440),
+        // 6 h * 1440 => 360 days ~ 1 year
+        Archive::new(AggregationFn::Average, 6 * 3600, 1440),
+        Archive::new(AggregationFn::Maximum, 6 * 3600, 1440),
+        // 1 week * 570 => 10 years
+        Archive::new(AggregationFn::Average, 7 * 86400, 570),
+        Archive::new(AggregationFn::Maximum, 7 * 86400, 570),
+    ];
+
+    Database::new(dst, rra_list)
+}
+
+/// Extracts data for the specified time frame from the RRD cache
+pub fn extract_rrd_data(
+    basedir: &str,
+    name: &str,
+    timeframe: RrdTimeframe,
+    mode: RrdMode,
+) -> Result<Option<proxmox_rrd::Entry>, Error> {
+    let end = proxmox_time::epoch_f64() as u64;
+
+    let (start, resolution) = match timeframe {
+        RrdTimeframe::Hour => (end - 3600, 60),
+        RrdTimeframe::Day => (end - 3600 * 24, 60),
+        RrdTimeframe::Week => (end - 3600 * 24 * 7, 30 * 60),
+        RrdTimeframe::Month => (end - 3600 * 24 * 30, 30 * 60),
+        RrdTimeframe::Year => (end - 3600 * 24 * 365, 6 * 60 * 60),
+        RrdTimeframe::Decade => (end - 10 * 3600 * 24 * 366, 7 * 86400),
+    };
+
+    let cf = match mode {
+        RrdMode::Max => AggregationFn::Maximum,
+        RrdMode::Average => AggregationFn::Average,
+    };
+
+    let rrd_cache = get_cache()?;
+
+    rrd_cache.extract_cached_data(basedir, name, cf, resolution, Some(start), Some(end))
+}
+
+/// Sync/Flush the RRD journal
+pub(super) fn sync_journal() {
+    if let Ok(rrd_cache) = get_cache() {
+        if let Err(err) = rrd_cache.sync_journal() {
+            log::error!("rrd_sync_journal failed - {}", err);
+        }
+    }
+}
+/// Update RRD Gauge values
+fn update_gauge(name: &str, value: f64) {
+    if let Ok(rrd_cache) = get_cache() {
+        let now = proxmox_time::epoch_f64();
+        if let Err(err) = rrd_cache.update_value(name, now, value, DataSourceType::Gauge) {
+            log::error!("rrd::update_value '{}' failed - {}", name, err);
+        }
+    }
+}
+
+/// Update RRD Derive values
+fn update_derive(name: &str, value: f64) {
+    if let Ok(rrd_cache) = get_cache() {
+        let now = proxmox_time::epoch_f64();
+        if let Err(err) = rrd_cache.update_value(name, now, value, DataSourceType::Derive) {
+            log::error!("rrd::update_value '{}' failed - {}", name, err);
+        }
+    }
+}
+
+pub(super) fn update_metrics(host: &HostStats, hostdisk: &DiskStat, datastores: &[DiskStat]) {
+    if let Some(stat) = &host.proc {
+        update_gauge("host/cpu", stat.cpu);
+        update_gauge("host/iowait", stat.iowait_percent);
+    }
+
+    if let Some(meminfo) = &host.meminfo {
+        update_gauge("host/memtotal", meminfo.memtotal as f64);
+        update_gauge("host/memused", meminfo.memused as f64);
+        update_gauge("host/swaptotal", meminfo.swaptotal as f64);
+        update_gauge("host/swapused", meminfo.swapused as f64);
+    }
+
+    if let Some(netdev) = &host.net {
+        use pbs_config::network::is_physical_nic;
+        let mut netin = 0;
+        let mut netout = 0;
+        for item in netdev {
+            if !is_physical_nic(&item.device) {
+                continue;
+            }
+            netin += item.receive;
+            netout += item.send;
+        }
+        update_derive("host/netin", netin as f64);
+        update_derive("host/netout", netout as f64);
+    }
+
+    if let Some(loadavg) = &host.load {
+        update_gauge("host/loadavg", loadavg.0);
+    }
+
+    update_disk_metrics(hostdisk, "host");
+
+    for stat in datastores {
+        let rrd_prefix = format!("datastore/{}", stat.name);
+        update_disk_metrics(stat, &rrd_prefix);
+    }
+}
+
+fn update_disk_metrics(disk: &DiskStat, rrd_prefix: &str) {
+    if let Some(status) = &disk.usage {
+        let rrd_key = format!("{}/total", rrd_prefix);
+        update_gauge(&rrd_key, status.total as f64);
+        let rrd_key = format!("{}/used", rrd_prefix);
+        update_gauge(&rrd_key, status.used as f64);
+        let rrd_key = format!("{}/available", rrd_prefix);
+        update_gauge(&rrd_key, status.available as f64);
+    }
+
+    if let Some(stat) = &disk.dev {
+        let rrd_key = format!("{}/read_ios", rrd_prefix);
+        update_derive(&rrd_key, stat.read_ios as f64);
+        let rrd_key = format!("{}/read_bytes", rrd_prefix);
+        update_derive(&rrd_key, (stat.read_sectors * 512) as f64);
+
+        let rrd_key = format!("{}/write_ios", rrd_prefix);
+        update_derive(&rrd_key, stat.write_ios as f64);
+        let rrd_key = format!("{}/write_bytes", rrd_prefix);
+        update_derive(&rrd_key, (stat.write_sectors * 512) as f64);
+
+        let rrd_key = format!("{}/io_ticks", rrd_prefix);
+        update_derive(&rrd_key, (stat.io_ticks as f64) / 1000.0);
+    }
+}
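
A quick standalone check (not part of this patch) of the retention comments in `create_callback` above: each archive spans `resolution * slots` seconds, which is where the "1 day", "~1 month", "~1 year" and "10 years" figures come from.

// Each RRD archive covers `resolution_secs * slots` seconds of history.
fn covered_days(resolution_secs: u64, slots: u64) -> f64 {
    (resolution_secs * slots) as f64 / 86_400.0
}

fn main() {
    println!("{:6.1} days", covered_days(60, 1440));        // 1 min  * 1440 -> 1 day
    println!("{:6.1} days", covered_days(30 * 60, 1440));   // 30 min * 1440 -> 30 days
    println!("{:6.1} days", covered_days(6 * 3600, 1440));  // 6 h    * 1440 -> 360 days
    println!("{:6.1} days", covered_days(7 * 86_400, 570)); // 1 week * 570  -> ~3990 days (~10.9 years)
}
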
diff --git a/src/server/mod.rs b/src/server/mod.rs
index 4e3b68ac9..2e40bde3c 100644
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -25,30 +25,33 @@ pub use gc_job::*;
 mod realm_sync_job;
 pub use realm_sync_job::*;
 
-mod email_notifications;
-pub use email_notifications::*;
+pub mod notifications;
+pub use notifications::*;
 
 mod report;
 pub use report::*;
 
 pub mod auth;
 
+pub mod metric_collection;
+
 pub(crate) mod pull;
+pub(crate) mod sync;
 
 pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> {
     let proxy_pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
-    let sock = proxmox_rest_server::ctrl_sock_from_pid(proxy_pid);
+    let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid);
     let _: Value =
-        proxmox_rest_server::send_raw_command(sock, "{\"command\":\"reload-certificate\"}\n")
+        proxmox_daemon::command_socket::send_raw(sock, "{\"command\":\"reload-certificate\"}\n")
             .await?;
     Ok(())
 }
 
 pub(crate) async fn notify_datastore_removed() -> Result<(), Error> {
     let proxy_pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
-    let sock = proxmox_rest_server::ctrl_sock_from_pid(proxy_pid);
+    let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid);
     let _: Value =
-        proxmox_rest_server::send_raw_command(sock, "{\"command\":\"datastore-removed\"}\n")
+        proxmox_daemon::command_socket::send_raw(sock, "{\"command\":\"datastore-removed\"}\n")
             .await?;
     Ok(())
 }
diff --git a/src/server/notifications.rs b/src/server/notifications.rs
new file mode 100644
index 000000000..eea552022
--- /dev/null
+++ b/src/server/notifications.rs
@@ -0,0 +1,566 @@
+use std::collections::HashMap;
+use std::path::Path;
+use std::time::{Duration, Instant};
+
+use anyhow::Error;
+use const_format::concatcp;
+use nix::unistd::Uid;
+use serde_json::json;
+
+use proxmox_notify::context::pbs::PBS_CONTEXT;
+use proxmox_schema::ApiType;
+use proxmox_sys::fs::{create_path, CreateOptions};
+
+use crate::tape::TapeNotificationMode;
+use pbs_api_types::{
+    APTUpdateInfo, DataStoreConfig, DatastoreNotify, GarbageCollectionStatus, NotificationMode,
+    Notify, SyncJobConfig, TapeBackupJobSetup, User, Userid, VerificationJobConfig,
+};
+use proxmox_notify::endpoints::sendmail::{SendmailConfig, SendmailEndpoint};
+use proxmox_notify::{Endpoint, Notification, Severity};
+
+const SPOOL_DIR: &str = concatcp!(pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR, "/notifications");
+
+/// Initialize the notification system by setting context in proxmox_notify
+pub fn init() -> Result<(), Error> {
+    proxmox_notify::context::set_context(&PBS_CONTEXT);
+    Ok(())
+}
+
+/// Create the directory which will be used to temporarily store notifications
+/// which were sent from an unprivileged process.
+pub fn create_spool_dir() -> Result<(), Error> {
+    let backup_user = pbs_config::backup_user()?;
+    let opts = CreateOptions::new()
+        .owner(backup_user.uid)
+        .group(backup_user.gid);
+
+    create_path(SPOOL_DIR, None, Some(opts))?;
+    Ok(())
+}
+
+async fn send_queued_notifications() -> Result<(), Error> {
+    let mut read_dir = tokio::fs::read_dir(SPOOL_DIR).await?;
+
+    let mut notifications = Vec::new();
+
+    while let Some(entry) = read_dir.next_entry().await? {
+        let path = entry.path();
+
+        if let Some(ext) = path.extension() {
+            if ext == "json" {
+                let p = path.clone();
+
+                let bytes = tokio::fs::read(p).await?;
+                let notification: Notification = serde_json::from_slice(&bytes)?;
+                notifications.push(notification);
+
+                // Currently, there is no retry-mechanism in case of failure...
+                // For retries, we'd have to keep track of which targets succeeded/failed
+                // to send, so we do not retry notifying a target which succeeded before.
+                tokio::fs::remove_file(path).await?;
+            }
+        }
+    }
+
+    // Make sure that we send the oldest notification first
+    notifications.sort_unstable_by_key(|n| n.timestamp());
+
+    let res = tokio::task::spawn_blocking(move || {
+        let config = pbs_config::notifications::config()?;
+        for notification in notifications {
+            if let Err(err) = proxmox_notify::api::common::send(&config, &notification) {
+                log::error!("failed to send notification: {err}");
+            }
+        }
+
+        Ok::<(), Error>(())
+    })
+    .await?;
+
+    if let Err(e) = res {
+        log::error!("could not read notification config: {e}");
+    }
+
+    Ok::<(), Error>(())
+}
+
+/// Worker task to periodically send any queued notifications.
+pub async fn notification_worker() {
+    loop {
+        let delay_target = Instant::now() + Duration::from_secs(5);
+
+        if let Err(err) = send_queued_notifications().await {
+            log::error!("notification worker task error: {err}");
+        }
+
+        tokio::time::sleep_until(tokio::time::Instant::from_std(delay_target)).await;
+    }
+}
+
+fn send_notification(notification: Notification) -> Result<(), Error> {
+    if nix::unistd::ROOT == Uid::current() {
+        let config = pbs_config::notifications::config()?;
+        proxmox_notify::api::common::send(&config, &notification)?;
+    } else {
+        let ser = serde_json::to_vec(¬ification)?;
+        let path = Path::new(SPOOL_DIR).join(format!("{id}.json", id = notification.id()));
+
+        let backup_user = pbs_config::backup_user()?;
+        let opts = CreateOptions::new()
+            .owner(backup_user.uid)
+            .group(backup_user.gid);
+        proxmox_sys::fs::replace_file(path, &ser, opts, true)?;
+        log::info!("queued notification (id={id})", id = notification.id())
+    }
+
+    Ok(())
+}
+
+fn send_sendmail_legacy_notification(notification: Notification, email: &str) -> Result<(), Error> {
+    let endpoint = SendmailEndpoint {
+        config: SendmailConfig {
+            mailto: vec![email.into()],
+            ..Default::default()
+        },
+    };
+
+    endpoint.send(&notification)?;
+
+    Ok(())
+}
+
+/// Summary of a successful Tape Job
+#[derive(Default)]
+pub struct TapeBackupJobSummary {
+    /// The list of snapshots backed up
+    pub snapshot_list: Vec<String>,
+    /// The total time of the backup job
+    pub duration: std::time::Duration,
+    /// The labels of the used tapes of the backup job
+    pub used_tapes: Option<Vec<String>>,
+}
+
+pub fn send_gc_status(
+    datastore: &str,
+    status: &GarbageCollectionStatus,
+    result: &Result<(), Error>,
+) -> Result<(), Error> {
+    let (fqdn, port) = get_server_url();
+    let mut data = json!({
+        "datastore": datastore,
+        "fqdn": fqdn,
+        "port": port,
+    });
+
+    let (severity, template) = match result {
+        Ok(()) => {
+            let deduplication_factor = if status.disk_bytes > 0 {
+                (status.index_data_bytes as f64) / (status.disk_bytes as f64)
+            } else {
+                1.0
+            };
+
+            data["status"] = json!(status);
+            data["deduplication-factor"] = format!("{:.2}", deduplication_factor).into();
+
+            (Severity::Info, "gc-ok")
+        }
+        Err(err) => {
+            data["error"] = err.to_string().into();
+            (Severity::Error, "gc-err")
+        }
+    };
+    let metadata = HashMap::from([
+        ("datastore".into(), datastore.into()),
+        ("hostname".into(), proxmox_sys::nodename().into()),
+        ("type".into(), "gc".into()),
+    ]);
+
+    let notification = Notification::from_template(severity, template, data, metadata);
+
+    let (email, notify, mode) = lookup_datastore_notify_settings(datastore);
+    match mode {
+        NotificationMode::LegacySendmail => {
+            let notify = notify.gc.unwrap_or(Notify::Always);
+
+            if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
+                return Ok(());
+            }
+
+            if let Some(email) = email {
+                send_sendmail_legacy_notification(notification, &email)?;
+            }
+        }
+        NotificationMode::NotificationSystem => {
+            send_notification(notification)?;
+        }
+    }
+
+    Ok(())
+}
+
+pub fn send_verify_status(
+    job: VerificationJobConfig,
+    result: &Result<Vec<String>, Error>,
+) -> Result<(), Error> {
+    let (fqdn, port) = get_server_url();
+    let mut data = json!({
+        "job": job,
+        "fqdn": fqdn,
+        "port": port,
+    });
+
+    let (template, severity) = match result {
+        Ok(errors) if errors.is_empty() => ("verify-ok", Severity::Info),
+        Ok(errors) => {
+            data["errors"] = json!(errors);
+            ("verify-err", Severity::Error)
+        }
+        Err(_) => {
+            // aborted job - do not send any notification
+            return Ok(());
+        }
+    };
+
+    let metadata = HashMap::from([
+        ("job-id".into(), job.id.clone()),
+        ("datastore".into(), job.store.clone()),
+        ("hostname".into(), proxmox_sys::nodename().into()),
+        ("type".into(), "verify".into()),
+    ]);
+
+    let notification = Notification::from_template(severity, template, data, metadata);
+
+    let (email, notify, mode) = lookup_datastore_notify_settings(&job.store);
+    match mode {
+        NotificationMode::LegacySendmail => {
+            let notify = notify.verify.unwrap_or(Notify::Always);
+
+            if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
+                return Ok(());
+            }
+
+            if let Some(email) = email {
+                send_sendmail_legacy_notification(notification, &email)?;
+            }
+        }
+        NotificationMode::NotificationSystem => {
+            send_notification(notification)?;
+        }
+    }
+
+    Ok(())
+}
+
+pub fn send_prune_status(
+    store: &str,
+    jobname: &str,
+    result: &Result<(), Error>,
+) -> Result<(), Error> {
+    let (fqdn, port) = get_server_url();
+    let mut data = json!({
+        "jobname": jobname,
+        "store": store,
+        "fqdn": fqdn,
+        "port": port,
+    });
+
+    let (template, severity) = match result {
+        Ok(()) => ("prune-ok", Severity::Info),
+        Err(err) => {
+            data["error"] = err.to_string().into();
+            ("prune-err", Severity::Error)
+        }
+    };
+
+    let metadata = HashMap::from([
+        ("job-id".into(), jobname.to_string()),
+        ("datastore".into(), store.into()),
+        ("hostname".into(), proxmox_sys::nodename().into()),
+        ("type".into(), "prune".into()),
+    ]);
+
+    let notification = Notification::from_template(severity, template, data, metadata);
+
+    let (email, notify, mode) = lookup_datastore_notify_settings(store);
+    match mode {
+        NotificationMode::LegacySendmail => {
+            let notify = notify.prune.unwrap_or(Notify::Error);
+
+            if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
+                return Ok(());
+            }
+
+            if let Some(email) = email {
+                send_sendmail_legacy_notification(notification, &email)?;
+            }
+        }
+        NotificationMode::NotificationSystem => {
+            send_notification(notification)?;
+        }
+    }
+
+    Ok(())
+}
+
+pub fn send_sync_status(job: &SyncJobConfig, result: &Result<(), Error>) -> Result<(), Error> {
+    let (fqdn, port) = get_server_url();
+    let mut data = json!({
+        "job": job,
+        "fqdn": fqdn,
+        "port": port,
+    });
+
+    let (template, severity) = match result {
+        Ok(()) => ("sync-ok", Severity::Info),
+        Err(err) => {
+            data["error"] = err.to_string().into();
+            ("sync-err", Severity::Error)
+        }
+    };
+
+    let metadata = HashMap::from([
+        ("job-id".into(), job.id.clone()),
+        ("datastore".into(), job.store.clone()),
+        ("hostname".into(), proxmox_sys::nodename().into()),
+        ("type".into(), "sync".into()),
+    ]);
+
+    let notification = Notification::from_template(severity, template, data, metadata);
+
+    let (email, notify, mode) = lookup_datastore_notify_settings(&job.store);
+    match mode {
+        NotificationMode::LegacySendmail => {
+            let notify = notify.sync.unwrap_or(Notify::Always);
+
+            if notify == Notify::Never || (result.is_ok() && notify == Notify::Error) {
+                return Ok(());
+            }
+
+            if let Some(email) = email {
+                send_sendmail_legacy_notification(notification, &email)?;
+            }
+        }
+        NotificationMode::NotificationSystem => {
+            send_notification(notification)?;
+        }
+    }
+
+    Ok(())
+}
+
+pub fn send_tape_backup_status(
+    id: Option<&str>,
+    job: &TapeBackupJobSetup,
+    result: &Result<(), Error>,
+    summary: TapeBackupJobSummary,
+) -> Result<(), Error> {
+    let (fqdn, port) = get_server_url();
+    let duration: proxmox_time::TimeSpan = summary.duration.into();
+    let mut data = json!({
+        "job": job,
+        "fqdn": fqdn,
+        "port": port,
+        "id": id,
+        "snapshot-list": summary.snapshot_list,
+        "used-tapes": summary.used_tapes,
+        "job-duration": duration.to_string(),
+    });
+
+    let (template, severity) = match result {
+        Ok(()) => ("tape-backup-ok", Severity::Info),
+        Err(err) => {
+            data["error"] = err.to_string().into();
+            ("tape-backup-err", Severity::Error)
+        }
+    };
+
+    let mut metadata = HashMap::from([
+        ("datastore".into(), job.store.clone()),
+        ("media-pool".into(), job.pool.clone()),
+        ("hostname".into(), proxmox_sys::nodename().into()),
+        ("type".into(), "tape-backup".into()),
+    ]);
+
+    if let Some(id) = id {
+        metadata.insert("job-id".into(), id.into());
+    }
+
+    let notification = Notification::from_template(severity, template, data, metadata);
+
+    let mode = TapeNotificationMode::from(job);
+
+    match &mode {
+        TapeNotificationMode::LegacySendmail { notify_user } => {
+            let email = lookup_user_email(notify_user);
+
+            if let Some(email) = email {
+                send_sendmail_legacy_notification(notification, &email)?;
+            }
+        }
+        TapeNotificationMode::NotificationSystem => {
+            send_notification(notification)?;
+        }
+    }
+
+    Ok(())
+}
+
+/// Send email to a person to request a manual media change
+pub fn send_load_media_notification(
+    mode: &TapeNotificationMode,
+    changer: bool,
+    device: &str,
+    label_text: &str,
+    reason: Option<String>,
+) -> Result<(), Error> {
+    let device_type = if changer { "changer" } else { "drive" };
+
+    let data = json!({
+        "device-type": device_type,
+        "device": device,
+        "label-text": label_text,
+        "reason": reason,
+        "is-changer": changer,
+    });
+
+    let metadata = HashMap::from([
+        ("hostname".into(), proxmox_sys::nodename().into()),
+        ("type".into(), "tape-load".into()),
+    ]);
+    let notification = Notification::from_template(Severity::Notice, "tape-load", data, metadata);
+
+    match mode {
+        TapeNotificationMode::LegacySendmail { notify_user } => {
+            let email = lookup_user_email(notify_user);
+
+            if let Some(email) = email {
+                send_sendmail_legacy_notification(notification, &email)?;
+            }
+        }
+        TapeNotificationMode::NotificationSystem => {
+            send_notification(notification)?;
+        }
+    }
+
+    Ok(())
+}
+
+fn get_server_url() -> (String, usize) {
+    // user will surely request that they can change this
+
+    let nodename = proxmox_sys::nodename();
+    let mut fqdn = nodename.to_owned();
+
+    if let Ok(resolv_conf) = crate::api2::node::dns::read_etc_resolv_conf() {
+        if let Some(search) = resolv_conf["search"].as_str() {
+            fqdn.push('.');
+            fqdn.push_str(search);
+        }
+    }
+
+    let port = 8007;
+
+    (fqdn, port)
+}
+
+pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> {
+    let (fqdn, port) = get_server_url();
+    let hostname = proxmox_sys::nodename().to_string();
+
+    let data = json!({
+        "fqdn": fqdn,
+        "hostname": &hostname,
+        "port": port,
+        "updates": updates,
+    });
+
+    let metadata = HashMap::from([
+        ("hostname".into(), hostname),
+        ("type".into(), "package-updates".into()),
+    ]);
+
+    let notification =
+        Notification::from_template(Severity::Info, "package-updates", data, metadata);
+
+    send_notification(notification)?;
+    Ok(())
+}
+
+/// Send email on certificate renewal failure.
+pub fn send_certificate_renewal_mail(result: &Result<(), Error>) -> Result<(), Error> {
+    let error: String = match result {
+        Err(e) => e.to_string(),
+        _ => return Ok(()),
+    };
+
+    let (fqdn, port) = get_server_url();
+
+    let data = json!({
+        "fqdn": fqdn,
+        "port": port,
+        "error": error,
+    });
+
+    let metadata = HashMap::from([
+        ("hostname".into(), proxmox_sys::nodename().into()),
+        ("type".into(), "acme".into()),
+    ]);
+
+    let notification = Notification::from_template(Severity::Info, "acme-err", data, metadata);
+
+    send_notification(notification)?;
+    Ok(())
+}
+
+/// Look up a user's email address
+pub fn lookup_user_email(userid: &Userid) -> Option<String> {
+    if let Ok(user_config) = pbs_config::user::cached_config() {
+        if let Ok(user) = user_config.lookup::("user", userid.as_str()) {
+            return user.email;
+        }
+    }
+
+    None
+}
+
+/// Look up datastore notify settings
+pub fn lookup_datastore_notify_settings(
+    store: &str,
+) -> (Option<String>, DatastoreNotify, NotificationMode) {
+    let mut email = None;
+
+    let notify = DatastoreNotify {
+        gc: None,
+        verify: None,
+        sync: None,
+        prune: None,
+    };
+
+    let (config, _digest) = match pbs_config::datastore::config() {
+        Ok(result) => result,
+        Err(_) => return (email, notify, NotificationMode::default()),
+    };
+
+    let config: DataStoreConfig = match config.lookup("datastore", store) {
+        Ok(result) => result,
+        Err(_) => return (email, notify, NotificationMode::default()),
+    };
+
+    email = match config.notify_user {
+        Some(ref userid) => lookup_user_email(userid),
+        None => lookup_user_email(Userid::root_userid()),
+    };
+
+    let notification_mode = config.notification_mode.unwrap_or_default();
+    let notify_str = config.notify.unwrap_or_default();
+
+    if let Ok(value) = DatastoreNotify::API_SCHEMA.parse_property_string(&notify_str) {
+        if let Ok(notify) = serde_json::from_value(value) {
+            return (email, notify, notification_mode);
+        }
+    }
+
+    (email, notify, notification_mode)
+}
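
All the send_* helpers above repeat the same legacy-sendmail gating rule: skip when the datastore's notify setting is "never", or when the job succeeded but notifications are only wanted on error. A standalone restatement (not part of this patch; the enum is a local stand-in for the Notify type from pbs-api-types):

#[derive(Clone, Copy, PartialEq)]
enum Notify {
    Always,
    Error,
    Never,
}

// Mirrors `notify == Notify::Never || (result.is_ok() && notify == Notify::Error)`
// from the helpers above, inverted to answer "should we send?".
fn should_send_legacy(notify: Notify, job_failed: bool) -> bool {
    !(notify == Notify::Never || (!job_failed && notify == Notify::Error))
}

fn main() {
    assert!(should_send_legacy(Notify::Always, false));
    assert!(!should_send_legacy(Notify::Error, false)); // success + "on error only" -> skip
    assert!(should_send_legacy(Notify::Error, true));
    assert!(!should_send_legacy(Notify::Never, true));
    println!("legacy sendmail gating behaves as described");
}
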
diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs
index 2de349735..1c86647a0 100644
--- a/src/server/prune_job.rs
+++ b/src/server/prune_job.rs
@@ -1,8 +1,7 @@
 use std::sync::Arc;
 
 use anyhow::Error;
-
-use proxmox_sys::{task_log, task_warn};
+use tracing::{info, warn};
 
 use pbs_api_types::{
     print_store_and_ns, Authid, KeepOptions, Operation, PruneJobOptions, MAX_NAMESPACE_DEPTH,
@@ -16,7 +15,6 @@ use crate::backup::ListAccessibleBackupGroups;
 use crate::server::jobstate::Job;
 
 pub fn prune_datastore(
-    worker: Arc<WorkerTask>,
     auth_id: Authid,
     prune_options: PruneJobOptions,
     datastore: Arc<DataStore>,
@@ -31,19 +29,19 @@ pub fn prune_datastore(
     };
     let ns = prune_options.ns.clone().unwrap_or_default();
     let store_ns = print_store_and_ns(store, &ns);
-    task_log!(worker, "Starting datastore prune on {store_ns}, {depth}");
+    info!("Starting datastore prune on {store_ns}, {depth}");
 
     if dry_run {
-        task_log!(worker, "(dry test run)");
+        info!("(dry test run)");
     }
 
     let keep_all = !prune_options.keeps_something();
 
     if keep_all {
-        task_log!(worker, "No prune selection - keeping all files.");
+        info!("No prune selection - keeping all files.");
     } else {
         let rendered_options = cli_prune_options_string(&prune_options);
-        task_log!(worker, "retention options: {rendered_options}");
+        info!("retention options: {rendered_options}");
     }
 
     for group in ListAccessibleBackupGroups::new_with_privs(
@@ -61,8 +59,7 @@ pub fn prune_datastore(
         let mut prune_info = compute_prune_info(list, &prune_options.keep)?;
         prune_info.reverse(); // delete older snapshots first
 
-        task_log!(
-            worker,
+        info!(
             "Pruning group {ns}:\"{}/{}\"",
             group.backup_type(),
             group.backup_id()
@@ -70,11 +67,9 @@ pub fn prune_datastore(
 
         for (info, mark) in prune_info {
             let keep = keep_all || mark.keep();
-            task_log!(
-                worker,
-                "{}{} {}/{}/{}",
+            info!(
+                "{}{mark} {}/{}/{}",
                 if dry_run { "would " } else { "" },
-                mark,
                 group.backup_type(),
                 group.backup_id(),
                 info.backup_dir.backup_time_string()
@@ -82,7 +77,7 @@ pub fn prune_datastore(
             if !keep && !dry_run {
                 if let Err(err) = datastore.remove_backup_dir(ns, info.backup_dir.as_ref(), false) {
                     let path = info.backup_dir.relative_path();
-                    task_warn!(worker, "failed to remove dir {path:?}: {err}");
+                    warn!("failed to remove dir {path:?}: {err}");
                 }
             }
         }
@@ -150,13 +145,13 @@ pub fn do_prune_job(
         move |worker| {
             job.start(&worker.upid().to_string())?;
 
-            task_log!(worker, "prune job '{}'", job.jobname());
+            info!("prune job '{}'", job.jobname());
 
             if let Some(event_str) = schedule {
-                task_log!(worker, "task triggered by schedule '{event_str}'");
+                info!("task triggered by schedule '{event_str}'");
             }
 
-            let result = prune_datastore(worker.clone(), auth_id, prune_options, datastore, false);
+            let result = prune_datastore(auth_id, prune_options, datastore, false);
 
             let status = worker.create_state(&result);
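
The hunk above shows the pattern used throughout this series: task_log!(worker, ...) and task_warn! become plain tracing macros, so helpers such as prune_datastore no longer need the WorkerTask handle threaded through. A minimal standalone sketch (not part of this patch; it uses the stock tracing-subscriber fmt logger as a stand-in for the proxmox-log task logger):

use tracing::{info, warn};

// The prune helper only emits events; whoever installed the subscriber
// decides where they end up (task log, syslog, stderr, ...).
fn prune_step(dry_run: bool, path: &str) {
    info!("Starting datastore prune on {path}");
    if dry_run {
        info!("(dry test run)");
    }
    warn!("failed to remove dir {path:?}: example error");
}

fn main() {
    // Hypothetical wiring for this sketch only.
    tracing_subscriber::fmt::init();
    prune_step(true, "store1/ns1");
}
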
 
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 14744e9c8..3117f7d2c 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -1,531 +1,48 @@
-//! Sync datastore from remote server
+//! Sync datastore by pulling contents from remote server
 
-use std::collections::{HashMap, HashSet};
-use std::io::{Seek, Write};
-use std::path::{Path, PathBuf};
+use std::collections::HashSet;
+use std::io::Seek;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
-use std::time::{Duration, SystemTime};
+use std::time::SystemTime;
 
 use anyhow::{bail, format_err, Error};
-use http::StatusCode;
 use proxmox_human_byte::HumanByte;
-use proxmox_rest_server::WorkerTask;
-use proxmox_router::HttpError;
-use proxmox_sys::{task_log, task_warn};
-use serde_json::json;
+use tracing::info;
 
 use pbs_api_types::{
-    print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupFilter,
-    GroupListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, MAX_NAMESPACE_DEPTH,
-    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+    print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, Operation,
+    RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
 };
-use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader};
+use pbs_client::BackupRepository;
 use pbs_config::CachedUserInfo;
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{
-    archive_type, ArchiveType, BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
+    ArchiveType, BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
 };
 use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_datastore::{
-    check_backup_owner, DataStore, ListNamespacesRecursive, LocalChunkReader, StoreProgress,
-};
+use pbs_datastore::{check_backup_owner, DataStore, StoreProgress};
 use pbs_tools::sha::sha256;
 
-use crate::backup::{check_ns_modification_privs, check_ns_privs, ListAccessibleBackupGroups};
+use super::sync::{
+    check_namespace_depth_limit, LocalSource, RemoteSource, RemovedVanishedStats, SkipInfo,
+    SkipReason, SyncSource, SyncSourceReader, SyncStats,
+};
+use crate::backup::{check_ns_modification_privs, check_ns_privs};
 use crate::tools::parallel_handler::ParallelHandler;
 
-struct RemoteReader {
-    backup_reader: Arc<BackupReader>,
-    dir: BackupDir,
-}
-
-struct LocalReader {
-    _dir_lock: Arc<Mutex<proxmox_sys::fs::DirLockGuard>>,
-    path: PathBuf,
-    datastore: Arc<DataStore>,
-}
-
 pub(crate) struct PullTarget {
     store: Arc<DataStore>,
     ns: BackupNamespace,
 }
 
-pub(crate) struct RemoteSource {
-    repo: BackupRepository,
-    ns: BackupNamespace,
-    client: HttpClient,
-}
-
-pub(crate) struct LocalSource {
-    store: Arc<DataStore>,
-    ns: BackupNamespace,
-}
-
-#[derive(Default)]
-pub(crate) struct RemovedVanishedStats {
-    pub(crate) groups: usize,
-    pub(crate) snapshots: usize,
-    pub(crate) namespaces: usize,
-}
-
-impl RemovedVanishedStats {
-    fn add(&mut self, rhs: RemovedVanishedStats) {
-        self.groups += rhs.groups;
-        self.snapshots += rhs.snapshots;
-        self.namespaces += rhs.namespaces;
-    }
-}
-
-#[derive(Default)]
-pub(crate) struct PullStats {
-    pub(crate) chunk_count: usize,
-    pub(crate) bytes: usize,
-    pub(crate) elapsed: Duration,
-    pub(crate) removed: Option<RemovedVanishedStats>,
-}
-
-impl From<RemovedVanishedStats> for PullStats {
-    fn from(removed: RemovedVanishedStats) -> Self {
-        Self {
-            removed: Some(removed),
-            ..Default::default()
-        }
-    }
-}
-
-impl PullStats {
-    fn add(&mut self, rhs: PullStats) {
-        self.chunk_count += rhs.chunk_count;
-        self.bytes += rhs.bytes;
-        self.elapsed += rhs.elapsed;
-
-        if let Some(rhs_removed) = rhs.removed {
-            if let Some(ref mut removed) = self.removed {
-                removed.add(rhs_removed);
-            } else {
-                self.removed = Some(rhs_removed);
-            }
-        }
-    }
-}
-
-#[async_trait::async_trait]
-/// `PullSource` is a trait that provides an interface for pulling data/information from a source.
-/// The trait includes methods for listing namespaces, groups, and backup directories,
-/// as well as retrieving a reader for reading data from the source
-trait PullSource: Send + Sync {
-    /// Lists namespaces from the source.
-    async fn list_namespaces(
-        &self,
-        max_depth: &mut Option<usize>,
-        worker: &WorkerTask,
-    ) -> Result<Vec<BackupNamespace>, Error>;
-
-    /// Lists groups within a specific namespace from the source.
-    async fn list_groups(
-        &self,
-        namespace: &BackupNamespace,
-        owner: &Authid,
-    ) -> Result<Vec<BackupGroup>, Error>;
-
-    /// Lists backup directories for a specific group within a specific namespace from the source.
-    async fn list_backup_dirs(
-        &self,
-        namespace: &BackupNamespace,
-        group: &BackupGroup,
-        worker: &WorkerTask,
-    ) -> Result<Vec<BackupDir>, Error>;
-    fn get_ns(&self) -> BackupNamespace;
-    fn get_store(&self) -> &str;
-
-    /// Returns a reader for reading data from a specific backup directory.
-    async fn reader(
-        &self,
-        ns: &BackupNamespace,
-        dir: &BackupDir,
-    ) -> Result<Arc<dyn PullReader>, Error>;
-}
-
-#[async_trait::async_trait]
-impl PullSource for RemoteSource {
-    async fn list_namespaces(
-        &self,
-        max_depth: &mut Option<usize>,
-        worker: &WorkerTask,
-    ) -> Result<Vec<BackupNamespace>, Error> {
-        if self.ns.is_root() && max_depth.map_or(false, |depth| depth == 0) {
-            return Ok(vec![self.ns.clone()]);
-        }
-
-        let path = format!("api2/json/admin/datastore/{}/namespace", self.repo.store());
-        let mut data = json!({});
-        if let Some(max_depth) = max_depth {
-            data["max-depth"] = json!(max_depth);
-        }
-
-        if !self.ns.is_root() {
-            data["parent"] = json!(self.ns);
-        }
-        self.client.login().await?;
-
-        let mut result = match self.client.get(&path, Some(data)).await {
-            Ok(res) => res,
-            Err(err) => match err.downcast_ref::<HttpError>() {
-                Some(HttpError { code, message }) => match code {
-                    &StatusCode::NOT_FOUND => {
-                        if self.ns.is_root() && max_depth.is_none() {
-                            task_warn!(worker, "Could not query remote for namespaces (404) -> temporarily switching to backwards-compat mode");
-                            task_warn!(worker, "Either make backwards-compat mode explicit (max-depth == 0) or upgrade remote system.");
-                            max_depth.replace(0);
-                        } else {
-                            bail!("Remote namespace set/recursive sync requested, but remote does not support namespaces.")
-                        }
-
-                        return Ok(vec![self.ns.clone()]);
-                    }
-                    _ => {
-                        bail!("Querying namespaces failed - HTTP error {code} - {message}");
-                    }
-                },
-                None => {
-                    bail!("Querying namespaces failed - {err}");
-                }
-            },
-        };
-
-        let list: Vec<BackupNamespace> =
-            serde_json::from_value::<Vec<pbs_api_types::NamespaceListItem>>(result["data"].take())?
-                .into_iter()
-                .map(|list_item| list_item.ns)
-                .collect();
-
-        Ok(list)
-    }
-
-    async fn list_groups(
-        &self,
-        namespace: &BackupNamespace,
-        _owner: &Authid,
-    ) -> Result<Vec<BackupGroup>, Error> {
-        let path = format!("api2/json/admin/datastore/{}/groups", self.repo.store());
-
-        let args = if !namespace.is_root() {
-            Some(json!({ "ns": namespace.clone() }))
-        } else {
-            None
-        };
-
-        self.client.login().await?;
-        let mut result =
-            self.client.get(&path, args).await.map_err(|err| {
-                format_err!("Failed to retrieve backup groups from remote - {}", err)
-            })?;
-
-        Ok(
-            serde_json::from_value::>(result["data"].take())
-                .map_err(Error::from)?
-                .into_iter()
-                .map(|item| item.backup)
-                .collect::<Vec<BackupGroup>>(),
-        )
-    }
-
-    async fn list_backup_dirs(
-        &self,
-        namespace: &BackupNamespace,
-        group: &BackupGroup,
-        worker: &WorkerTask,
-    ) -> Result<Vec<BackupDir>, Error> {
-        let path = format!("api2/json/admin/datastore/{}/snapshots", self.repo.store());
-
-        let mut args = json!({
-            "backup-type": group.ty,
-            "backup-id": group.id,
-        });
-
-        if !namespace.is_root() {
-            args["ns"] = serde_json::to_value(namespace)?;
-        }
-
-        self.client.login().await?;
-
-        let mut result = self.client.get(&path, Some(args)).await?;
-        let snapshot_list: Vec = serde_json::from_value(result["data"].take())?;
-        Ok(snapshot_list
-            .into_iter()
-            .filter_map(|item: SnapshotListItem| {
-                let snapshot = item.backup;
-                // in-progress backups can't be synced
-                if item.size.is_none() {
-                    task_log!(
-                        worker,
-                        "skipping snapshot {} - in-progress backup",
-                        snapshot
-                    );
-                    return None;
-                }
-
-                Some(snapshot)
-            })
-            .collect::<Vec<BackupDir>>())
-    }
-
-    fn get_ns(&self) -> BackupNamespace {
-        self.ns.clone()
-    }
-
-    fn get_store(&self) -> &str {
-        self.repo.store()
-    }
-
-    async fn reader(
-        &self,
-        ns: &BackupNamespace,
-        dir: &BackupDir,
-    ) -> Result<Arc<dyn PullReader>, Error> {
-        let backup_reader =
-            BackupReader::start(&self.client, None, self.repo.store(), ns, dir, true).await?;
-        Ok(Arc::new(RemoteReader {
-            backup_reader,
-            dir: dir.clone(),
-        }))
-    }
-}
-
-#[async_trait::async_trait]
-impl PullSource for LocalSource {
-    async fn list_namespaces(
-        &self,
-        max_depth: &mut Option<usize>,
-        _worker: &WorkerTask,
-    ) -> Result<Vec<BackupNamespace>, Error> {
-        ListNamespacesRecursive::new_max_depth(
-            self.store.clone(),
-            self.ns.clone(),
-            max_depth.unwrap_or(MAX_NAMESPACE_DEPTH),
-        )?
-        .collect()
-    }
-
-    async fn list_groups(
-        &self,
-        namespace: &BackupNamespace,
-        owner: &Authid,
-    ) -> Result<Vec<BackupGroup>, Error> {
-        Ok(ListAccessibleBackupGroups::new_with_privs(
-            &self.store,
-            namespace.clone(),
-            0,
-            Some(PRIV_DATASTORE_READ),
-            Some(PRIV_DATASTORE_BACKUP),
-            Some(owner),
-        )?
-        .filter_map(Result::ok)
-        .map(|backup_group| backup_group.group().clone())
-        .collect::<Vec<BackupGroup>>())
-    }
-
-    async fn list_backup_dirs(
-        &self,
-        namespace: &BackupNamespace,
-        group: &BackupGroup,
-        _worker: &WorkerTask,
-    ) -> Result<Vec<BackupDir>, Error> {
-        Ok(self
-            .store
-            .backup_group(namespace.clone(), group.clone())
-            .iter_snapshots()?
-            .filter_map(Result::ok)
-            .map(|snapshot| snapshot.dir().to_owned())
-            .collect::<Vec<BackupDir>>())
-    }
-
-    fn get_ns(&self) -> BackupNamespace {
-        self.ns.clone()
-    }
-
-    fn get_store(&self) -> &str {
-        self.store.name()
-    }
-
-    async fn reader(
-        &self,
-        ns: &BackupNamespace,
-        dir: &BackupDir,
-    ) -> Result<Arc<dyn PullReader>, Error> {
-        let dir = self.store.backup_dir(ns.clone(), dir.clone())?;
-        let dir_lock = proxmox_sys::fs::lock_dir_noblock_shared(
-            &dir.full_path(),
-            "snapshot",
-            "locked by another operation",
-        )?;
-        Ok(Arc::new(LocalReader {
-            _dir_lock: Arc::new(Mutex::new(dir_lock)),
-            path: dir.full_path(),
-            datastore: dir.datastore().clone(),
-        }))
-    }
-}
-
-#[async_trait::async_trait]
-/// `PullReader` is a trait that provides an interface for reading data from a source.
-/// The trait includes methods for getting a chunk reader, loading a file, downloading client log, and checking whether chunk sync should be skipped.
-trait PullReader: Send + Sync {
-    /// Returns a chunk reader with the specified encryption mode.
-    fn chunk_reader(&self, crypt_mode: CryptMode) -> Arc<dyn AsyncReadChunk>;
-
-    /// Asynchronously loads a file from the source into a local file.
-    /// `filename` is the name of the file to load from the source.
-    /// `into` is the path of the local file to load the source file into.
-    async fn load_file_into(
-        &self,
-        filename: &str,
-        into: &Path,
-        worker: &WorkerTask,
-    ) -> Result<Option<DataBlob>, Error>;
-
-    /// Tries to download the client log from the source and save it into a local file.
-    async fn try_download_client_log(
-        &self,
-        to_path: &Path,
-        worker: &WorkerTask,
-    ) -> Result<(), Error>;
-
-    fn skip_chunk_sync(&self, target_store_name: &str) -> bool;
-}
-
-#[async_trait::async_trait]
-impl PullReader for RemoteReader {
-    fn chunk_reader(&self, crypt_mode: CryptMode) -> Arc<dyn AsyncReadChunk> {
-        Arc::new(RemoteChunkReader::new(
-            self.backup_reader.clone(),
-            None,
-            crypt_mode,
-            HashMap::new(),
-        ))
-    }
-
-    async fn load_file_into(
-        &self,
-        filename: &str,
-        into: &Path,
-        worker: &WorkerTask,
-    ) -> Result<Option<DataBlob>, Error> {
-        let mut tmp_file = std::fs::OpenOptions::new()
-            .write(true)
-            .create(true)
-            .truncate(true)
-            .read(true)
-            .open(into)?;
-        let download_result = self.backup_reader.download(filename, &mut tmp_file).await;
-        if let Err(err) = download_result {
-            match err.downcast_ref::<HttpError>() {
-                Some(HttpError { code, message }) => match *code {
-                    StatusCode::NOT_FOUND => {
-                        task_log!(
-                            worker,
-                            "skipping snapshot {} - vanished since start of sync",
-                            &self.dir,
-                        );
-                        return Ok(None);
-                    }
-                    _ => {
-                        bail!("HTTP error {code} - {message}");
-                    }
-                },
-                None => {
-                    return Err(err);
-                }
-            };
-        };
-        tmp_file.rewind()?;
-        Ok(DataBlob::load_from_reader(&mut tmp_file).ok())
-    }
-
-    async fn try_download_client_log(
-        &self,
-        to_path: &Path,
-        worker: &WorkerTask,
-    ) -> Result<(), Error> {
-        let mut tmp_path = to_path.to_owned();
-        tmp_path.set_extension("tmp");
-
-        let tmpfile = std::fs::OpenOptions::new()
-            .write(true)
-            .create(true)
-            .read(true)
-            .open(&tmp_path)?;
-
-        // Note: be silent if there is no log - only log successful download
-        if let Ok(()) = self
-            .backup_reader
-            .download(CLIENT_LOG_BLOB_NAME, tmpfile)
-            .await
-        {
-            if let Err(err) = std::fs::rename(&tmp_path, to_path) {
-                bail!("Atomic rename file {:?} failed - {}", to_path, err);
-            }
-            task_log!(worker, "got backup log file {:?}", CLIENT_LOG_BLOB_NAME);
-        }
-
-        Ok(())
-    }
-
-    fn skip_chunk_sync(&self, _target_store_name: &str) -> bool {
-        false
-    }
-}
-
-#[async_trait::async_trait]
-impl PullReader for LocalReader {
-    fn chunk_reader(&self, crypt_mode: CryptMode) -> Arc<dyn AsyncReadChunk> {
-        Arc::new(LocalChunkReader::new(
-            self.datastore.clone(),
-            None,
-            crypt_mode,
-        ))
-    }
-
-    async fn load_file_into(
-        &self,
-        filename: &str,
-        into: &Path,
-        _worker: &WorkerTask,
-    ) -> Result<Option<DataBlob>, Error> {
-        let mut tmp_file = std::fs::OpenOptions::new()
-            .write(true)
-            .create(true)
-            .truncate(true)
-            .read(true)
-            .open(into)?;
-        let mut from_path = self.path.clone();
-        from_path.push(filename);
-        tmp_file.write_all(std::fs::read(from_path)?.as_slice())?;
-        tmp_file.rewind()?;
-        Ok(DataBlob::load_from_reader(&mut tmp_file).ok())
-    }
-
-    async fn try_download_client_log(
-        &self,
-        _to_path: &Path,
-        _worker: &WorkerTask,
-    ) -> Result<(), Error> {
-        Ok(())
-    }
-
-    fn skip_chunk_sync(&self, target_store_name: &str) -> bool {
-        self.datastore.name() == target_store_name
-    }
-}
-
 /// Parameters for a pull operation.
 pub(crate) struct PullParameters {
     /// Where data is pulled from
-    source: Arc<dyn PullSource>,
+    source: Arc<dyn SyncSource>,
     /// Where data should be pulled into
     target: PullTarget,
     /// Owner of synced groups (needs to match local owner of pre-existing groups)
@@ -542,6 +59,7 @@ pub(crate) struct PullParameters {
 
 impl PullParameters {
     /// Creates a new instance of `PullParameters`.
+    #[allow(clippy::too_many_arguments)]
     pub(crate) fn new(
         store: &str,
         ns: BackupNamespace,
@@ -561,7 +79,7 @@ impl PullParameters {
         };
         let remove_vanished = remove_vanished.unwrap_or(false);
 
-        let source: Arc<dyn PullSource> = if let Some(remote) = remote {
+        let source: Arc<dyn SyncSource> = if let Some(remote) = remote {
             let (remote_config, _digest) = pbs_config::remote::config()?;
             let remote: Remote = remote_config.lookup("remote", remote)?;
 
@@ -603,12 +121,11 @@ impl PullParameters {
 }
 
 async fn pull_index_chunks<I: IndexFile>(
-    worker: &WorkerTask,
     chunk_reader: Arc<dyn AsyncReadChunk>,
     target: Arc<DataStore>,
     index: I,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-) -> Result<PullStats, Error> {
+) -> Result<SyncStats, Error> {
     use futures::stream::{self, StreamExt, TryStreamExt};
 
     let start_time = SystemTime::now();
@@ -658,10 +175,10 @@ async fn pull_index_chunks(
                     target.cond_touch_chunk(&info.digest, false)
                 })?;
                 if chunk_exists {
-                    //task_log!(worker, "chunk {} exists {}", pos, hex::encode(digest));
+                    //info!("chunk {} exists {}", pos, hex::encode(digest));
                     return Ok::<_, Error>(());
                 }
-                //task_log!(worker, "sync {} chunk {}", pos, hex::encode(digest));
+                //info!("sync {} chunk {}", pos, hex::encode(digest));
                 let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
                 let raw_size = chunk.raw_size() as usize;
 
@@ -689,14 +206,13 @@ async fn pull_index_chunks(
     let bytes = bytes.load(Ordering::SeqCst);
     let chunk_count = chunk_count.load(Ordering::SeqCst);
 
-    task_log!(
-        worker,
+    info!(
         "downloaded {} ({}/s)",
         HumanByte::from(bytes),
         HumanByte::new_binary(bytes as f64 / elapsed.as_secs_f64()),
     );
 
-    Ok(PullStats {
+    Ok(SyncStats {
         chunk_count,
         bytes,
         elapsed,
@@ -730,12 +246,11 @@ fn verify_archive(info: &FileInfo, csum: &[u8; 32], size: u64) -> Result<(), Err
 /// - if archive is an index, pull referenced chunks
 /// - Rename tmp file into real path
 async fn pull_single_archive<'a>(
-    worker: &'a WorkerTask,
-    reader: Arc<dyn PullReader>,
+    reader: Arc<dyn SyncSourceReader>,
     snapshot: &'a pbs_datastore::BackupDir,
     archive_info: &'a FileInfo,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-) -> Result<PullStats, Error> {
+) -> Result<SyncStats, Error> {
     let archive_name = &archive_info.filename;
     let mut path = snapshot.full_path();
     path.push(archive_name);
@@ -743,17 +258,15 @@ async fn pull_single_archive<'a>(
     let mut tmp_path = path.clone();
     tmp_path.set_extension("tmp");
 
-    let mut pull_stats = PullStats::default();
+    let mut sync_stats = SyncStats::default();
 
-    task_log!(worker, "sync archive {}", archive_name);
+    info!("sync archive {archive_name}");
 
-    reader
-        .load_file_into(archive_name, &tmp_path, worker)
-        .await?;
+    reader.load_file_into(archive_name, &tmp_path).await?;
 
     let mut tmpfile = std::fs::OpenOptions::new().read(true).open(&tmp_path)?;
 
-    match archive_type(archive_name)? {
+    match ArchiveType::from_path(archive_name)? {
         ArchiveType::DynamicIndex => {
             let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
                 format_err!("unable to read dynamic index {:?} - {}", tmp_path, err)
@@ -762,17 +275,16 @@ async fn pull_single_archive<'a>(
             verify_archive(archive_info, &csum, size)?;
 
             if reader.skip_chunk_sync(snapshot.datastore().name()) {
-                task_log!(worker, "skipping chunk sync for same datastore");
+                info!("skipping chunk sync for same datastore");
             } else {
                 let stats = pull_index_chunks(
-                    worker,
                     reader.chunk_reader(archive_info.crypt_mode),
                     snapshot.datastore().clone(),
                     index,
                     downloaded_chunks,
                 )
                 .await?;
-                pull_stats.add(stats);
+                sync_stats.add(stats);
             }
         }
         ArchiveType::FixedIndex => {
@@ -783,17 +295,16 @@ async fn pull_single_archive<'a>(
             verify_archive(archive_info, &csum, size)?;
 
             if reader.skip_chunk_sync(snapshot.datastore().name()) {
-                task_log!(worker, "skipping chunk sync for same datastore");
+                info!("skipping chunk sync for same datastore");
             } else {
                 let stats = pull_index_chunks(
-                    worker,
                     reader.chunk_reader(archive_info.crypt_mode),
                     snapshot.datastore().clone(),
                     index,
                     downloaded_chunks,
                 )
                 .await?;
-                pull_stats.add(stats);
+                sync_stats.add(stats);
             }
         }
         ArchiveType::Blob => {
@@ -805,7 +316,7 @@ async fn pull_single_archive<'a>(
     if let Err(err) = std::fs::rename(&tmp_path, &path) {
         bail!("Atomic rename file {:?} failed - {}", path, err);
     }
-    Ok(pull_stats)
+    Ok(sync_stats)
 }
 
 /// Actual implementation of pulling a snapshot.
@@ -818,12 +329,11 @@ async fn pull_single_archive<'a>(
 /// -- if not, pull it from the remote
 /// - Download log if not already existing
 async fn pull_snapshot<'a>(
-    worker: &'a WorkerTask,
-    reader: Arc<dyn PullReader>,
+    reader: Arc<dyn SyncSourceReader>,
     snapshot: &'a pbs_datastore::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-) -> Result<PullStats, Error> {
-    let mut pull_stats = PullStats::default();
+) -> Result<SyncStats, Error> {
+    let mut sync_stats = SyncStats::default();
     let mut manifest_name = snapshot.full_path();
     manifest_name.push(MANIFEST_BLOB_NAME);
 
@@ -834,12 +344,12 @@ async fn pull_snapshot<'a>(
     tmp_manifest_name.set_extension("tmp");
     let tmp_manifest_blob;
     if let Some(data) = reader
-        .load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name, worker)
+        .load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name)
         .await?
     {
         tmp_manifest_blob = data;
     } else {
-        return Ok(pull_stats);
+        return Ok(sync_stats);
     }
 
     if manifest_name.exists() {
@@ -857,13 +367,11 @@ async fn pull_snapshot<'a>(
 
         if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
             if !client_log_name.exists() {
-                reader
-                    .try_download_client_log(&client_log_name, worker)
-                    .await?;
+                reader.try_download_client_log(&client_log_name).await?;
             };
-            task_log!(worker, "no data changes");
+            info!("no data changes");
             let _ = std::fs::remove_file(&tmp_manifest_name);
-            return Ok(pull_stats); // nothing changed
+            return Ok(sync_stats); // nothing changed
         }
     }
 
@@ -874,14 +382,14 @@ async fn pull_snapshot<'a>(
         path.push(&item.filename);
 
         if path.exists() {
-            match archive_type(&item.filename)? {
+            match ArchiveType::from_path(&item.filename)? {
                 ArchiveType::DynamicIndex => {
                     let index = DynamicIndexReader::open(&path)?;
                     let (csum, size) = index.compute_csum();
                     match manifest.verify_file(&item.filename, &csum, size) {
                         Ok(_) => continue,
                         Err(err) => {
-                            task_log!(worker, "detected changed file {:?} - {}", path, err);
+                            info!("detected changed file {path:?} - {err}");
                         }
                     }
                 }
@@ -891,7 +399,7 @@ async fn pull_snapshot<'a>(
                     match manifest.verify_file(&item.filename, &csum, size) {
                         Ok(_) => continue,
                         Err(err) => {
-                            task_log!(worker, "detected changed file {:?} - {}", path, err);
+                            info!("detected changed file {path:?} - {err}");
                         }
                     }
                 }
@@ -901,22 +409,16 @@ async fn pull_snapshot<'a>(
                     match manifest.verify_file(&item.filename, &csum, size) {
                         Ok(_) => continue,
                         Err(err) => {
-                            task_log!(worker, "detected changed file {:?} - {}", path, err);
+                            info!("detected changed file {path:?} - {err}");
                         }
                     }
                 }
             }
         }
 
-        let stats = pull_single_archive(
-            worker,
-            reader.clone(),
-            snapshot,
-            item,
-            downloaded_chunks.clone(),
-        )
-        .await?;
-        pull_stats.add(stats);
+        let stats =
+            pull_single_archive(reader.clone(), snapshot, item, downloaded_chunks.clone()).await?;
+        sync_stats.add(stats);
     }
 
     if let Err(err) = std::fs::rename(&tmp_manifest_name, &manifest_name) {
@@ -924,15 +426,13 @@ async fn pull_snapshot<'a>(
     }
 
     if !client_log_name.exists() {
-        reader
-            .try_download_client_log(&client_log_name, worker)
-            .await?;
+        reader.try_download_client_log(&client_log_name).await?;
     };
     snapshot
         .cleanup_unreferenced_files(&manifest)
         .map_err(|err| format_err!("failed to cleanup unreferenced files - {err}"))?;
 
-    Ok(pull_stats)
+    Ok(sync_stats)
 }
 
 /// Pulls a `snapshot`, removing newly created ones on error, but keeping existing ones in any case.
@@ -940,119 +440,39 @@ async fn pull_snapshot<'a>(
 /// The `reader` is configured to read from the source backup directory, while the
 /// `snapshot` is pointing to the local datastore and target namespace.
 async fn pull_snapshot_from<'a>(
-    worker: &'a WorkerTask,
-    reader: Arc,
+    reader: Arc,
     snapshot: &'a pbs_datastore::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-) -> Result<PullStats, Error> {
+) -> Result<SyncStats, Error> {
     let (_path, is_new, _snap_lock) = snapshot
         .datastore()
         .create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;
 
-    let pull_stats = if is_new {
-        task_log!(worker, "sync snapshot {}", snapshot.dir());
+    let sync_stats = if is_new {
+        info!("sync snapshot {}", snapshot.dir());
 
-        match pull_snapshot(worker, reader, snapshot, downloaded_chunks).await {
+        match pull_snapshot(reader, snapshot, downloaded_chunks).await {
             Err(err) => {
                 if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
                     snapshot.backup_ns(),
                     snapshot.as_ref(),
                     true,
                 ) {
-                    task_log!(worker, "cleanup error - {}", cleanup_err);
+                    info!("cleanup error - {cleanup_err}");
                 }
                 return Err(err);
             }
-            Ok(pull_stats) => {
-                task_log!(worker, "sync snapshot {} done", snapshot.dir());
-                pull_stats
+            Ok(sync_stats) => {
+                info!("sync snapshot {} done", snapshot.dir());
+                sync_stats
             }
         }
     } else {
-        task_log!(worker, "re-sync snapshot {}", snapshot.dir());
-        pull_snapshot(worker, reader, snapshot, downloaded_chunks).await?
+        info!("re-sync snapshot {}", snapshot.dir());
+        pull_snapshot(reader, snapshot, downloaded_chunks).await?
     };
 
-    Ok(pull_stats)
-}
-
-#[derive(PartialEq, Eq)]
-enum SkipReason {
-    AlreadySynced,
-    TransferLast,
-}
-
-impl std::fmt::Display for SkipReason {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(
-            f,
-            "{}",
-            match self {
-                SkipReason::AlreadySynced => "older than the newest local snapshot",
-                SkipReason::TransferLast => "due to transfer-last",
-            }
-        )
-    }
-}
-
-struct SkipInfo {
-    oldest: i64,
-    newest: i64,
-    count: u64,
-    skip_reason: SkipReason,
-}
-
-impl SkipInfo {
-    fn new(skip_reason: SkipReason) -> Self {
-        SkipInfo {
-            oldest: i64::MAX,
-            newest: i64::MIN,
-            count: 0,
-            skip_reason,
-        }
-    }
-
-    fn reset(&mut self) {
-        self.count = 0;
-        self.oldest = i64::MAX;
-        self.newest = i64::MIN;
-    }
-
-    fn update(&mut self, backup_time: i64) {
-        self.count += 1;
-
-        if backup_time < self.oldest {
-            self.oldest = backup_time;
-        }
-
-        if backup_time > self.newest {
-            self.newest = backup_time;
-        }
-    }
-
-    fn affected(&self) -> Result<String, Error> {
-        match self.count {
-            0 => Ok(String::new()),
-            1 => Ok(proxmox_time::epoch_to_rfc3339_utc(self.oldest)?),
-            _ => Ok(format!(
-                "{} .. {}",
-                proxmox_time::epoch_to_rfc3339_utc(self.oldest)?,
-                proxmox_time::epoch_to_rfc3339_utc(self.newest)?,
-            )),
-        }
-    }
-}
-
-impl std::fmt::Display for SkipInfo {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(
-            f,
-            "skipped: {} snapshot(s) ({}) - {}",
-            self.count,
-            self.affected().map_err(|_| std::fmt::Error)?,
-            self.skip_reason,
-        )
-    }
+    Ok(sync_stats)
 }
 
 /// Pulls a group according to `params`.
@@ -1073,18 +493,17 @@ impl std::fmt::Display for SkipInfo {
 /// - remote snapshot access is checked by remote (twice: query and opening the backup reader)
 /// - local group owner is already checked by pull_store
 async fn pull_group(
-    worker: &WorkerTask,
     params: &PullParameters,
     source_namespace: &BackupNamespace,
     group: &BackupGroup,
     progress: &mut StoreProgress,
-) -> Result<PullStats, Error> {
+) -> Result<SyncStats, Error> {
     let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced);
     let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast);
 
     let mut raw_list: Vec<BackupDir> = params
         .source
-        .list_backup_dirs(source_namespace, group, worker)
+        .list_backup_dirs(source_namespace, group)
         .await?;
     raw_list.sort_unstable_by(|a, b| a.time.cmp(&b.time));
 
@@ -1113,7 +532,7 @@ async fn pull_group(
                 already_synced_skip_info.update(dir.time);
                 return false;
             } else if already_synced_skip_info.count > 0 {
-                task_log!(worker, "{}", already_synced_skip_info);
+                info!("{already_synced_skip_info}");
                 already_synced_skip_info.reset();
                 return true;
             }
@@ -1122,7 +541,7 @@ async fn pull_group(
                 transfer_last_skip_info.update(dir.time);
                 return false;
             } else if transfer_last_skip_info.count > 0 {
-                task_log!(worker, "{}", transfer_last_skip_info);
+                info!("{transfer_last_skip_info}");
                 transfer_last_skip_info.reset();
             }
             true
@@ -1135,7 +554,7 @@ async fn pull_group(
 
     progress.group_snapshots = list.len() as u64;
 
-    let mut pull_stats = PullStats::default();
+    let mut sync_stats = SyncStats::default();
 
     for (pos, from_snapshot) in list.into_iter().enumerate() {
         let to_snapshot = params
@@ -1147,14 +566,13 @@ async fn pull_group(
             .source
             .reader(source_namespace, &from_snapshot)
             .await?;
-        let result =
-            pull_snapshot_from(worker, reader, &to_snapshot, downloaded_chunks.clone()).await;
+        let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await;
 
         progress.done_snapshots = pos as u64 + 1;
-        task_log!(worker, "percentage done: {}", progress);
+        info!("percentage done: {progress}");
 
         let stats = result?; // stop on error
-        pull_stats.add(stats);
+        sync_stats.add(stats);
     }
 
     if params.remove_vanished {
@@ -1169,19 +587,18 @@ async fn pull_group(
                 continue;
             }
             if snapshot.is_protected() {
-                task_log!(
-                    worker,
+                info!(
                     "don't delete vanished snapshot {} (protected)",
                     snapshot.dir()
                 );
                 continue;
             }
-            task_log!(worker, "delete vanished snapshot {}", snapshot.dir());
+            info!("delete vanished snapshot {}", snapshot.dir());
             params
                 .target
                 .store
                 .remove_backup_dir(&target_ns, snapshot.as_ref(), false)?;
-            pull_stats.add(PullStats::from(RemovedVanishedStats {
+            sync_stats.add(SyncStats::from(RemovedVanishedStats {
                 snapshots: 1,
                 groups: 0,
                 namespaces: 0,
@@ -1189,7 +606,7 @@ async fn pull_group(
         }
     }
 
-    Ok(pull_stats)
+    Ok(sync_stats)
 }
 
 fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result<bool, Error> {
@@ -1235,7 +652,6 @@ fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> R
 }
 
 fn check_and_remove_vanished_ns(
-    worker: &WorkerTask,
     params: &PullParameters,
     synced_ns: HashSet<BackupNamespace>,
 ) -> Result<(bool, RemovedVanishedStats), Error> {
@@ -1276,16 +692,12 @@ fn check_and_remove_vanished_ns(
         }
         match check_and_remove_ns(params, &local_ns) {
             Ok(true) => {
-                task_log!(worker, "Removed namespace {local_ns}");
+                info!("Removed namespace {local_ns}");
                 removed_stats.namespaces += 1;
             }
-            Ok(false) => task_log!(
-                worker,
-                "Did not remove namespace {} - protected snapshots remain",
-                local_ns
-            ),
+            Ok(false) => info!("Did not remove namespace {local_ns} - protected snapshots remain"),
             Err(err) => {
-                task_log!(worker, "Failed to remove namespace {} - {}", local_ns, err);
+                info!("Failed to remove namespace {local_ns} - {err}");
                 errors = true;
             }
         }
@@ -1311,10 +723,7 @@ fn check_and_remove_vanished_ns(
 /// - remote namespaces are filtered by remote
 /// - creation and removal of sub-NS checked here
 /// - access to sub-NS checked here
-pub(crate) async fn pull_store(
-    worker: &WorkerTask,
-    mut params: PullParameters,
-) -> Result<PullStats, Error> {
+pub(crate) async fn pull_store(mut params: PullParameters) -> Result<SyncStats, Error> {
     // explicit create shared lock to prevent GC on newly created chunks
     let _shared_store_lock = params.target.store.try_shared_chunk_store_lock()?;
     let mut errors = false;
@@ -1323,34 +732,17 @@ pub(crate) async fn pull_store(
     let mut namespaces = if params.source.get_ns().is_root() && old_max_depth == Some(0) {
         vec![params.source.get_ns()] // backwards compat - don't query remote namespaces!
     } else {
-        params
-            .source
-            .list_namespaces(&mut params.max_depth, worker)
-            .await?
+        params.source.list_namespaces(&mut params.max_depth).await?
     };
 
-    let ns_layers_to_be_pulled = namespaces
-        .iter()
-        .map(BackupNamespace::depth)
-        .max()
-        .map_or(0, |v| v - params.source.get_ns().depth());
-    let target_depth = params.target.ns.depth();
-
-    if ns_layers_to_be_pulled + target_depth > MAX_NAMESPACE_DEPTH {
-        bail!(
-            "Syncing would exceed max allowed namespace depth. ({}+{} > {})",
-            ns_layers_to_be_pulled,
-            target_depth,
-            MAX_NAMESPACE_DEPTH
-        );
-    }
+        check_namespace_depth_limit(&params.source.get_ns(), &params.target.ns, &namespaces)?;
 
     errors |= old_max_depth != params.max_depth; // fail job if we switched to backwards-compat mode
     namespaces.sort_unstable_by_key(|a| a.name_len());
 
     let (mut groups, mut snapshots) = (0, 0);
     let mut synced_ns = HashSet::with_capacity(namespaces.len());
-    let mut pull_stats = PullStats::default();
+    let mut sync_stats = SyncStats::default();
 
     for namespace in namespaces {
         let source_store_ns_str = print_store_and_ns(params.source.get_store(), &namespace);
@@ -1358,73 +750,62 @@ pub(crate) async fn pull_store(
         let target_ns = namespace.map_prefix(&params.source.get_ns(), &params.target.ns)?;
         let target_store_ns_str = print_store_and_ns(params.target.store.name(), &target_ns);
 
-        task_log!(worker, "----");
-        task_log!(
-            worker,
-            "Syncing {} into {}",
-            source_store_ns_str,
-            target_store_ns_str
-        );
+        info!("----");
+        info!("Syncing {source_store_ns_str} into {target_store_ns_str}");
 
         synced_ns.insert(target_ns.clone());
 
         match check_and_create_ns(&params, &target_ns) {
-            Ok(true) => task_log!(worker, "Created namespace {}", target_ns),
+            Ok(true) => info!("Created namespace {target_ns}"),
             Ok(false) => {}
             Err(err) => {
-                task_log!(
-                    worker,
-                    "Cannot sync {} into {} - {}",
-                    source_store_ns_str,
-                    target_store_ns_str,
-                    err,
-                );
+                info!("Cannot sync {source_store_ns_str} into {target_store_ns_str} - {err}");
                 errors = true;
                 continue;
             }
         }
 
-        match pull_ns(worker, &namespace, &mut params).await {
-            Ok((ns_progress, ns_pull_stats, ns_errors)) => {
+        match pull_ns(&namespace, &mut params).await {
+            Ok((ns_progress, ns_sync_stats, ns_errors)) => {
                 errors |= ns_errors;
 
-                pull_stats.add(ns_pull_stats);
+                sync_stats.add(ns_sync_stats);
 
                 if params.max_depth != Some(0) {
                     groups += ns_progress.done_groups;
                     snapshots += ns_progress.done_snapshots;
-                    task_log!(
-                        worker,
-                        "Finished syncing namespace {}, current progress: {} groups, {} snapshots",
-                        namespace,
-                        groups,
-                        snapshots,
+
+                    let ns = if namespace.is_root() {
+                        "root namespace".into()
+                    } else {
+                        format!("namespace {namespace}")
+                    };
+                    info!(
+                        "Finished syncing {ns}, current progress: {groups} groups, {snapshots} snapshots"
                     );
                 }
             }
             Err(err) => {
                 errors = true;
-                task_log!(
-                    worker,
-                    "Encountered errors while syncing namespace {} - {}",
+                info!(
+                    "Encountered errors while syncing namespace {} - {err}",
                     &namespace,
-                    err,
                 );
             }
         };
     }
 
     if params.remove_vanished {
-        let (has_errors, stats) = check_and_remove_vanished_ns(worker, &params, synced_ns)?;
+        let (has_errors, stats) = check_and_remove_vanished_ns(&params, synced_ns)?;
         errors |= has_errors;
-        pull_stats.add(PullStats::from(stats));
+        sync_stats.add(SyncStats::from(stats));
     }
 
     if errors {
         bail!("sync failed with some errors.");
     }
 
-    Ok(pull_stats)
+    Ok(sync_stats)
 }
 
 /// Pulls a namespace according to `params`.
@@ -1440,10 +821,9 @@ pub(crate) async fn pull_store(
 /// - remote namespaces are filtered by remote
 /// - owner check for vanished groups done here
 pub(crate) async fn pull_ns(
-    worker: &WorkerTask,
     namespace: &BackupNamespace,
     params: &mut PullParameters,
-) -> Result<(StoreProgress, PullStats, bool), Error> {
+) -> Result<(StoreProgress, SyncStats, bool), Error> {
     let mut list: Vec<BackupGroup> = params.source.list_groups(namespace, &params.owner).await?;
 
     list.sort_unstable_by(|a, b| {
@@ -1460,11 +840,9 @@ pub(crate) async fn pull_ns(
         .into_iter()
         .filter(|group| group.apply_filters(&params.group_filter))
         .collect();
-    task_log!(
-        worker,
-        "found {} groups to sync (out of {} total)",
-        list.len(),
-        unfiltered_count
+    info!(
+        "found {} groups to sync (out of {unfiltered_count} total)",
+        list.len()
     );
 
     let mut errors = false;
@@ -1475,7 +853,7 @@ pub(crate) async fn pull_ns(
     }
 
     let mut progress = StoreProgress::new(list.len() as u64);
-    let mut pull_stats = PullStats::default();
+    let mut sync_stats = SyncStats::default();
 
     let target_ns = namespace.map_prefix(&params.source.get_ns(), &params.target.ns)?;
 
@@ -1492,15 +870,10 @@ pub(crate) async fn pull_ns(
             {
                 Ok(result) => result,
                 Err(err) => {
-                    task_log!(
-                        worker,
-                        "sync group {} failed - group lock failed: {}",
-                        &group,
-                        err
-                    );
+                    info!("sync group {} failed - group lock failed: {err}", &group);
                     errors = true;
                     // do not stop here, instead continue
-                    task_log!(worker, "create_locked_backup_group failed");
+                    info!("create_locked_backup_group failed");
                     continue;
                 }
             };
@@ -1508,19 +881,16 @@ pub(crate) async fn pull_ns(
         // permission check
         if params.owner != owner {
             // only the owner is allowed to create additional snapshots
-            task_log!(
-                worker,
-                "sync group {} failed - owner check failed ({} != {})",
-                &group,
-                params.owner,
-                owner
+            info!(
+                "sync group {} failed - owner check failed ({} != {owner})",
+                &group, params.owner
             );
             errors = true; // do not stop here, instead continue
         } else {
-            match pull_group(worker, params, namespace, &group, &mut progress).await {
-                Ok(stats) => pull_stats.add(stats),
+            match pull_group(params, namespace, &group, &mut progress).await {
+                Ok(stats) => sync_stats.add(stats),
                 Err(err) => {
-                    task_log!(worker, "sync group {} failed - {}", &group, err,);
+                    info!("sync group {} failed - {err}", &group);
                     errors = true; // do not stop here, instead continue
                 }
             }
@@ -1542,7 +912,7 @@ pub(crate) async fn pull_ns(
                 if !local_group.apply_filters(&params.group_filter) {
                     continue;
                 }
-                task_log!(worker, "delete vanished group '{local_group}'",);
+                info!("delete vanished group '{local_group}'");
                 let delete_stats_result = params
                     .target
                     .store
@@ -1551,17 +921,14 @@ pub(crate) async fn pull_ns(
                 match delete_stats_result {
                     Ok(stats) => {
                         if !stats.all_removed() {
-                            task_log!(
-                                worker,
-                                "kept some protected snapshots of group '{local_group}'",
-                            );
-                            pull_stats.add(PullStats::from(RemovedVanishedStats {
+                            info!("kept some protected snapshots of group '{local_group}'");
+                            sync_stats.add(SyncStats::from(RemovedVanishedStats {
                                 snapshots: stats.removed_snapshots(),
                                 groups: 0,
                                 namespaces: 0,
                             }));
                         } else {
-                            pull_stats.add(PullStats::from(RemovedVanishedStats {
+                            sync_stats.add(SyncStats::from(RemovedVanishedStats {
                                 snapshots: stats.removed_snapshots(),
                                 groups: 1,
                                 namespaces: 0,
@@ -1569,7 +936,7 @@ pub(crate) async fn pull_ns(
                         }
                     }
                     Err(err) => {
-                        task_log!(worker, "{}", err);
+                        info!("{err}");
                         errors = true;
                     }
                 }
@@ -1577,10 +944,10 @@ pub(crate) async fn pull_ns(
             Ok(())
         });
         if let Err(err) = result {
-            task_log!(worker, "error during cleanup: {}", err);
+            info!("error during cleanup: {err}");
             errors = true;
         };
     }
 
-    Ok((progress, pull_stats, errors))
+    Ok((progress, sync_stats, errors))
 }
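Throughout the hunks above, `task_log!(worker, ...)` and `task_warn!` become plain `tracing` macros and the `worker` handle drops out of every signature. A minimal, self-contained sketch of that pattern; the helper name and subscriber setup are illustrative and not part of this patch:

use tracing::{info, warn};

// Illustrative helper: with tracing, logging no longer threads a
// `worker: &WorkerTask` parameter through the call chain; the macros emit
// into whatever subscriber or task context is currently active.
fn log_group_progress(group: &str, done: u64, total: u64) {
    info!("sync group {group}: {done}/{total} snapshots done");
    if done < total {
        warn!("group {group} not finished yet");
    }
}

fn main() {
    // assumes the `tracing-subscriber` crate for a simple stderr logger
    tracing_subscriber::fmt::init();
    log_group_progress("vm/100", 2, 3);
}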
diff --git a/src/server/realm_sync_job.rs b/src/server/realm_sync_job.rs
index 972e9a6b5..8768a0d80 100644
--- a/src/server/realm_sync_job.rs
+++ b/src/server/realm_sync_job.rs
@@ -1,18 +1,19 @@
 use anyhow::{bail, format_err, Context, Error};
+use tracing::{info, warn};
+
 use pbs_config::{acl::AclTree, token_shadow, BackupLockGuard};
 use proxmox_lang::try_block;
 use proxmox_ldap::{Config, Connection, SearchParameters, SearchResult};
 use proxmox_rest_server::WorkerTask;
 use proxmox_schema::{ApiType, Schema};
 use proxmox_section_config::SectionConfigData;
-use proxmox_sys::{task_log, task_warn};
 
-use std::{collections::HashSet, sync::Arc};
+use std::collections::HashSet;
 
 use pbs_api_types::{
-    ApiToken, Authid, LdapRealmConfig, Realm, RemoveVanished, SyncAttributes as LdapSyncAttributes,
-    SyncDefaultsOptions, User, Userid, EMAIL_SCHEMA, FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA,
-    REMOVE_VANISHED_ARRAY, USER_CLASSES_ARRAY,
+    AdRealmConfig, ApiToken, Authid, LdapRealmConfig, Realm, RealmType, RemoveVanished,
+    SyncAttributes as LdapSyncAttributes, SyncDefaultsOptions, User, Userid, EMAIL_SCHEMA,
+    FIRST_NAME_SCHEMA, LAST_NAME_SCHEMA, REMOVE_VANISHED_ARRAY, USER_CLASSES_ARRAY,
 };
 
 use crate::{auth, server::jobstate::Job};
@@ -22,6 +23,7 @@ use crate::{auth, server::jobstate::Job};
 pub fn do_realm_sync_job(
     mut job: Job,
     realm: Realm,
+    realm_type: RealmType,
     auth_id: &Authid,
     _schedule: Option<String>,
     to_stdout: bool,
@@ -38,7 +40,7 @@ pub fn do_realm_sync_job(
         move |worker| {
             job.start(&worker.upid().to_string()).unwrap();
 
-            task_log!(worker, "starting realm sync for {}", realm.as_str());
+            info!("starting realm sync for {}", realm.as_str());
 
             let override_settings = GeneralSyncSettingsOverride {
                 remove_vanished,
@@ -46,8 +48,19 @@ pub fn do_realm_sync_job(
             };
 
             async move {
-                let sync_job = LdapRealmSyncJob::new(worker, realm, &override_settings, dry_run)?;
-                sync_job.sync().await
+                match realm_type {
+                    RealmType::Ldap => {
+                        LdapRealmSyncJob::new(realm, &override_settings, dry_run)?
+                            .sync()
+                            .await
+                    }
+                    RealmType::Ad => {
+                        AdRealmSyncJob::new(realm, &override_settings, dry_run)?
+                            .sync()
+                            .await
+                    }
+                    _ => bail!("cannot sync realm {realm} of type {realm_type}"),
+                }
             }
         },
     )?;
@@ -55,9 +68,51 @@ pub fn do_realm_sync_job(
     Ok(upid_str)
 }
 
+/// Implementation for syncing Active Directory realms. Merely a thin wrapper over
+/// `LdapRealmSyncJob`, as AD is just LDAP with some special requirements.
+struct AdRealmSyncJob(LdapRealmSyncJob);
+
+impl AdRealmSyncJob {
+    fn new(
+        realm: Realm,
+        override_settings: &GeneralSyncSettingsOverride,
+        dry_run: bool,
+    ) -> Result<Self, Error> {
+        let (domains, _digest) = pbs_config::domains::config()?;
+        let config = if let Ok(config) = domains.lookup::<AdRealmConfig>("ad", realm.as_str()) {
+            config
+        } else {
+            bail!("unknown Active Directory realm '{}'", realm.as_str());
+        };
+
+        let sync_settings = GeneralSyncSettings::default()
+            .apply_config(config.sync_defaults_options.as_deref())?
+            .apply_override(override_settings)?;
+        let sync_attributes = LdapSyncSettings::new(
+            "sAMAccountName",
+            config.sync_attributes.as_deref(),
+            config.user_classes.as_deref(),
+            config.filter.as_deref(),
+        )?;
+
+        let ldap_config = auth::AdAuthenticator::api_type_to_config(&config)?;
+
+        Ok(Self(LdapRealmSyncJob {
+            realm,
+            general_sync_settings: sync_settings,
+            ldap_sync_settings: sync_attributes,
+            ldap_config,
+            dry_run,
+        }))
+    }
+
+    async fn sync(&self) -> Result<(), Error> {
+        self.0.sync().await
+    }
+}
+
 /// Implementation for syncing LDAP realms
 struct LdapRealmSyncJob {
-    worker: Arc<WorkerTask>,
     realm: Realm,
     general_sync_settings: GeneralSyncSettings,
     ldap_sync_settings: LdapSyncSettings,
@@ -68,7 +123,6 @@ struct LdapRealmSyncJob {
 impl LdapRealmSyncJob {
     /// Create new LdapRealmSyncJob
     fn new(
-        worker: Arc<WorkerTask>,
         realm: Realm,
         override_settings: &GeneralSyncSettingsOverride,
         dry_run: bool,
@@ -77,7 +131,7 @@ impl LdapRealmSyncJob {
         let config = if let Ok(config) = domains.lookup::<LdapRealmConfig>("ldap", realm.as_str()) {
             config
         } else {
-            bail!("unknown realm '{}'", realm.as_str());
+            bail!("unknown LDAP realm '{}'", realm.as_str());
         };
 
         let sync_settings = GeneralSyncSettings::default()
@@ -93,7 +147,6 @@ impl LdapRealmSyncJob {
         let ldap_config = auth::LdapAuthenticator::api_type_to_config(&config)?;
 
         Ok(Self {
-            worker,
             realm,
             general_sync_settings: sync_settings,
             ldap_sync_settings: sync_attributes,
@@ -105,10 +158,7 @@ impl LdapRealmSyncJob {
     /// Perform realm synchronization
     async fn sync(&self) -> Result<(), Error> {
         if self.dry_run {
-            task_log!(
-                self.worker,
-                "this is a DRY RUN - changes will not be persisted"
-            );
+            info!("this is a DRY RUN - changes will not be persisted");
         }
 
         let ldap = Connection::new(self.ldap_config.clone());
@@ -190,7 +240,7 @@ impl LdapRealmSyncJob {
                 anyhow::Ok(())
             });
             if let Err(e) = result {
-                task_log!(self.worker, "could not create/update user: {e}");
+                info!("could not create/update user: {e}");
             }
         }
 
@@ -209,18 +259,10 @@ impl LdapRealmSyncJob {
 
         if let Some(existing_user) = existing_user {
             if existing_user != new_or_updated_user {
-                task_log!(
-                    self.worker,
-                    "updating user {}",
-                    new_or_updated_user.userid.as_str()
-                );
+                info!("updating user {}", new_or_updated_user.userid.as_str());
             }
         } else {
-            task_log!(
-                self.worker,
-                "creating user {}",
-                new_or_updated_user.userid.as_str()
-            );
+            info!("creating user {}", new_or_updated_user.userid.as_str());
         }
 
         user_config.set_data(
@@ -242,10 +284,7 @@ impl LdapRealmSyncJob {
             let schema = schema.unwrap_string_schema();
 
             if let Err(e) = schema.check_constraints(value) {
-                task_warn!(
-                    self.worker,
-                    "{userid}: ignoring attribute `{attribute}`: {e}"
-                );
+                warn!("{userid}: ignoring attribute `{attribute}`: {e}");
 
                 None
             } else {
@@ -324,7 +363,7 @@ impl LdapRealmSyncJob {
         to_delete: &[Userid],
     ) -> Result<(), Error> {
         for userid in to_delete {
-            task_log!(self.worker, "deleting user {}", userid.as_str());
+            info!("deleting user {}", userid.as_str());
 
             // Delete the user
             user_config.sections.remove(userid.as_str());
@@ -351,7 +390,7 @@ impl LdapRealmSyncJob {
 
                     if !self.dry_run {
                         if let Err(e) = token_shadow::delete_secret(&tokenid) {
-                            task_warn!(self.worker, "could not delete token for user {userid}: {e}",)
+                            warn!("could not delete token for user {userid}: {e}",)
                         }
                     }
 
@@ -406,9 +445,9 @@ impl LdapSyncSettings {
             let value = LdapSyncAttributes::API_SCHEMA.parse_property_string(sync_attributes)?;
             let sync_attributes: LdapSyncAttributes = serde_json::from_value(value)?;
 
-            email = sync_attributes.email.clone();
-            firstname = sync_attributes.firstname.clone();
-            lastname = sync_attributes.lastname.clone();
+            email.clone_from(&sync_attributes.email);
+            firstname.clone_from(&sync_attributes.firstname);
+            lastname.clone_from(&sync_attributes.lastname);
 
             if let Some(email_attr) = &sync_attributes.email {
                 attributes.push(email_attr.clone());
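The final hunk above swaps `x = y.clone()` for `x.clone_from(&y)`. A small standalone illustration, assuming the usual motivation (clippy's `assigning_clones` lint): `clone_from` can reuse the destination's existing allocation instead of building a fresh value and dropping the old one.

fn main() {
    let sync_email = Some(String::from("mail"));
    let mut email: Option<String> = Some(String::from("previous-value"));

    // Behaves like `email = sync_email.clone()`, but may reuse the buffer
    // already owned by `email` instead of allocating a new String.
    email.clone_from(&sync_email);

    assert_eq!(email.as_deref(), Some("mail"));
}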
diff --git a/src/server/sync.rs b/src/server/sync.rs
new file mode 100644
index 000000000..bd68dda46
--- /dev/null
+++ b/src/server/sync.rs
@@ -0,0 +1,570 @@
+//! Sync datastore contents from source to target, either in push or pull direction
+
+use std::collections::HashMap;
+use std::io::{Seek, Write};
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+use std::time::Duration;
+
+use anyhow::{bail, format_err, Error};
+use http::StatusCode;
+use serde_json::json;
+use tracing::{info, warn};
+
+use proxmox_router::HttpError;
+
+use pbs_api_types::{
+    Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, NamespaceListItem,
+    SnapshotListItem, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+};
+use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader};
+use pbs_datastore::data_blob::DataBlob;
+use pbs_datastore::manifest::CLIENT_LOG_BLOB_NAME;
+use pbs_datastore::read_chunk::AsyncReadChunk;
+use pbs_datastore::{DataStore, ListNamespacesRecursive, LocalChunkReader};
+
+use crate::backup::ListAccessibleBackupGroups;
+
+#[derive(Default)]
+pub(crate) struct RemovedVanishedStats {
+    pub(crate) groups: usize,
+    pub(crate) snapshots: usize,
+    pub(crate) namespaces: usize,
+}
+
+impl RemovedVanishedStats {
+    pub(crate) fn add(&mut self, rhs: RemovedVanishedStats) {
+        self.groups += rhs.groups;
+        self.snapshots += rhs.snapshots;
+        self.namespaces += rhs.namespaces;
+    }
+}
+
+#[derive(Default)]
+pub(crate) struct SyncStats {
+    pub(crate) chunk_count: usize,
+    pub(crate) bytes: usize,
+    pub(crate) elapsed: Duration,
+    pub(crate) removed: Option<RemovedVanishedStats>,
+}
+
+impl From<RemovedVanishedStats> for SyncStats {
+    fn from(removed: RemovedVanishedStats) -> Self {
+        Self {
+            removed: Some(removed),
+            ..Default::default()
+        }
+    }
+}
+
+impl SyncStats {
+    pub(crate) fn add(&mut self, rhs: SyncStats) {
+        self.chunk_count += rhs.chunk_count;
+        self.bytes += rhs.bytes;
+        self.elapsed += rhs.elapsed;
+
+        if let Some(rhs_removed) = rhs.removed {
+            if let Some(ref mut removed) = self.removed {
+                removed.add(rhs_removed);
+            } else {
+                self.removed = Some(rhs_removed);
+            }
+        }
+    }
+}
+
+#[async_trait::async_trait]
+/// `SyncSourceReader` is a trait that provides an interface for reading data from a source.
+/// The trait includes methods for getting a chunk reader, loading a file, downloading client log,
+/// and checking whether chunk sync should be skipped.
+pub(crate) trait SyncSourceReader: Send + Sync {
+    /// Returns a chunk reader with the specified encryption mode.
+    fn chunk_reader(&self, crypt_mode: CryptMode) -> Arc<dyn AsyncReadChunk>;
+
+    /// Asynchronously loads a file from the source into a local file.
+    /// `filename` is the name of the file to load from the source.
+    /// `into` is the path of the local file to load the source file into.
+    async fn load_file_into(&self, filename: &str, into: &Path) -> Result<Option<DataBlob>, Error>;
+
+    /// Tries to download the client log from the source and save it into a local file.
+    async fn try_download_client_log(&self, to_path: &Path) -> Result<(), Error>;
+
+    fn skip_chunk_sync(&self, target_store_name: &str) -> bool;
+}
+
+pub(crate) struct RemoteSourceReader {
+    pub(crate) backup_reader: Arc<BackupReader>,
+    pub(crate) dir: BackupDir,
+}
+
+pub(crate) struct LocalSourceReader {
+    pub(crate) _dir_lock: Arc<Mutex<proxmox_sys::fs::DirLockGuard>>,
+    pub(crate) path: PathBuf,
+    pub(crate) datastore: Arc<DataStore>,
+}
+
+#[async_trait::async_trait]
+impl SyncSourceReader for RemoteSourceReader {
+    fn chunk_reader(&self, crypt_mode: CryptMode) -> Arc<dyn AsyncReadChunk> {
+        Arc::new(RemoteChunkReader::new(
+            self.backup_reader.clone(),
+            None,
+            crypt_mode,
+            HashMap::new(),
+        ))
+    }
+
+    async fn load_file_into(&self, filename: &str, into: &Path) -> Result<Option<DataBlob>, Error> {
+        let mut tmp_file = std::fs::OpenOptions::new()
+            .write(true)
+            .create(true)
+            .truncate(true)
+            .read(true)
+            .open(into)?;
+        let download_result = self.backup_reader.download(filename, &mut tmp_file).await;
+        if let Err(err) = download_result {
+            match err.downcast_ref::<HttpError>() {
+                Some(HttpError { code, message }) => match *code {
+                    StatusCode::NOT_FOUND => {
+                        info!(
+                            "skipping snapshot {} - vanished since start of sync",
+                            &self.dir
+                        );
+                        return Ok(None);
+                    }
+                    _ => {
+                        bail!("HTTP error {code} - {message}");
+                    }
+                },
+                None => {
+                    return Err(err);
+                }
+            };
+        };
+        tmp_file.rewind()?;
+        Ok(DataBlob::load_from_reader(&mut tmp_file).ok())
+    }
+
+    async fn try_download_client_log(&self, to_path: &Path) -> Result<(), Error> {
+        let mut tmp_path = to_path.to_owned();
+        tmp_path.set_extension("tmp");
+
+        let tmpfile = std::fs::OpenOptions::new()
+            .write(true)
+            .create(true)
+            .read(true)
+            .open(&tmp_path)?;
+
+        // Note: be silent if there is no log - only log successful download
+        if let Ok(()) = self
+            .backup_reader
+            .download(CLIENT_LOG_BLOB_NAME, tmpfile)
+            .await
+        {
+            if let Err(err) = std::fs::rename(&tmp_path, to_path) {
+                bail!("Atomic rename file {to_path:?} failed - {err}");
+            }
+            info!("got backup log file {CLIENT_LOG_BLOB_NAME:?}");
+        }
+
+        Ok(())
+    }
+
+    fn skip_chunk_sync(&self, _target_store_name: &str) -> bool {
+        false
+    }
+}
+
+#[async_trait::async_trait]
+impl SyncSourceReader for LocalSourceReader {
+    fn chunk_reader(&self, crypt_mode: CryptMode) -> Arc<dyn AsyncReadChunk> {
+        Arc::new(LocalChunkReader::new(
+            self.datastore.clone(),
+            None,
+            crypt_mode,
+        ))
+    }
+
+    async fn load_file_into(&self, filename: &str, into: &Path) -> Result<Option<DataBlob>, Error> {
+        let mut tmp_file = std::fs::OpenOptions::new()
+            .write(true)
+            .create(true)
+            .truncate(true)
+            .read(true)
+            .open(into)?;
+        let mut from_path = self.path.clone();
+        from_path.push(filename);
+        tmp_file.write_all(std::fs::read(from_path)?.as_slice())?;
+        tmp_file.rewind()?;
+        Ok(DataBlob::load_from_reader(&mut tmp_file).ok())
+    }
+
+    async fn try_download_client_log(&self, _to_path: &Path) -> Result<(), Error> {
+        Ok(())
+    }
+
+    fn skip_chunk_sync(&self, target_store_name: &str) -> bool {
+        self.datastore.name() == target_store_name
+    }
+}
+
+#[async_trait::async_trait]
+/// `SyncSource` is a trait that provides an interface for synchronizing data/information from a
+/// source.
+/// The trait includes methods for listing namespaces, groups, and backup directories,
+/// as well as retrieving a reader for reading data from the source.
+pub(crate) trait SyncSource: Send + Sync {
+    /// Lists namespaces from the source.
+    async fn list_namespaces(
+        &self,
+        max_depth: &mut Option<usize>,
+    ) -> Result<Vec<BackupNamespace>, Error>;
+
+    /// Lists groups within a specific namespace from the source.
+    async fn list_groups(
+        &self,
+        namespace: &BackupNamespace,
+        owner: &Authid,
+    ) -> Result<Vec<BackupGroup>, Error>;
+
+    /// Lists backup directories for a specific group within a specific namespace from the source.
+    async fn list_backup_dirs(
+        &self,
+        namespace: &BackupNamespace,
+        group: &BackupGroup,
+    ) -> Result<Vec<BackupDir>, Error>;
+    fn get_ns(&self) -> BackupNamespace;
+    fn get_store(&self) -> &str;
+
+    /// Returns a reader for reading data from a specific backup directory.
+    async fn reader(
+        &self,
+        ns: &BackupNamespace,
+        dir: &BackupDir,
+    ) -> Result<Arc<dyn SyncSourceReader>, Error>;
+}
+
+pub(crate) struct RemoteSource {
+    pub(crate) repo: BackupRepository,
+    pub(crate) ns: BackupNamespace,
+    pub(crate) client: HttpClient,
+}
+
+pub(crate) struct LocalSource {
+    pub(crate) store: Arc<DataStore>,
+    pub(crate) ns: BackupNamespace,
+}
+
+#[async_trait::async_trait]
+impl SyncSource for RemoteSource {
+    async fn list_namespaces(
+        &self,
+        max_depth: &mut Option<usize>,
+    ) -> Result<Vec<BackupNamespace>, Error> {
+        if self.ns.is_root() && max_depth.map_or(false, |depth| depth == 0) {
+            return Ok(vec![self.ns.clone()]);
+        }
+
+        let path = format!("api2/json/admin/datastore/{}/namespace", self.repo.store());
+        let mut data = json!({});
+        if let Some(max_depth) = max_depth {
+            data["max-depth"] = json!(max_depth);
+        }
+
+        if !self.ns.is_root() {
+            data["parent"] = json!(self.ns);
+        }
+        self.client.login().await?;
+
+        let mut result = match self.client.get(&path, Some(data)).await {
+            Ok(res) => res,
+            Err(err) => match err.downcast_ref::() {
+                Some(HttpError { code, message }) => match code {
+                    &StatusCode::NOT_FOUND => {
+                        if self.ns.is_root() && max_depth.is_none() {
+                            warn!("Could not query remote for namespaces (404) -> temporarily switching to backwards-compat mode");
+                            warn!("Either make backwards-compat mode explicit (max-depth == 0) or upgrade remote system.");
+                            max_depth.replace(0);
+                        } else {
+                            bail!("Remote namespace set/recursive sync requested, but remote does not support namespaces.")
+                        }
+
+                        return Ok(vec![self.ns.clone()]);
+                    }
+                    _ => {
+                        bail!("Querying namespaces failed - HTTP error {code} - {message}");
+                    }
+                },
+                None => {
+                    bail!("Querying namespaces failed - {err}");
+                }
+            },
+        };
+
+        let list: Vec<BackupNamespace> =
+            serde_json::from_value::<Vec<NamespaceListItem>>(result["data"].take())?
+                .into_iter()
+                .map(|list_item| list_item.ns)
+                .collect();
+
+        Ok(list)
+    }
+
+    async fn list_groups(
+        &self,
+        namespace: &BackupNamespace,
+        _owner: &Authid,
+    ) -> Result<Vec<BackupGroup>, Error> {
+        let path = format!("api2/json/admin/datastore/{}/groups", self.repo.store());
+
+        let args = if !namespace.is_root() {
+            Some(json!({ "ns": namespace.clone() }))
+        } else {
+            None
+        };
+
+        self.client.login().await?;
+        let mut result =
+            self.client.get(&path, args).await.map_err(|err| {
+                format_err!("Failed to retrieve backup groups from remote - {}", err)
+            })?;
+
+        Ok(
+            serde_json::from_value::<Vec<GroupListItem>>(result["data"].take())
+                .map_err(Error::from)?
+                .into_iter()
+                .map(|item| item.backup)
+                .collect::<Vec<BackupGroup>>(),
+        )
+    }
+
+    async fn list_backup_dirs(
+        &self,
+        namespace: &BackupNamespace,
+        group: &BackupGroup,
+    ) -> Result<Vec<BackupDir>, Error> {
+        let path = format!("api2/json/admin/datastore/{}/snapshots", self.repo.store());
+
+        let mut args = json!({
+            "backup-type": group.ty,
+            "backup-id": group.id,
+        });
+
+        if !namespace.is_root() {
+            args["ns"] = serde_json::to_value(namespace)?;
+        }
+
+        self.client.login().await?;
+
+        let mut result = self.client.get(&path, Some(args)).await?;
+        let snapshot_list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;
+        Ok(snapshot_list
+            .into_iter()
+            .filter_map(|item: SnapshotListItem| {
+                let snapshot = item.backup;
+                // in-progress backups can't be synced
+                if item.size.is_none() {
+                    info!("skipping snapshot {snapshot} - in-progress backup");
+                    return None;
+                }
+
+                Some(snapshot)
+            })
+            .collect::<Vec<BackupDir>>())
+    }
+
+    fn get_ns(&self) -> BackupNamespace {
+        self.ns.clone()
+    }
+
+    fn get_store(&self) -> &str {
+        self.repo.store()
+    }
+
+    async fn reader(
+        &self,
+        ns: &BackupNamespace,
+        dir: &BackupDir,
+    ) -> Result<Arc<dyn SyncSourceReader>, Error> {
+        let backup_reader =
+            BackupReader::start(&self.client, None, self.repo.store(), ns, dir, true).await?;
+        Ok(Arc::new(RemoteSourceReader {
+            backup_reader,
+            dir: dir.clone(),
+        }))
+    }
+}
+
+#[async_trait::async_trait]
+impl SyncSource for LocalSource {
+    async fn list_namespaces(
+        &self,
+        max_depth: &mut Option<usize>,
+    ) -> Result<Vec<BackupNamespace>, Error> {
+        ListNamespacesRecursive::new_max_depth(
+            self.store.clone(),
+            self.ns.clone(),
+            max_depth.unwrap_or(MAX_NAMESPACE_DEPTH),
+        )?
+        .collect()
+    }
+
+    async fn list_groups(
+        &self,
+        namespace: &BackupNamespace,
+        owner: &Authid,
+    ) -> Result<Vec<BackupGroup>, Error> {
+        Ok(ListAccessibleBackupGroups::new_with_privs(
+            &self.store,
+            namespace.clone(),
+            0,
+            Some(PRIV_DATASTORE_READ),
+            Some(PRIV_DATASTORE_BACKUP),
+            Some(owner),
+        )?
+        .filter_map(Result::ok)
+        .map(|backup_group| backup_group.group().clone())
+        .collect::<Vec<BackupGroup>>())
+    }
+
+    async fn list_backup_dirs(
+        &self,
+        namespace: &BackupNamespace,
+        group: &BackupGroup,
+    ) -> Result<Vec<BackupDir>, Error> {
+        Ok(self
+            .store
+            .backup_group(namespace.clone(), group.clone())
+            .iter_snapshots()?
+            .filter_map(Result::ok)
+            .map(|snapshot| snapshot.dir().to_owned())
+            .collect::<Vec<BackupDir>>())
+    }
+
+    fn get_ns(&self) -> BackupNamespace {
+        self.ns.clone()
+    }
+
+    fn get_store(&self) -> &str {
+        self.store.name()
+    }
+
+    async fn reader(
+        &self,
+        ns: &BackupNamespace,
+        dir: &BackupDir,
+    ) -> Result<Arc<dyn SyncSourceReader>, Error> {
+        let dir = self.store.backup_dir(ns.clone(), dir.clone())?;
+        let dir_lock = proxmox_sys::fs::lock_dir_noblock_shared(
+            &dir.full_path(),
+            "snapshot",
+            "locked by another operation",
+        )?;
+        Ok(Arc::new(LocalSourceReader {
+            _dir_lock: Arc::new(Mutex::new(dir_lock)),
+            path: dir.full_path(),
+            datastore: dir.datastore().clone(),
+        }))
+    }
+}
+
+#[derive(PartialEq, Eq)]
+pub(crate) enum SkipReason {
+    AlreadySynced,
+    TransferLast,
+}
+
+impl std::fmt::Display for SkipReason {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{}",
+            match self {
+                SkipReason::AlreadySynced =>
+                    "older than the newest snapshot present on sync target",
+                SkipReason::TransferLast => "due to transfer-last",
+            }
+        )
+    }
+}
+
+pub(crate) struct SkipInfo {
+    oldest: i64,
+    newest: i64,
+    pub(crate) count: u64,
+    skip_reason: SkipReason,
+}
+
+impl SkipInfo {
+    pub(crate) fn new(skip_reason: SkipReason) -> Self {
+        SkipInfo {
+            oldest: i64::MAX,
+            newest: i64::MIN,
+            count: 0,
+            skip_reason,
+        }
+    }
+
+    pub(crate) fn reset(&mut self) {
+        self.count = 0;
+        self.oldest = i64::MAX;
+        self.newest = i64::MIN;
+    }
+
+    pub(crate) fn update(&mut self, backup_time: i64) {
+        self.count += 1;
+
+        if backup_time < self.oldest {
+            self.oldest = backup_time;
+        }
+
+        if backup_time > self.newest {
+            self.newest = backup_time;
+        }
+    }
+
+    fn affected(&self) -> Result<String, Error> {
+        match self.count {
+            0 => Ok(String::new()),
+            1 => Ok(proxmox_time::epoch_to_rfc3339_utc(self.oldest)?),
+            _ => Ok(format!(
+                "{} .. {}",
+                proxmox_time::epoch_to_rfc3339_utc(self.oldest)?,
+                proxmox_time::epoch_to_rfc3339_utc(self.newest)?,
+            )),
+        }
+    }
+}
+
+impl std::fmt::Display for SkipInfo {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "skipped: {} snapshot(s) ({}) - {}",
+            self.count,
+            self.affected().map_err(|_| std::fmt::Error)?,
+            self.skip_reason,
+        )
+    }
+}
+
+/// Check if a sync from source to target of given namespaces exceeds the global namespace depth limit
+pub(crate) fn check_namespace_depth_limit(
+    source_namespace: &BackupNamespace,
+    target_namespace: &BackupNamespace,
+    namespaces: &[BackupNamespace],
+) -> Result<(), Error> {
+    let target_ns_depth = target_namespace.depth();
+    let sync_ns_depth = namespaces
+        .iter()
+        .map(BackupNamespace::depth)
+        .max()
+        .map_or(0, |v| v - source_namespace.depth());
+
+    if sync_ns_depth + target_ns_depth > MAX_NAMESPACE_DEPTH {
+        bail!(
+            "Syncing would exceed max allowed namespace depth. ({sync_ns_depth}+{target_ns_depth} > {MAX_NAMESPACE_DEPTH})",
+        );
+    }
+    Ok(())
+}
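The namespace depth check that pull_store previously performed inline now lives in `check_namespace_depth_limit` above. A self-contained sketch of the same arithmetic on plain depth numbers; the constant below is a stand-in, the real limit is `pbs_api_types::MAX_NAMESPACE_DEPTH`:

// Illustrative re-statement of the check in check_namespace_depth_limit.
const MAX_DEPTH: usize = 8; // stand-in value, not the real constant

fn depth_check(source_depth: usize, target_depth: usize, ns_depths: &[usize]) -> Result<(), String> {
    // deepest namespace to sync, measured relative to the source prefix
    let sync_depth = ns_depths.iter().copied().max().map_or(0, |v| v - source_depth);

    if sync_depth + target_depth > MAX_DEPTH {
        return Err(format!(
            "Syncing would exceed max allowed namespace depth. ({sync_depth}+{target_depth} > {MAX_DEPTH})"
        ));
    }
    Ok(())
}

fn main() {
    // two extra levels below the source prefix into an already deep target: rejected
    assert!(depth_check(1, 7, &[2, 3]).is_err());
    assert!(depth_check(1, 5, &[2, 3]).is_ok());
}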
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index 8bf2a0c99..a15a257da 100644
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -1,9 +1,9 @@
 use anyhow::{format_err, Error};
+use tracing::{error, info};
 
 use pbs_api_types::{Authid, Operation, VerificationJobConfig};
 use pbs_datastore::DataStore;
 use proxmox_rest_server::WorkerTask;
-use proxmox_sys::task_log;
 
 use crate::{
     backup::{verify_all_backups, verify_filter},
@@ -23,8 +23,6 @@ pub fn do_verification_job(
     let outdated_after = verification_job.outdated_after;
     let ignore_verified_snapshots = verification_job.ignore_verified.unwrap_or(true);
 
-    let (email, notify) = crate::server::lookup_datastore_notify_settings(&verification_job.store);
-
     // FIXME encode namespace here for filter/ACL check?
     let job_id = format!("{}:{}", &verification_job.store, job.jobname());
     let worker_type = job.jobtype().to_string();
@@ -36,9 +34,9 @@ pub fn do_verification_job(
         move |worker| {
             job.start(&worker.upid().to_string())?;
 
-            task_log!(worker, "Starting datastore verify job '{}'", job_id);
+            info!("Starting datastore verify job '{job_id}'");
             if let Some(event_str) = schedule {
-                task_log!(worker, "task triggered by schedule '{}'", event_str);
+                info!("task triggered by schedule '{event_str}'");
             }
 
             let ns = match verification_job.ns {
@@ -60,9 +58,9 @@ pub fn do_verification_job(
             let job_result = match result {
                 Ok(ref failed_dirs) if failed_dirs.is_empty() => Ok(()),
                 Ok(ref failed_dirs) => {
-                    task_log!(worker, "Failed to verify the following snapshots/groups:");
+                    error!("Failed to verify the following snapshots/groups:");
                     for dir in failed_dirs {
-                        task_log!(worker, "\t{}", dir);
+                        error!("\t{dir}");
                     }
 
                     Err(format_err!(
@@ -78,12 +76,8 @@ pub fn do_verification_job(
                 eprintln!("could not finish job state for {}: {}", job.jobtype(), err);
             }
 
-            if let Some(email) = email {
-                if let Err(err) =
-                    crate::server::send_verify_status(&email, notify, verification_job, &result)
-                {
-                    eprintln!("send verify notification failed: {}", err);
-                }
+            if let Err(err) = crate::server::send_verify_status(verification_job, &result) {
+                eprintln!("send verify notification failed: {err}");
             }
 
             job_result
diff --git a/src/tape/drive/lto/mod.rs b/src/tape/drive/lto/mod.rs
index faa9bf413..93ed752bd 100644
--- a/src/tape/drive/lto/mod.rs
+++ b/src/tape/drive/lto/mod.rs
@@ -225,6 +225,8 @@ impl TapeDriver for LtoTapeHandle {
 
         self.set_encryption(encrypt_fingerprint)?;
 
+        self.write_additional_attributes(None, Some(media_set_label.pool.clone()));
+
         Ok(())
     }
 
@@ -268,6 +270,14 @@ impl TapeDriver for LtoTapeHandle {
         }
         Ok(())
     }
+
+    fn get_volume_statistics(&mut self) -> Result {
+        self.volume_statistics()
+    }
+
+    fn write_additional_attributes(&mut self, label: Option<String>, pool: Option<String>) {
+        self.sg_tape.write_mam_attributes(label, pool)
+    }
 }
 
 fn run_sg_tape_cmd(subcmd: &str, args: &[&str], fd: RawFd) -> Result {
diff --git a/src/tape/drive/mod.rs b/src/tape/drive/mod.rs
index 8607d64b0..36c8627a0 100644
--- a/src/tape/drive/mod.rs
+++ b/src/tape/drive/mod.rs
@@ -1,10 +1,5 @@
 //! Tape drivers
 
-mod virtual_tape;
-
-mod lto;
-pub use lto::*;
-
 use std::path::PathBuf;
 
 use anyhow::{bail, format_err, Error};
@@ -12,23 +7,23 @@ use nix::fcntl::OFlag;
 use nix::sys::stat::Mode;
 use serde::Deserialize;
 use serde_json::Value;
-
-use proxmox_sys::fs::{
-    atomic_open_or_create_file, file_read_optional_string, lock_file, replace_file, CreateOptions,
-};
+use tracing::info;
 
 use proxmox_io::ReadExt;
 use proxmox_section_config::SectionConfigData;
-use proxmox_sys::{task_log, WorkerTaskContext};
+use proxmox_sys::fs::{
+    atomic_open_or_create_file, file_read_optional_string, lock_file, replace_file, CreateOptions,
+};
 use proxmox_uuid::Uuid;
+use proxmox_worker_task::WorkerTaskContext;
 
 use pbs_api_types::{Fingerprint, LtoTapeDrive, VirtualTapeDrive};
 use pbs_key_config::KeyConfig;
-
 use pbs_tape::{sg_tape::TapeAlertFlags, BlockReadError, MediaContentHeader, TapeRead, TapeWrite};
 
+use crate::tape::TapeNotificationMode;
 use crate::{
-    server::send_load_media_email,
+    server::send_load_media_notification,
     tape::{
         changer::{MediaChange, MtxMediaChanger},
         drive::virtual_tape::open_virtual_tape_drive,
@@ -40,6 +35,11 @@ use crate::{
     },
 };
 
+mod virtual_tape;
+
+mod lto;
+pub use lto::*;
+
 /// Tape driver interface
 pub trait TapeDriver {
     /// Flush all data to the tape
@@ -241,6 +241,14 @@ pub trait TapeDriver {
         }
         Ok(())
     }
+
+    /// Returns volume statistics from a loaded tape
+    fn get_volume_statistics(&mut self) -> Result;
+
+    /// Writes additional attributes on the drive, like the vendor/application/etc. (e.g. on MAM)
+    ///
+    /// Since it's not fatal when it does not work, it only logs warnings in that case
+    fn write_additional_attributes(&mut self, label: Option<String>, pool: Option<String>);
 }
 
 /// A boxed implementor of [`MediaChange`].
@@ -368,15 +376,13 @@ pub fn request_and_load_media(
     config: &SectionConfigData,
     drive: &str,
     label: &MediaLabel,
-    notify_email: &Option<String>,
+    notification_mode: &TapeNotificationMode,
 ) -> Result<(Box<dyn TapeDriver>, MediaId), Error> {
     let check_label = |handle: &mut dyn TapeDriver, uuid: &proxmox_uuid::Uuid| {
         if let Ok((Some(media_id), _)) = handle.read_label() {
-            task_log!(
-                worker,
+            info!(
                 "found media label {} ({})",
-                media_id.label.label_text,
-                media_id.label.uuid,
+                media_id.label.label_text, media_id.label.uuid,
             );
 
             if media_id.label.uuid == *uuid {
@@ -414,29 +420,24 @@ pub fn request_and_load_media(
                     let update_and_log_request_error =
                         |old: &mut TapeRequestError, new: TapeRequestError| -> Result<(), Error> {
                             if new != *old {
-                                task_log!(worker, "{}", new);
+                                info!("{new}");
                                 let (device_type, device) = if let Some(changer) = changer {
                                     ("changer", changer.as_str())
                                 } else {
                                     ("drive", drive)
                                 };
 
-                                task_log!(
-                                    worker,
-                                    "Please insert media '{}' into {} '{}'",
-                                    label_text,
-                                    device_type,
-                                    device
-                                );
-                                if let Some(to) = notify_email {
-                                    send_load_media_email(
-                                        changer.is_some(),
-                                        device,
-                                        &label_text,
-                                        to,
-                                        Some(new.to_string()),
-                                    )?;
-                                }
+                                info!(
+                                "Please insert media '{label_text}' into {device_type} '{device}'"
+                            );
+                                send_load_media_notification(
+                                    notification_mode,
+                                    changer.is_some(),
+                                    device,
+                                    &label_text,
+                                    Some(new.to_string()),
+                                )?;
+
                                 *old = new;
                             }
                             Ok(())
@@ -452,19 +453,9 @@ pub fn request_and_load_media(
                                 std::thread::sleep(std::time::Duration::from_millis(100));
                             }
                         } else if drive_config.changer.is_none() {
-                            task_log!(
-                                worker,
-                                "Checking for media '{}' in drive '{}'",
-                                label_text,
-                                drive
-                            );
+                            info!("Checking for media '{label_text}' in drive '{drive}'");
                         } else {
-                            task_log!(
-                                worker,
-                                "trying to load media '{}' into drive '{}'",
-                                label_text,
-                                drive
-                            );
+                            info!("trying to load media '{label_text}' into drive '{drive}'");
                         }
 
                         if drive_config.changer.is_some() {
@@ -491,8 +482,7 @@ pub fn request_and_load_media(
 
                         let request_error = match handle.read_label() {
                             Ok((Some(media_id), _)) if media_id.label.uuid == label.uuid => {
-                                task_log!(
-                                    worker,
+                                info!(
                                     "found media label {} ({})",
                                     media_id.label.label_text,
                                     media_id.label.uuid.to_string(),
@@ -608,7 +598,7 @@ pub struct DeviceLockGuard {
 // Uses systemd escape_unit to compute a file name from `device_path`, then tries
 // to lock `/var/lock/<name>`.
 fn open_device_lock(device_path: &str) -> Result<DeviceLockGuard, Error> {
-    let lock_name = proxmox_sys::systemd::escape_unit(device_path, true);
+    let lock_name = proxmox_systemd::escape_unit(device_path, true);
 
     let mut path = std::path::PathBuf::from(crate::tape::DRIVE_LOCK_DIR);
     path.push(lock_name);
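The lock-file name is now computed by the standalone `proxmox-systemd` crate instead of `proxmox_sys::systemd`. A rough stand-in for what the escaping accomplishes, using only the standard library; the real `escape_unit` implements systemd's full escaping rules, this sketch only handles the path-separator case and uses an illustrative lock directory:

use std::path::PathBuf;

// Simplified stand-in for proxmox_systemd::escape_unit(path, true): turn a
// device path into a flat file name so it can live directly in a lock dir.
fn simplified_escape(device_path: &str) -> String {
    device_path.trim_start_matches('/').replace('/', "-")
}

fn main() {
    let lock_name = simplified_escape("/dev/tape/by-id/scsi-XYZ-nst");
    let mut path = PathBuf::from("/run/lock/pbs-tape"); // illustrative directory
    path.push(lock_name);
    println!("{}", path.display());
}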
diff --git a/src/tape/drive/virtual_tape.rs b/src/tape/drive/virtual_tape.rs
index b13c58c4e..866e4d323 100644
--- a/src/tape/drive/virtual_tape.rs
+++ b/src/tape/drive/virtual_tape.rs
@@ -461,6 +461,14 @@ impl TapeDriver for VirtualTapeHandle {
         let status = VirtualDriveStatus { current_tape: None };
         self.store_status(&status)
     }
+
+    fn get_volume_statistics(&mut self) -> Result {
+        Ok(Default::default())
+    }
+
+    fn write_additional_attributes(&mut self, _label: Option<String>, _pool: Option<String>) {
+        // not implemented
+    }
 }
 
 impl MediaChange for VirtualTapeHandle {
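
Editor's note: the virtual (file-backed) test drive stubs the two new TapeDriver methods. The following is a minimal, standalone sketch of that stubbing pattern; all type and trait names here are invented for illustration and are not the PBS types.

```rust
#[derive(Default)]
struct VolumeStatistics {
    total_used_native_capacity: u64,
}

trait Drive {
    fn get_volume_statistics(&mut self) -> Result<VolumeStatistics, String>;
    fn write_additional_attributes(&mut self, label: Option<String>, pool: Option<String>);
}

struct VirtualDrive;

impl Drive for VirtualDrive {
    fn get_volume_statistics(&mut self) -> Result<VolumeStatistics, String> {
        // a file-backed test drive has nothing meaningful to report
        Ok(Default::default())
    }

    fn write_additional_attributes(&mut self, _label: Option<String>, _pool: Option<String>) {
        // not implemented
    }
}

fn main() {
    let mut drive = VirtualDrive;
    let stats = drive.get_volume_statistics().unwrap();
    println!("used capacity: {} bytes", stats.total_used_native_capacity);
}
```
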
diff --git a/src/tape/encryption_keys.rs b/src/tape/encryption_keys.rs
index 1f60c1c6e..5f9f6d18c 100644
--- a/src/tape/encryption_keys.rs
+++ b/src/tape/encryption_keys.rs
@@ -165,7 +165,7 @@ pub fn insert_key(key: [u8; 32], key_config: KeyConfig, force: bool) -> Result<(
         None => bail!("missing encryption key fingerprint - internal error"),
     };
 
-    if !force && config_map.get(&fingerprint).is_some() {
+    if !force && config_map.contains_key(&fingerprint) {
         bail!("encryption key '{}' already exists.", fingerprint);
     }
 
diff --git a/src/tape/file_formats/mod.rs b/src/tape/file_formats/mod.rs
index 0f9830597..7b99cb996 100644
--- a/src/tape/file_formats/mod.rs
+++ b/src/tape/file_formats/mod.rs
@@ -2,6 +2,7 @@
 //! tapes
 
 use std::collections::HashMap;
+use std::sync::LazyLock;
 
 use endian_trait::Endian;
 use serde::{Deserialize, Serialize};
@@ -56,22 +57,48 @@ pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0: [u8; 8] =
 // openssl::sha::sha256(b"Proxmox Backup Catalog Archive v1.1")[0..8];
 pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1: [u8; 8] = [179, 236, 113, 240, 173, 236, 2, 96];
 
-lazy_static::lazy_static! {
-    // Map content magic numbers to human readable names.
-    static ref PROXMOX_TAPE_CONTENT_NAME: HashMap<&'static [u8;8], &'static str> = {
+// Map content magic numbers to human readable names.
+static PROXMOX_TAPE_CONTENT_NAME: LazyLock<HashMap<&'static [u8; 8], &'static str>> =
+    LazyLock::new(|| {
         let mut map = HashMap::new();
-        map.insert(&PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, "Proxmox Backup Tape Label v1.0");
-        map.insert(&PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, "Proxmox Backup MediaSet Label v1.0");
-        map.insert(&PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, "Proxmox Backup Chunk Archive v1.0");
-        map.insert(&PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, "Proxmox Backup Chunk Archive v1.1");
-        map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, "Proxmox Backup Snapshot Archive v1.0");
-        map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, "Proxmox Backup Snapshot Archive v1.1");
-        map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2, "Proxmox Backup Snapshot Archive v1.2");
-        map.insert(&PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, "Proxmox Backup Catalog Archive v1.0");
-        map.insert(&PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1, "Proxmox Backup Catalog Archive v1.1");
+        map.insert(
+            &PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
+            "Proxmox Backup Tape Label v1.0",
+        );
+        map.insert(
+            &PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
+            "Proxmox Backup MediaSet Label v1.0",
+        );
+        map.insert(
+            &PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
+            "Proxmox Backup Chunk Archive v1.0",
+        );
+        map.insert(
+            &PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
+            "Proxmox Backup Chunk Archive v1.1",
+        );
+        map.insert(
+            &PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
+            "Proxmox Backup Snapshot Archive v1.0",
+        );
+        map.insert(
+            &PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
+            "Proxmox Backup Snapshot Archive v1.1",
+        );
+        map.insert(
+            &PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2,
+            "Proxmox Backup Snapshot Archive v1.2",
+        );
+        map.insert(
+            &PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
+            "Proxmox Backup Catalog Archive v1.0",
+        );
+        map.insert(
+            &PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1,
+            "Proxmox Backup Catalog Archive v1.1",
+        );
         map
-    };
-}
+    });
 
 /// Map content magic numbers to human readable names.
 pub fn proxmox_tape_magic_to_text(magic: &[u8; 8]) -> Option {
@@ -150,7 +177,7 @@ pub struct MediaSetLabel {
     pub seq_nr: u64,
     /// Creation time stamp
     pub ctime: i64,
-    /// Encryption key finkerprint (if encryped)
+    /// Encryption key fingerprint (if encrypted)
     #[serde(skip_serializing_if = "Option::is_none")]
     pub encryption_key_fingerprint: Option<Fingerprint>,
 }
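
Editor's note: the hunk above is one instance of the lazy_static! to std::sync::LazyLock migration applied throughout this patch (LazyLock is available in std since Rust 1.80). A hypothetical miniature of the same pattern, runnable on its own; the key below is a placeholder, not a real magic number:

```rust
use std::collections::HashMap;
use std::sync::LazyLock;

// lazily built lookup table without the lazy_static! macro
static CONTENT_NAMES: LazyLock<HashMap<&'static [u8; 8], &'static str>> = LazyLock::new(|| {
    let mut map = HashMap::new();
    map.insert(b"PROXBKUP", "example entry"); // placeholder key
    map
});

fn lookup(magic: &[u8; 8]) -> Option<&'static str> {
    CONTENT_NAMES.get(magic).copied()
}

fn main() {
    assert_eq!(lookup(b"PROXBKUP"), Some("example entry"));
    assert_eq!(lookup(b"12345678"), None);
}
```
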
diff --git a/src/tape/file_formats/snapshot_archive.rs b/src/tape/file_formats/snapshot_archive.rs
index 252384b50..f5a588f4e 100644
--- a/src/tape/file_formats/snapshot_archive.rs
+++ b/src/tape/file_formats/snapshot_archive.rs
@@ -58,8 +58,11 @@ pub fn tape_write_snapshot_archive<'a>(
             ));
         }
 
-        let mut encoder =
-            pxar::encoder::sync::Encoder::new(PxarTapeWriter::new(writer), &root_metadata)?;
+        let mut encoder = pxar::encoder::sync::Encoder::new(
+            pxar::PxarVariant::Unified(PxarTapeWriter::new(writer)),
+            &root_metadata,
+            None,
+        )?;
 
         for filename in file_list.iter() {
             let mut file = snapshot_reader.open_file(filename).map_err(|err| {
@@ -89,6 +92,7 @@ pub fn tape_write_snapshot_archive<'a>(
             }
         }
         encoder.finish()?;
+        encoder.close()?;
         Ok(())
     });
 
diff --git a/src/tape/inventory.rs b/src/tape/inventory.rs
index 7514d76c0..5e4318e21 100644
--- a/src/tape/inventory.rs
+++ b/src/tape/inventory.rs
@@ -84,6 +84,8 @@ struct MediaStateEntry {
     location: Option<MediaLocation>,
     #[serde(skip_serializing_if = "Option::is_none")]
     status: Option<MediaStatus>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    bytes_used: Option<u64>,
 }
 
 /// Media Inventory
@@ -211,6 +213,7 @@ impl Inventory {
                 } else {
                     previous.status
                 },
+                bytes_used: previous.bytes_used,
             };
             self.map.insert(uuid, entry);
         } else {
@@ -218,6 +221,7 @@ impl Inventory {
                 id: media_id,
                 location: None,
                 status: None,
+                bytes_used: None,
             };
             self.map.insert(uuid, entry);
         }
@@ -720,6 +724,32 @@ impl Inventory {
         self.set_media_location(uuid, Some(MediaLocation::Offline))
     }
 
+    /// Lock database, reload database, set bytes used for media, store database
+    pub fn set_media_bytes_used(
+        &mut self,
+        uuid: &Uuid,
+        bytes_used: Option<u64>,
+    ) -> Result<(), Error> {
+        let _lock = self.lock()?;
+        self.map = self.load_media_db()?;
+        if let Some(entry) = self.map.get_mut(uuid) {
+            entry.bytes_used = bytes_used;
+            self.update_helpers();
+            self.replace_file()?;
+            Ok(())
+        } else {
+            bail!("no such media '{}'", uuid);
+        }
+    }
+
+    /// Returns bytes used of the given media, if set
+    pub fn get_media_bytes_used(&self, uuid: &Uuid) -> Option<u64> {
+        match self.map.get(uuid) {
+            Some(entry) => entry.bytes_used,
+            None => None,
+        }
+    }
+
     /// Update online status
     pub fn update_online_status(&mut self, online_map: &OnlineStatusMap) -> Result<(), Error> {
         let _lock = self.lock()?;
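
Editor's note: the new optional bytes_used entry is only written when set, which keeps existing media state files readable. A small sketch of why that works, using a simplified stand-in for MediaStateEntry (assumes the serde and serde_json crates):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct Entry {
    id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    bytes_used: Option<u64>,
}

fn main() -> Result<(), serde_json::Error> {
    // an old record without the key still parses; the field becomes None
    let old: Entry = serde_json::from_str(r#"{"id":"tape1"}"#)?;
    assert_eq!(old.bytes_used, None);

    // and None is simply omitted again on serialization
    assert_eq!(serde_json::to_string(&old)?, r#"{"id":"tape1"}"#);
    Ok(())
}
```
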
diff --git a/src/tape/media_catalog.rs b/src/tape/media_catalog.rs
index 9aae0aa12..fa8ff4058 100644
--- a/src/tape/media_catalog.rs
+++ b/src/tape/media_catalog.rs
@@ -946,7 +946,7 @@ impl MediaSetCatalog {
 
     /// Add a catalog
     pub fn append_catalog(&mut self, catalog: MediaCatalog) -> Result<(), Error> {
-        if self.catalog_list.get(&catalog.uuid).is_some() {
+        if self.catalog_list.contains_key(&catalog.uuid) {
             bail!("MediaSetCatalog already contains media '{}'", catalog.uuid);
         }
 
diff --git a/src/tape/media_pool.rs b/src/tape/media_pool.rs
index 8f2b0adda..1e8c739e7 100644
--- a/src/tape/media_pool.rs
+++ b/src/tape/media_pool.rs
@@ -212,8 +212,11 @@ impl MediaPool {
         }
 
         let (status, location) = self.compute_media_state(&media_id);
+        let bytes_used = self.inventory.get_media_bytes_used(uuid);
 
-        Ok(BackupMedia::with_media_id(media_id, location, status))
+        Ok(BackupMedia::with_media_id(
+            media_id, location, status, bytes_used,
+        ))
     }
 
     /// List all media associated with this pool
@@ -224,7 +227,8 @@ impl MediaPool {
             .into_iter()
             .map(|media_id| {
                 let (status, location) = self.compute_media_state(&media_id);
-                BackupMedia::with_media_id(media_id, location, status)
+                let bytes_used = self.inventory.get_media_bytes_used(&media_id.label.uuid);
+                BackupMedia::with_media_id(media_id, location, status, bytes_used)
             })
             .collect()
     }
@@ -238,6 +242,15 @@ impl MediaPool {
         Ok(())
     }
 
+    /// Update bytes used for media in inventory
+    pub fn set_media_bytes_used(
+        &mut self,
+        uuid: &Uuid,
+        bytes_used: Option<u64>,
+    ) -> Result<(), Error> {
+        self.inventory.set_media_bytes_used(uuid, bytes_used)
+    }
+
     /// Make sure the current media set is usable for writing
     ///
     /// If not, starts a new media set. Also creates a new
@@ -715,15 +728,23 @@ pub struct BackupMedia {
     location: MediaLocation,
     /// Media status
     status: MediaStatus,
+    /// Bytes used
+    bytes_used: Option<u64>,
 }
 
 impl BackupMedia {
     /// Creates a new instance
-    pub fn with_media_id(id: MediaId, location: MediaLocation, status: MediaStatus) -> Self {
+    pub fn with_media_id(
+        id: MediaId,
+        location: MediaLocation,
+        status: MediaStatus,
+        bytes_used: Option<u64>,
+    ) -> Self {
         Self {
             id,
             location,
             status,
+            bytes_used,
         }
     }
 
@@ -776,4 +797,9 @@ impl BackupMedia {
     pub fn label_text(&self) -> &str {
         &self.id.label.label_text
     }
+
+    /// Returns the bytes used, if set
+    pub fn bytes_used(&self) -> Option<u64> {
+        self.bytes_used
+    }
 }
diff --git a/src/tape/mod.rs b/src/tape/mod.rs
index 7a9288842..f276f9482 100644
--- a/src/tape/mod.rs
+++ b/src/tape/mod.rs
@@ -1,6 +1,7 @@
 //! Magnetic tape backup
 
 use anyhow::{format_err, Error};
+use proxmox_auth_api::types::Userid;
 
 use proxmox_sys::fs::{create_path, CreateOptions};
 
@@ -29,6 +30,7 @@ pub use media_catalog::*;
 
 mod media_catalog_cache;
 pub use media_catalog_cache::*;
+use pbs_api_types::{NotificationMode, TapeBackupJobSetup};
 
 mod pool_writer;
 pub use pool_writer::*;
@@ -128,3 +130,28 @@ pub fn create_changer_state_dir() -> Result<(), Error> {
 
     Ok(())
 }
+
+#[derive(Clone)]
+pub enum TapeNotificationMode {
+    LegacySendmail { notify_user: Userid },
+    NotificationSystem,
+}
+
+impl From<&TapeBackupJobSetup> for TapeNotificationMode {
+    fn from(value: &TapeBackupJobSetup) -> Self {
+        Self::from((value.notify_user.clone(), value.notification_mode.clone()))
+    }
+}
+
+impl From<(Option<Userid>, Option<NotificationMode>)> for TapeNotificationMode {
+    fn from(value: (Option<Userid>, Option<NotificationMode>)) -> Self {
+        match value.1.as_ref().unwrap_or(&Default::default()) {
+            NotificationMode::LegacySendmail => {
+                let notify_user = value.0.as_ref().unwrap_or(Userid::root_userid()).clone();
+
+                Self::LegacySendmail { notify_user }
+            }
+            NotificationMode::NotificationSystem => Self::NotificationSystem,
+        }
+    }
+}
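
Editor's note: a standalone model of the fallback logic in the conversion above. The real Userid and NotificationMode types live in proxmox-auth-api and pbs-api-types; which variant is the Default is a placeholder here, and the user type is simplified to a String.

```rust
#[derive(Debug, Default, PartialEq)]
enum NotificationMode {
    LegacySendmail,
    #[default]
    NotificationSystem, // placeholder default, not necessarily PBS's
}

#[derive(Debug, PartialEq)]
enum TapeNotificationMode {
    LegacySendmail { notify_user: String },
    NotificationSystem,
}

impl From<(Option<String>, Option<NotificationMode>)> for TapeNotificationMode {
    fn from((notify_user, mode): (Option<String>, Option<NotificationMode>)) -> Self {
        match mode.unwrap_or_default() {
            NotificationMode::LegacySendmail => Self::LegacySendmail {
                // fall back to the root user when no explicit recipient is set
                notify_user: notify_user.unwrap_or_else(|| "root@pam".to_string()),
            },
            NotificationMode::NotificationSystem => Self::NotificationSystem,
        }
    }
}

fn main() {
    let mode = TapeNotificationMode::from((None, Some(NotificationMode::LegacySendmail)));
    assert_eq!(
        mode,
        TapeNotificationMode::LegacySendmail { notify_user: "root@pam".into() }
    );
}
```
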
diff --git a/src/tape/pool_writer/mod.rs b/src/tape/pool_writer/mod.rs
index a6ba4a1d3..9731e1cc8 100644
--- a/src/tape/pool_writer/mod.rs
+++ b/src/tape/pool_writer/mod.rs
@@ -11,8 +11,8 @@ use std::sync::{Arc, Mutex};
 use std::time::SystemTime;
 
 use anyhow::{bail, Error};
+use tracing::{info, warn};
 
-use proxmox_sys::{task_log, task_warn};
 use proxmox_uuid::Uuid;
 
 use pbs_datastore::{DataStore, SnapshotReader};
@@ -25,7 +25,8 @@ use crate::tape::{
     file_formats::{
         tape_write_catalog, tape_write_snapshot_archive, ChunkArchiveWriter, MediaSetLabel,
     },
-    MediaCatalog, MediaId, MediaPool, COMMIT_BLOCK_SIZE, MAX_CHUNK_ARCHIVE_SIZE, TAPE_STATUS_DIR,
+    MediaCatalog, MediaId, MediaPool, TapeNotificationMode, COMMIT_BLOCK_SIZE,
+    MAX_CHUNK_ARCHIVE_SIZE, TAPE_STATUS_DIR,
 };
 
 use super::file_formats::{
@@ -42,8 +43,8 @@ struct PoolWriterState {
     media_uuid: Uuid,
     // tell if we already moved to EOM
     at_eom: bool,
-    // bytes written after the last tape fush/sync
-    bytes_written: usize,
+    // bytes written after the last tape flush/sync and catalog commit
+    bytes_written_after_sync: usize,
 }
 
 /// Helper to manage a backup job, writing several tapes of a pool
@@ -52,7 +53,7 @@ pub struct PoolWriter {
     drive_name: String,
     status: Option<PoolWriterState>,
     catalog_set: Arc<Mutex<CatalogSet>>,
-    notify_email: Option<String>,
+    notification_mode: TapeNotificationMode,
     ns_magic: bool,
     used_tapes: HashSet<Uuid>,
 }
@@ -61,8 +62,7 @@ impl PoolWriter {
     pub fn new(
         mut pool: MediaPool,
         drive_name: &str,
-        worker: &WorkerTask,
-        notify_email: Option,
+        notification_mode: TapeNotificationMode,
         force_media_set: bool,
         ns_magic: bool,
     ) -> Result<Self, Error> {
@@ -70,11 +70,11 @@ impl PoolWriter {
 
         let new_media_set_reason = pool.start_write_session(current_time, force_media_set)?;
         if let Some(reason) = new_media_set_reason {
-            task_log!(worker, "starting new media set - reason: {}", reason,);
+            info!("starting new media set - reason: {reason}");
         }
 
         let media_set_uuid = pool.current_media_set().uuid();
-        task_log!(worker, "media set uuid: {}", media_set_uuid);
+        info!("media set uuid: {media_set_uuid}");
 
         let mut catalog_set = CatalogSet::new();
 
@@ -90,7 +90,7 @@ impl PoolWriter {
             drive_name: drive_name.to_string(),
             status: None,
             catalog_set: Arc::new(Mutex::new(catalog_set)),
-            notify_email,
+            notification_mode,
             ns_magic,
             used_tapes: HashSet::new(),
         })
@@ -129,7 +129,7 @@ impl PoolWriter {
     }
 
     /// Eject media and drop PoolWriterState (close drive)
-    pub fn eject_media(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+    pub fn eject_media(&mut self) -> Result<(), Error> {
         let mut status = match self.status.take() {
             Some(status) => status,
             None => return Ok(()), // no media loaded
@@ -138,13 +138,13 @@ impl PoolWriter {
         let (drive_config, _digest) = pbs_config::drive::config()?;
 
         if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
-            task_log!(worker, "eject media");
+            info!("eject media");
             status.drive.eject_media()?; // rewind and eject early, so that unload_media is faster
             drop(status); // close drive
-            task_log!(worker, "unload media");
+            info!("unload media");
             changer.unload_media(None)?; //eject and unload
         } else {
-            task_log!(worker, "standalone drive - ejecting media");
+            info!("standalone drive - ejecting media");
             status.drive.eject_media()?;
         }
 
@@ -152,14 +152,14 @@ impl PoolWriter {
     }
 
     /// Export current media set and drop PoolWriterState (close drive)
-    pub fn export_media_set(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+    pub fn export_media_set(&mut self) -> Result<(), Error> {
         let mut status = self.status.take();
 
         let (drive_config, _digest) = pbs_config::drive::config()?;
 
         if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
             if let Some(ref mut status) = status {
-                task_log!(worker, "rewind media");
+                info!("rewind media");
                 // rewind first so that the unload command later does not run into a timeout
                 status.drive.rewind()?;
             }
@@ -169,25 +169,15 @@ impl PoolWriter {
                 let media = self.pool.lookup_media(media_uuid)?;
                 let label_text = media.label_text();
                 if let Some(slot) = changer.export_media(label_text)? {
-                    task_log!(
-                        worker,
-                        "exported media '{}' to import/export slot {}",
-                        label_text,
-                        slot
-                    );
+                    info!("exported media '{label_text}' to import/export slot {slot}");
                 } else {
-                    task_warn!(
-                        worker,
-                        "export failed - media '{}' is not online or in different drive",
-                        label_text
+                    warn!(
+                        "export failed - media '{label_text}' is not online or in different drive"
                     );
                 }
             }
         } else if let Some(mut status) = status {
-            task_log!(
-                worker,
-                "standalone drive - ejecting media instead of export"
-            );
+            info!("standalone drive - ejecting media instead of export");
             status.drive.eject_media()?;
         }
 
@@ -199,8 +189,17 @@ impl PoolWriter {
     /// This is done automatically during a backup session, but needs to
     /// be called explicitly before dropping the PoolWriter
     pub fn commit(&mut self) -> Result<(), Error> {
-        if let Some(PoolWriterState { ref mut drive, .. }) = self.status {
-            drive.sync()?; // sync all data to the tape
+        if let Some(ref mut status) = self.status {
+            status.drive.sync()?; // sync all data to the tape
+            status.bytes_written_after_sync = 0; // reset bytes written
+
+            // not all drives support that
+            if let Ok(stats) = status.drive.get_volume_statistics() {
+                self.pool.set_media_bytes_used(
+                    &status.media_uuid,
+                    Some(stats.total_used_native_capacity),
+                )?;
+            }
         }
         self.catalog_set.lock().unwrap().commit()?; // then commit the catalog
         Ok(())
@@ -228,15 +227,17 @@ impl PoolWriter {
             return Ok(media_uuid);
         }
 
-        task_log!(
-            worker,
-            "allocated new writable media '{}'",
-            media.label_text()
-        );
+        info!("allocated new writable media '{}'", media.label_text());
 
         if let Some(PoolWriterState { mut drive, .. }) = self.status.take() {
-            if last_media_uuid.is_some() {
-                task_log!(worker, "eject current media");
+            if let Some(uuid) = &last_media_uuid {
+                // not all drives support that
+                if let Ok(stats) = drive.get_volume_statistics() {
+                    self.pool
+                        .set_media_bytes_used(uuid, Some(stats.total_used_native_capacity))?;
+                }
+
+                info!("eject current media");
                 drive.eject_media()?;
             }
         }
@@ -248,13 +249,13 @@ impl PoolWriter {
             &drive_config,
             &self.drive_name,
             media.label(),
-            &self.notify_email,
+            &self.notification_mode,
         )?;
 
         // test for critical tape alert flags
         if let Ok(alert_flags) = drive.tape_alert_flags() {
             if !alert_flags.is_empty() {
-                task_log!(worker, "TapeAlertFlags: {:?}", alert_flags);
+                info!("TapeAlertFlags: {alert_flags:?}");
                 if tape_alert_flags_critical(alert_flags) {
                     self.pool.set_media_status_damaged(&media_uuid)?;
                     bail!(
@@ -265,20 +266,15 @@ impl PoolWriter {
             }
         }
 
-        let (catalog, is_new_media) = update_media_set_label(
-            worker,
-            drive.as_mut(),
-            old_media_id.media_set_label,
-            media.id(),
-        )?;
+        let (catalog, is_new_media) =
+            update_media_set_label(drive.as_mut(), old_media_id.media_set_label, media.id())?;
 
         self.catalog_set.lock().unwrap().append_catalog(catalog)?;
 
         let media_set = media.media_set_label().unwrap();
 
         if is_new_media && media_set.seq_nr >= MEDIA_SET_SEQ_NR_WARN_LIMIT {
-            task_warn!(
-                worker,
+            warn!(
                 "large media-set detected ({}), consider using a different allocation policy",
                 media_set.seq_nr
             );
@@ -290,12 +286,12 @@ impl PoolWriter {
             drive,
             media_uuid: media_uuid.clone(),
             at_eom: false,
-            bytes_written: 0,
+            bytes_written_after_sync: 0,
         });
 
         if is_new_media {
             // add catalogs from previous media
-            self.append_media_set_catalogs(worker)?;
+            self.append_media_set_catalogs()?;
         }
 
         self.used_tapes.insert(media_uuid.clone());
@@ -315,12 +311,12 @@ impl PoolWriter {
     // Check if tape is loaded, then move to EOM (if not already there)
     //
     // Returns the tape position at EOM.
-    fn prepare_tape_write(status: &mut PoolWriterState, worker: &WorkerTask) -> Result {
+    fn prepare_tape_write(status: &mut PoolWriterState) -> Result {
         if !status.at_eom {
-            task_log!(worker, "moving to end of media");
+            info!("moving to end of media");
             status.drive.move_to_eom(true)?;
             status.at_eom = true;
-            task_log!(worker, "arrived at end of media");
+            info!("arrived at end of media");
         }
 
         let current_file_number = status.drive.current_file_number()?;
@@ -341,7 +337,7 @@ impl PoolWriter {
     /// on the media (return value 'Ok(false, _)'). In that case, the
     /// archive is marked incomplete. The caller should mark the media
     /// as full and try again using another media.
-    pub fn append_catalog_archive(&mut self, worker: &WorkerTask) -> Result {
+    pub fn append_catalog_archive(&mut self) -> Result {
         let catalog_magic = self.catalog_version();
 
         let status = match self.status {
@@ -349,7 +345,7 @@ impl PoolWriter {
             None => bail!("PoolWriter - no media loaded"),
         };
 
-        Self::prepare_tape_write(status, worker)?;
+        Self::prepare_tape_write(status)?;
 
         let catalog_set = self.catalog_set.lock().unwrap();
 
@@ -392,7 +388,7 @@ impl PoolWriter {
     }
 
     // Append catalogs for all previous media in set (without last)
-    fn append_media_set_catalogs(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+    fn append_media_set_catalogs(&mut self) -> Result<(), Error> {
         let media_set = self.pool.current_media_set();
 
         let mut media_list = media_set.media_list();
@@ -408,7 +404,7 @@ impl PoolWriter {
             None => bail!("PoolWriter - no media loaded"),
         };
 
-        Self::prepare_tape_write(status, worker)?;
+        Self::prepare_tape_write(status)?;
 
         for (seq_nr, uuid) in media_list.iter().enumerate() {
             let uuid = match uuid {
@@ -420,7 +416,7 @@ impl PoolWriter {
 
             let mut file = Self::open_catalog_file(uuid)?;
 
-            task_log!(worker, "write catalog for previous media: {}", uuid);
+            info!("write catalog for previous media: {uuid}");
 
             if tape_write_catalog(
                 writer.as_mut(),
@@ -451,7 +447,6 @@ impl PoolWriter {
     /// media.
     pub fn append_snapshot_archive(
         &mut self,
-        worker: &WorkerTask,
         snapshot_reader: &SnapshotReader,
     ) -> Result<(bool, usize), Error> {
         let status = match self.status {
@@ -459,7 +454,7 @@ impl PoolWriter {
             None => bail!("PoolWriter - no media loaded"),
         };
 
-        let current_file_number = Self::prepare_tape_write(status, worker)?;
+        let current_file_number = Self::prepare_tape_write(status)?;
 
         let (done, bytes_written) = {
             let mut writer: Box = status.drive.write_file()?;
@@ -479,9 +474,9 @@ impl PoolWriter {
             }
         };
 
-        status.bytes_written += bytes_written;
+        status.bytes_written_after_sync += bytes_written;
 
-        let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;
+        let request_sync = status.bytes_written_after_sync >= COMMIT_BLOCK_SIZE;
 
         if !done || request_sync {
             self.commit()?;
@@ -505,7 +500,7 @@ impl PoolWriter {
             None => bail!("PoolWriter - no media loaded"),
         };
 
-        let current_file_number = Self::prepare_tape_write(status, worker)?;
+        let current_file_number = Self::prepare_tape_write(status)?;
 
         let writer = status.drive.write_file()?;
 
@@ -514,18 +509,17 @@ impl PoolWriter {
         let (saved_chunks, content_uuid, leom, bytes_written) =
             write_chunk_archive(worker, writer, chunk_iter, store, MAX_CHUNK_ARCHIVE_SIZE)?;
 
-        status.bytes_written += bytes_written;
+        status.bytes_written_after_sync += bytes_written;
 
         let elapsed = start_time.elapsed()?.as_secs_f64();
-        task_log!(
-            worker,
+        info!(
             "wrote {} chunks ({:.2} MB at {:.2} MB/s)",
             saved_chunks.len(),
             bytes_written as f64 / 1_000_000.0,
             (bytes_written as f64) / (1_000_000.0 * elapsed),
         );
 
-        let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;
+        let request_sync = status.bytes_written_after_sync >= COMMIT_BLOCK_SIZE;
 
         // register chunks in media_catalog
         self.catalog_set.lock().unwrap().register_chunk_archive(
@@ -598,7 +592,7 @@ fn write_chunk_archive<'a>(
         }
 
         if writer.bytes_written() > max_size {
-            //task_log!(worker, "Chunk Archive max size reached, closing archive");
+            //info!("Chunk Archive max size reached, closing archive");
             break;
         }
     }
@@ -612,7 +606,6 @@ fn write_chunk_archive<'a>(
 // set label does not match the expected media set, overwrite the
 // media set label.
 fn update_media_set_label(
-    worker: &WorkerTask,
     drive: &mut dyn TapeDriver,
     old_set: Option<MediaSetLabel>,
     media_id: &MediaId,
@@ -641,7 +634,7 @@ fn update_media_set_label(
 
     let new_media = match old_set {
         None => {
-            task_log!(worker, "writing new media set label");
+            info!("writing new media set label");
             drive.write_media_set_label(new_set, key_config.as_ref())?;
             media_catalog = MediaCatalog::overwrite(TAPE_STATUS_DIR, media_id, false)?;
             true
@@ -665,8 +658,7 @@ fn update_media_set_label(
 
                 false
             } else {
-                task_log!(
-                    worker,
+                info!(
                     "writing new media set label (overwrite '{}/{}')",
                     media_set_label.uuid.to_string(),
                     media_set_label.seq_nr,
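
Editor's note: besides the logging migration, the renamed counter also changes the flush accounting: commit() now resets it, so the COMMIT_BLOCK_SIZE threshold applies per sync interval instead of accumulating for the writer's lifetime. A standalone model of that accounting (the constant value below is made up for the example; PBS defines its own COMMIT_BLOCK_SIZE):

```rust
const COMMIT_BLOCK_SIZE: usize = 128 * 1024 * 1024;

struct WriterState {
    bytes_written_after_sync: usize,
}

impl WriterState {
    fn commit(&mut self) {
        // sync data to tape, then commit the catalog ...
        self.bytes_written_after_sync = 0; // reset bytes written
    }

    fn append(&mut self, bytes_written: usize, done: bool) {
        self.bytes_written_after_sync += bytes_written;
        let request_sync = self.bytes_written_after_sync >= COMMIT_BLOCK_SIZE;
        if !done || request_sync {
            self.commit();
        }
    }
}

fn main() {
    let mut state = WriterState { bytes_written_after_sync: 0 };
    state.append(200 * 1024 * 1024, true); // exceeds the threshold -> commit
    assert_eq!(state.bytes_written_after_sync, 0);
}
```
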
diff --git a/src/tape/test/alloc_writable_media.rs b/src/tape/test/alloc_writable_media.rs
index d6e87bebe..ff9a63489 100644
--- a/src/tape/test/alloc_writable_media.rs
+++ b/src/tape/test/alloc_writable_media.rs
@@ -1,4 +1,4 @@
-// Tape Media Pool tests - test allow_ritable_media() function
+// Tape Media Pool tests - test alloc_writable_media() function
 //
 // # cargo test --release tape::test::alloc_writable_media
 
@@ -28,7 +28,7 @@ fn test_alloc_writable_media_1() -> Result<(), Error> {
 
     let mut pool = MediaPool::new(
         "p1",
-        &testdir,
+        testdir,
         MediaSetPolicy::ContinueCurrent,
         RetentionPolicy::KeepForever,
         None,
diff --git a/src/tape/test/current_set_usable.rs b/src/tape/test/current_set_usable.rs
index 2cf2fe942..4a8bd470b 100644
--- a/src/tape/test/current_set_usable.rs
+++ b/src/tape/test/current_set_usable.rs
@@ -30,7 +30,7 @@ fn test_current_set_usable_1() -> Result<(), Error> {
 
     let pool = MediaPool::new(
         "p1",
-        &testdir,
+        testdir,
         MediaSetPolicy::AlwaysCreate,
         RetentionPolicy::KeepForever,
         None,
diff --git a/src/tape/test/inventory.rs b/src/tape/test/inventory.rs
index 53df2558a..501b73785 100644
--- a/src/tape/test/inventory.rs
+++ b/src/tape/test/inventory.rs
@@ -26,7 +26,7 @@ fn create_testdir(name: &str) -> Result {
 fn test_media_state_db() -> Result<(), Error> {
     let testdir = create_testdir("test_media_state_db")?;
 
-    let mut inventory = Inventory::load(&testdir)?;
+    let mut inventory = Inventory::load(testdir)?;
 
     let uuid1: Uuid = inventory.generate_free_tape("tape1", 0);
 
@@ -75,7 +75,7 @@ fn test_media_state_db() -> Result<(), Error> {
 #[test]
 fn test_list_pool_media() -> Result<(), Error> {
     let testdir = create_testdir("test_list_pool_media")?;
-    let mut inventory = Inventory::load(&testdir)?;
+    let mut inventory = Inventory::load(testdir)?;
 
     let ctime = 0;
 
@@ -198,7 +198,7 @@ fn test_latest_media_set() -> Result<(), Error> {
         assert_eq!(media.label.label_text, label);
     };
 
-    let mut inventory = Inventory::load(&testdir)?;
+    let mut inventory = Inventory::load(testdir)?;
 
     let ctime = 0;
 
diff --git a/src/tools/apt.rs b/src/tools/apt.rs
deleted file mode 100644
index 900843aae..000000000
--- a/src/tools/apt.rs
+++ /dev/null
@@ -1,293 +0,0 @@
-use std::collections::HashMap;
-use std::collections::HashSet;
-
-use anyhow::{bail, format_err, Error};
-use apt_pkg_native::Cache;
-
-use proxmox_schema::const_regex;
-use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
-
-use pbs_api_types::APTUpdateInfo;
-use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M;
-
-const APT_PKG_STATE_FN: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/pkg-state.json");
-
-#[derive(Debug, serde::Serialize, serde::Deserialize)]
-/// Some information we cache about the package (update) state, like what pending update version
-/// we already notfied an user about
-pub struct PkgState {
-    /// simple map from package name to most recently notified (emailed) version
-    pub notified: Option>,
-    /// A list of pending updates
-    pub package_status: Vec,
-}
-
-pub fn write_pkg_cache(state: &PkgState) -> Result<(), Error> {
-    let serialized_state = serde_json::to_string(state)?;
-
-    replace_file(
-        APT_PKG_STATE_FN,
-        serialized_state.as_bytes(),
-        CreateOptions::new(),
-        false,
-    )
-    .map_err(|err| format_err!("Error writing package cache - {}", err))?;
-    Ok(())
-}
-
-pub fn read_pkg_state() -> Result, Error> {
-    let serialized_state = match file_read_optional_string(APT_PKG_STATE_FN) {
-        Ok(Some(raw)) => raw,
-        Ok(None) => return Ok(None),
-        Err(err) => bail!("could not read cached package state file - {}", err),
-    };
-
-    serde_json::from_str(&serialized_state)
-        .map(Some)
-        .map_err(|err| format_err!("could not parse cached package status - {}", err))
-}
-
-pub fn pkg_cache_expired() -> Result {
-    if let Ok(pbs_cache) = std::fs::metadata(APT_PKG_STATE_FN) {
-        let apt_pkgcache = std::fs::metadata("/var/cache/apt/pkgcache.bin")?;
-        let dpkg_status = std::fs::metadata("/var/lib/dpkg/status")?;
-
-        let mtime = pbs_cache.modified()?;
-
-        if apt_pkgcache.modified()? <= mtime && dpkg_status.modified()? <= mtime {
-            return Ok(false);
-        }
-    }
-    Ok(true)
-}
-
-pub fn update_cache() -> Result {
-    // update our cache
-    let all_upgradeable = list_installed_apt_packages(
-        |data| {
-            data.candidate_version == data.active_version
-                && data.installed_version != Some(data.candidate_version)
-        },
-        None,
-    );
-
-    let cache = match read_pkg_state() {
-        Ok(Some(mut cache)) => {
-            cache.package_status = all_upgradeable;
-            cache
-        }
-        _ => PkgState {
-            notified: None,
-            package_status: all_upgradeable,
-        },
-    };
-    write_pkg_cache(&cache)?;
-    Ok(cache)
-}
-
-const_regex! {
-    VERSION_EPOCH_REGEX = r"^\d+:";
-    FILENAME_EXTRACT_REGEX = r"^.*/.*?_(.*)_Packages$";
-}
-
-pub struct FilterData<'a> {
-    /// package name
-    pub package: &'a str,
-    /// this is version info returned by APT
-    pub installed_version: Option<&'a str>,
-    pub candidate_version: &'a str,
-
-    /// this is the version info the filter is supposed to check
-    pub active_version: &'a str,
-}
-
-enum PackagePreSelect {
-    OnlyInstalled,
-    OnlyNew,
-    All,
-}
-
-pub fn list_installed_apt_packages bool>(
-    filter: F,
-    only_versions_for: Option<&str>,
-) -> Vec {
-    let mut ret = Vec::new();
-    let mut depends = HashSet::new();
-
-    // note: this is not an 'apt update', it just re-reads the cache from disk
-    let mut cache = Cache::get_singleton();
-    cache.reload();
-
-    let mut cache_iter = match only_versions_for {
-        Some(name) => cache.find_by_name(name),
-        None => cache.iter(),
-    };
-
-    loop {
-        match cache_iter.next() {
-            Some(view) => {
-                let di = if only_versions_for.is_some() {
-                    query_detailed_info(PackagePreSelect::All, &filter, view, None)
-                } else {
-                    query_detailed_info(
-                        PackagePreSelect::OnlyInstalled,
-                        &filter,
-                        view,
-                        Some(&mut depends),
-                    )
-                };
-                if let Some(info) = di {
-                    ret.push(info);
-                }
-
-                if only_versions_for.is_some() {
-                    break;
-                }
-            }
-            None => {
-                drop(cache_iter);
-                // also loop through missing dependencies, as they would be installed
-                for pkg in depends.iter() {
-                    let mut iter = cache.find_by_name(pkg);
-                    let view = match iter.next() {
-                        Some(view) => view,
-                        None => continue, // package not found, ignore
-                    };
-
-                    let di = query_detailed_info(PackagePreSelect::OnlyNew, &filter, view, None);
-                    if let Some(info) = di {
-                        ret.push(info);
-                    }
-                }
-                break;
-            }
-        }
-    }
-
-    ret
-}
-
-fn query_detailed_info<'a, F, V>(
-    pre_select: PackagePreSelect,
-    filter: F,
-    view: V,
-    depends: Option<&mut HashSet>,
-) -> Option
-where
-    F: Fn(FilterData) -> bool,
-    V: std::ops::Deref>,
-{
-    let current_version = view.current_version();
-    let candidate_version = view.candidate_version();
-
-    let (current_version, candidate_version) = match pre_select {
-        PackagePreSelect::OnlyInstalled => match (current_version, candidate_version) {
-            (Some(cur), Some(can)) => (Some(cur), can), // package installed and there is an update
-            (Some(cur), None) => (Some(cur.clone()), cur), // package installed and up-to-date
-            (None, Some(_)) => return None,             // package could be installed
-            (None, None) => return None,                // broken
-        },
-        PackagePreSelect::OnlyNew => match (current_version, candidate_version) {
-            (Some(_), Some(_)) => return None,
-            (Some(_), None) => return None,
-            (None, Some(can)) => (None, can),
-            (None, None) => return None,
-        },
-        PackagePreSelect::All => match (current_version, candidate_version) {
-            (Some(cur), Some(can)) => (Some(cur), can),
-            (Some(cur), None) => (Some(cur.clone()), cur),
-            (None, Some(can)) => (None, can),
-            (None, None) => return None,
-        },
-    };
-
-    // get additional information via nested APT 'iterators'
-    let mut view_iter = view.versions();
-    while let Some(ver) = view_iter.next() {
-        let package = view.name();
-        let version = ver.version();
-        let mut origin_res = "unknown".to_owned();
-        let mut section_res = "unknown".to_owned();
-        let mut priority_res = "unknown".to_owned();
-        let mut short_desc = package.clone();
-        let mut long_desc = "".to_owned();
-
-        let fd = FilterData {
-            package: package.as_str(),
-            installed_version: current_version.as_deref(),
-            candidate_version: &candidate_version,
-            active_version: &version,
-        };
-
-        if filter(fd) {
-            if let Some(section) = ver.section() {
-                section_res = section;
-            }
-
-            if let Some(prio) = ver.priority_type() {
-                priority_res = prio;
-            }
-
-            // assume every package has only one origin file (not
-            // origin, but origin *file*, for some reason those seem to
-            // be different concepts in APT)
-            let mut origin_iter = ver.origin_iter();
-            let origin = origin_iter.next();
-            if let Some(origin) = origin {
-                if let Some(sd) = origin.short_desc() {
-                    short_desc = sd;
-                }
-
-                if let Some(ld) = origin.long_desc() {
-                    long_desc = ld;
-                }
-
-                // the package files appear in priority order, meaning
-                // the one for the candidate version is first - this is fine
-                // however, as the source package should be the same for all
-                // versions anyway
-                let mut pkg_iter = origin.file();
-                let pkg_file = pkg_iter.next();
-                if let Some(pkg_file) = pkg_file {
-                    if let Some(origin_name) = pkg_file.origin() {
-                        origin_res = origin_name;
-                    }
-                }
-            }
-
-            if let Some(depends) = depends {
-                let mut dep_iter = ver.dep_iter();
-                loop {
-                    let dep = match dep_iter.next() {
-                        Some(dep) if dep.dep_type() != "Depends" => continue,
-                        Some(dep) => dep,
-                        None => break,
-                    };
-
-                    let dep_pkg = dep.target_pkg();
-                    let name = dep_pkg.name();
-
-                    depends.insert(name);
-                }
-            }
-
-            return Some(APTUpdateInfo {
-                package,
-                title: short_desc,
-                arch: view.arch(),
-                description: long_desc,
-                origin: origin_res,
-                version: candidate_version.clone(),
-                old_version: match current_version {
-                    Some(vers) => vers,
-                    None => "".to_owned(),
-                },
-                priority: priority_res,
-                section: section_res,
-                extra_info: None,
-            });
-        }
-    }
-
-    None
-}
diff --git a/src/tools/disks/lvm.rs b/src/tools/disks/lvm.rs
index b5e79e220..1456a21c3 100644
--- a/src/tools/disks/lvm.rs
+++ b/src/tools/disks/lvm.rs
@@ -1,19 +1,17 @@
 use std::collections::HashSet;
 use std::os::unix::fs::MetadataExt;
+use std::sync::LazyLock;
 
 use anyhow::Error;
-use lazy_static::lazy_static;
 use serde_json::Value;
 
 use super::LsblkInfo;
 
-lazy_static! {
-    static ref LVM_UUIDS: HashSet<&'static str> = {
-        let mut set = HashSet::new();
-        set.insert("e6d6d379-f507-44c2-a23c-238f2a3df928");
-        set
-    };
-}
+static LVM_UUIDS: LazyLock<HashSet<&'static str>> = LazyLock::new(|| {
+    let mut set = HashSet::new();
+    set.insert("e6d6d379-f507-44c2-a23c-238f2a3df928");
+    set
+});
 
 /// Get set of devices used by LVM (pvs).
 ///
diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs
index 94f89e0a9..c729c26a2 100644
--- a/src/tools/disks/mod.rs
+++ b/src/tools/disks/mod.rs
@@ -6,7 +6,7 @@ use std::io;
 use std::os::unix::ffi::{OsStrExt, OsStringExt};
 use std::os::unix::fs::MetadataExt;
 use std::path::{Path, PathBuf};
-use std::sync::Arc;
+use std::sync::{Arc, LazyLock};
 
 use anyhow::{bail, format_err, Error};
 use libc::dev_t;
@@ -14,16 +14,14 @@ use once_cell::sync::OnceCell;
 
 use ::serde::{Deserialize, Serialize};
 
-use proxmox_lang::error::io_err_other;
 use proxmox_lang::{io_bail, io_format_err};
-use proxmox_rest_server::WorkerTask;
 use proxmox_schema::api;
 use proxmox_sys::linux::procfs::{mountinfo::Device, MountInfo};
-use proxmox_sys::task_log;
 
 use pbs_api_types::{BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX};
 
 mod zfs;
+use tracing::info;
 pub use zfs::*;
 mod zpool_status;
 pub use zpool_status::*;
@@ -34,10 +32,8 @@ pub use lvm::*;
 mod smart;
 pub use smart::*;
 
-lazy_static::lazy_static! {
-    static ref ISCSI_PATH_REGEX: regex::Regex =
-        regex::Regex::new(r"host[^/]*/session[^/]*").unwrap();
-}
+static ISCSI_PATH_REGEX: LazyLock<regex::Regex> =
+    LazyLock::new(|| regex::Regex::new(r"host[^/]*/session[^/]*").unwrap());
 
 /// Disk management context.
 ///
@@ -301,7 +297,7 @@ impl Disk {
     /// Convenience wrapper for reading a `/sys` file which contains just a simple utf-8 string.
     pub fn read_sys_str<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<String>> {
         Ok(match self.read_sys(path.as_ref())? {
-            Some(data) => Some(String::from_utf8(data).map_err(io_err_other)?),
+            Some(data) => Some(String::from_utf8(data).map_err(io::Error::other)?),
             None => None,
         })
     }
@@ -309,7 +305,7 @@ impl Disk {
     /// Convenience wrapper for unsigned integer `/sys` values up to 64 bit.
     pub fn read_sys_u64<P: AsRef<Path>>(&self, path: P) -> io::Result<Option<u64>> {
         Ok(match self.read_sys_str(path)? {
-            Some(data) => Some(data.trim().parse().map_err(io_err_other)?),
+            Some(data) => Some(data.trim().parse().map_err(io::Error::other)?),
             None => None,
         })
     }
@@ -902,7 +898,7 @@ fn get_partitions_info(
                         _ => used,
                     };
                     if used == PartitionUsageType::FileSystem {
-                        filesystem = info.file_system_type.clone();
+                        filesystem.clone_from(&info.file_system_type);
                     }
                 }
             }
@@ -1116,7 +1112,7 @@ pub fn inititialize_gpt_disk(disk: &Disk, uuid: Option<&str>) -> Result<(), Erro
 
 /// Wipes all labels and the first 200 MiB of a disk/partition (or the whole if it is smaller).
 /// If called with a partition, also sets the partition type to 0x83 'Linux filesystem'.
-pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error> {
+pub fn wipe_blockdev(disk: &Disk) -> Result<(), Error> {
     let disk_path = match disk.device_path() {
         Some(path) => path,
         None => bail!("disk {:?} has no node in /dev", disk.syspath()),
@@ -1137,13 +1133,13 @@ pub fn wipe_blockdev(disk: &Disk, worker: Arc) -> Result<(), Error>
 
     to_wipe.push(disk_path.to_path_buf());
 
-    task_log!(worker, "Wiping block device {}", disk_path.display());
+    info!("Wiping block device {}", disk_path.display());
 
     let mut wipefs_command = std::process::Command::new("wipefs");
     wipefs_command.arg("--all").args(&to_wipe);
 
     let wipefs_output = proxmox_sys::command::run_command(wipefs_command, None)?;
-    task_log!(worker, "wipefs output: {}", wipefs_output);
+    info!("wipefs output: {wipefs_output}");
 
     let size = disk.size().map(|size| size / 1024 / 1024)?;
     let count = size.min(200);
@@ -1163,21 +1159,17 @@ pub fn wipe_blockdev(disk: &Disk, worker: Arc) -> Result<(), Error>
     dd_command.args(args);
 
     let dd_output = proxmox_sys::command::run_command(dd_command, None)?;
-    task_log!(worker, "dd output: {}", dd_output);
+    info!("dd output: {dd_output}");
 
     if is_partition {
         // set the partition type to 0x83 'Linux filesystem'
-        change_parttype(disk, "8300", worker)?;
+        change_parttype(disk, "8300")?;
     }
 
     Ok(())
 }
 
-pub fn change_parttype(
-    part_disk: &Disk,
-    part_type: &str,
-    worker: Arc<WorkerTask>,
-) -> Result<(), Error> {
+pub fn change_parttype(part_disk: &Disk, part_type: &str) -> Result<(), Error> {
     let part_path = match part_disk.device_path() {
         Some(path) => path,
         None => bail!("disk {:?} has no node in /dev", part_disk.syspath()),
@@ -1199,7 +1191,7 @@ pub fn change_parttype(
         };
         sgdisk_command.arg(part_disk_parent_path);
         let sgdisk_output = proxmox_sys::command::run_command(sgdisk_command, None)?;
-        task_log!(worker, "sgdisk output: {}", sgdisk_output);
+        info!("sgdisk output: {sgdisk_output}");
     }
     Ok(())
 }
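
Editor's note: the hunks above show the logging migration pattern used across this patch: plain tracing macros replace task_log!/task_warn!, so helpers like wipe_blockdev() no longer need a WorkerTask handle. A minimal standalone sketch of that API (assumes the tracing and tracing-subscriber crates; the function below is a stand-in, not the real wipe_blockdev, and PBS installs its subscriber through its own logging setup rather than the stock fmt subscriber used here):

```rust
use tracing::info;

fn wipe_blockdev(path: &str) -> Result<(), String> {
    info!("Wiping block device {path}");
    // ... wipefs / dd would run here ...
    let wipefs_output = "(example output)";
    info!("wipefs output: {wipefs_output}");
    Ok(())
}

fn main() {
    // a subscriber has to be installed once per process; the stock fmt
    // subscriber is used here only to make the example runnable
    tracing_subscriber::fmt::init();
    wipe_blockdev("/dev/sdx").expect("wipe failed");
}
```
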
diff --git a/src/tools/disks/smart.rs b/src/tools/disks/smart.rs
index e666eb1a2..3ad782b7b 100644
--- a/src/tools/disks/smart.rs
+++ b/src/tools/disks/smart.rs
@@ -1,8 +1,8 @@
 use std::collections::{HashMap, HashSet};
+use std::sync::LazyLock;
 
 use ::serde::{Deserialize, Serialize};
 use anyhow::{bail, Error};
-use lazy_static::lazy_static;
 
 use proxmox_schema::api;
 
@@ -224,7 +224,5 @@ static WEAROUT_FIELD_ORDER: &[&str] = &[
     "Perc_Rated_Life_Used",
 ];
 
-lazy_static! {
-    static ref WEAROUT_FIELD_NAMES: HashSet<&'static str> =
-        WEAROUT_FIELD_ORDER.iter().cloned().collect();
-}
+static WEAROUT_FIELD_NAMES: LazyLock<HashSet<&'static str>> =
+    LazyLock::new(|| WEAROUT_FIELD_ORDER.iter().cloned().collect());
diff --git a/src/tools/disks/zfs.rs b/src/tools/disks/zfs.rs
index b12a948bb..2abb5176c 100644
--- a/src/tools/disks/zfs.rs
+++ b/src/tools/disks/zfs.rs
@@ -1,23 +1,20 @@
 use std::collections::HashSet;
 use std::os::unix::fs::MetadataExt;
 use std::path::PathBuf;
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, LazyLock, Mutex};
 
 use anyhow::{bail, Error};
-use lazy_static::lazy_static;
 
 use proxmox_schema::const_regex;
 
 use super::*;
 
-lazy_static! {
-    static ref ZFS_UUIDS: HashSet<&'static str> = {
-        let mut set = HashSet::new();
-        set.insert("6a898cc3-1dd2-11b2-99a6-080020736631"); // apple
-        set.insert("516e7cba-6ecf-11d6-8ff8-00022d09712b"); // bsd
-        set
-    };
-}
+static ZFS_UUIDS: LazyLock<HashSet<&'static str>> = LazyLock::new(|| {
+    let mut set = HashSet::new();
+    set.insert("6a898cc3-1dd2-11b2-99a6-080020736631"); // apple
+    set.insert("516e7cba-6ecf-11d6-8ff8-00022d09712b"); // bsd
+    set
+});
 
 fn get_pool_from_dataset(dataset: &str) -> &str {
     if let Some(idx) = dataset.find('/') {
@@ -100,10 +97,8 @@ const_regex! {
     OBJSET_REGEX = r"^objset-0x[a-fA-F0-9]+$";
 }
 
-lazy_static::lazy_static! {
-    pub static ref ZFS_DATASET_OBJSET_MAP: Arc>> =
-        Arc::new(Mutex::new(HashMap::new()));
-}
+pub static ZFS_DATASET_OBJSET_MAP: LazyLock>>> =
+    LazyLock::new(|| Arc::new(Mutex::new(HashMap::new())));
 
 // parses /proc/spl/kstat/zfs/POOL/objset-ID files
 // they have the following format:
diff --git a/src/tools/fs.rs b/src/tools/fs.rs
index 4eab0d56f..78ecc3b8d 100644
--- a/src/tools/fs.rs
+++ b/src/tools/fs.rs
@@ -13,8 +13,8 @@ pub async fn fs_info(path: PathBuf) -> Result Result {
diff --git a/src/tools/mod.rs b/src/tools/mod.rs
index dfdbb7024..322894dd7 100644
--- a/src/tools/mod.rs
+++ b/src/tools/mod.rs
@@ -6,7 +6,6 @@ use anyhow::{bail, Error};
 
 use proxmox_http::{client::Client, HttpOptions, ProxyConfig};
 
-// pub mod apt;
 pub mod config;
 pub mod disks;
 pub mod fs;
diff --git a/src/tools/systemd/config.rs b/src/tools/systemd/config.rs
index 3e1ac1bba..9b94531b3 100644
--- a/src/tools/systemd/config.rs
+++ b/src/tools/systemd/config.rs
@@ -1,5 +1,6 @@
+use std::sync::LazyLock;
+
 use anyhow::Error;
-use lazy_static::lazy_static;
 
 use super::types::*;
 
@@ -8,11 +9,9 @@ use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlug
 
 use proxmox_sys::{fs::replace_file, fs::CreateOptions};
 
-lazy_static! {
-    pub static ref SERVICE_CONFIG: SectionConfig = init_service();
-    pub static ref TIMER_CONFIG: SectionConfig = init_timer();
-    pub static ref MOUNT_CONFIG: SectionConfig = init_mount();
-}
+pub static SERVICE_CONFIG: LazyLock = LazyLock::new(init_service);
+pub static TIMER_CONFIG: LazyLock = LazyLock::new(init_timer);
+pub static MOUNT_CONFIG: LazyLock = LazyLock::new(init_mount);
 
 fn init_service() -> SectionConfig {
     let mut config = SectionConfig::with_systemd_syntax(&SYSTEMD_SECTION_NAME_SCHEMA);
diff --git a/src/tools/systemd/unit.rs b/src/tools/systemd/unit.rs
index 267295ef9..f947ae118 100644
--- a/src/tools/systemd/unit.rs
+++ b/src/tools/systemd/unit.rs
@@ -99,7 +99,7 @@ pub fn reload_unit(unit: &str) -> Result<(), Error> {
 #[test]
 fn test_escape_unit() -> Result<(), Error> {
     fn test_escape(i: &str, expected: &str, is_path: bool) {
-        use proxmox_sys::systemd::{escape_unit, unescape_unit};
+        use proxmox_systemd::{escape_unit, unescape_unit};
 
         let escaped = escape_unit(i, is_path);
         assert_eq!(escaped, expected);
diff --git a/src/traffic_control_cache.rs b/src/traffic_control_cache.rs
index 4c3bccee3..830a8c043 100644
--- a/src/traffic_control_cache.rs
+++ b/src/traffic_control_cache.rs
@@ -2,7 +2,7 @@
 
 use std::collections::HashMap;
 use std::net::{IpAddr, Ipv4Addr, SocketAddr};
-use std::sync::{Arc, Mutex};
+use std::sync::{Arc, LazyLock, Mutex};
 use std::time::Instant;
 
 use anyhow::Error;
@@ -21,11 +21,9 @@ use crate::tools::SharedRateLimiter;
 
 pub type SharedRateLimit = Arc;
 
-lazy_static::lazy_static! {
-    /// Shared traffic control cache singleton.
-    pub static ref TRAFFIC_CONTROL_CACHE: Arc<Mutex<TrafficControlCache>> =
-        Arc::new(Mutex::new(TrafficControlCache::new()));
-}
+/// Shared traffic control cache singleton.
+pub static TRAFFIC_CONTROL_CACHE: LazyLock<Arc<Mutex<TrafficControlCache>>> =
+    LazyLock::new(|| Arc::new(Mutex::new(TrafficControlCache::new())));
 
 struct ParsedTcRule {
     config: TrafficControlRule,    // original rule config
@@ -92,7 +90,7 @@ fn network_match_len(networks: &[IpInet], ip: &IpAddr) -> Option {
     match_len
 }
 
-fn cannonical_ip(ip: IpAddr) -> IpAddr {
+fn canonical_ip(ip: IpAddr) -> IpAddr {
     // TODO: use std::net::IpAddr::to_canonical once stable
     match ip {
         IpAddr::V4(addr) => IpAddr::V4(addr),
@@ -332,7 +330,7 @@ impl TrafficControlCache {
         peer: SocketAddr,
         now: i64,
     ) -> (&str, Option, Option) {
-        let peer_ip = cannonical_ip(peer.ip());
+        let peer_ip = canonical_ip(peer.ip());
 
         log::debug!("lookup_rate_limiter: {:?}", peer_ip);
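
Editor's note: for context, the renamed helper normalizes IPv4-mapped IPv6 peer addresses so IPv4 traffic-control rule networks still match clients connecting over a dual-stack socket. An illustrative re-implementation of that idea (not copied from the PBS function):

```rust
use std::net::{IpAddr, Ipv4Addr};

fn canonical_ip(ip: IpAddr) -> IpAddr {
    match ip {
        IpAddr::V4(addr) => IpAddr::V4(addr),
        IpAddr::V6(addr) => {
            let o = addr.octets();
            // ::ffff:a.b.c.d -> a.b.c.d
            if o[..12] == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff] {
                IpAddr::V4(Ipv4Addr::new(o[12], o[13], o[14], o[15]))
            } else {
                IpAddr::V6(addr)
            }
        }
    }
}

fn main() {
    let mapped: IpAddr = "::ffff:192.0.2.10".parse().unwrap();
    assert_eq!(canonical_ip(mapped), IpAddr::V4(Ipv4Addr::new(192, 0, 2, 10)));
}
```
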
 
diff --git a/templates/Makefile b/templates/Makefile
new file mode 100644
index 000000000..0f8ad72ca
--- /dev/null
+++ b/templates/Makefile
@@ -0,0 +1,41 @@
+include ../defines.mk
+
+NOTIFICATION_TEMPLATES=						\
+	default/acme-err-body.txt.hbs			\
+	default/acme-err-subject.txt.hbs		\
+	default/gc-err-body.txt.hbs				\
+	default/gc-ok-body.txt.hbs				\
+	default/gc-err-subject.txt.hbs			\
+	default/gc-ok-subject.txt.hbs			\
+	default/package-updates-body.txt.hbs	\
+	default/package-updates-subject.txt.hbs	\
+	default/prune-err-body.txt.hbs			\
+	default/prune-ok-body.txt.hbs			\
+	default/prune-err-subject.txt.hbs		\
+	default/prune-ok-subject.txt.hbs		\
+	default/sync-err-body.txt.hbs			\
+	default/sync-ok-body.txt.hbs			\
+	default/sync-err-subject.txt.hbs		\
+	default/sync-ok-subject.txt.hbs			\
+	default/tape-backup-err-body.txt.hbs	\
+	default/tape-backup-err-subject.txt.hbs	\
+	default/tape-backup-ok-body.txt.hbs		\
+	default/tape-backup-ok-subject.txt.hbs	\
+	default/tape-load-body.txt.hbs			\
+	default/tape-load-subject.txt.hbs		\
+	default/test-body.txt.hbs				\
+	default/test-body.html.hbs				\
+	default/test-subject.txt.hbs			\
+	default/verify-err-body.txt.hbs			\
+	default/verify-ok-body.txt.hbs			\
+	default/verify-err-subject.txt.hbs		\
+	default/verify-ok-subject.txt.hbs		\
+
+all:
+
+clean:
+
+install:
+	install -dm755 $(DESTDIR)$(DATAROOTDIR)/proxmox-backup/templates/default
+	$(foreach i,$(NOTIFICATION_TEMPLATES), \
+	    install -m644 $(i) $(DESTDIR)$(DATAROOTDIR)/proxmox-backup/templates/$(i) ;)
diff --git a/templates/default/acme-err-body.txt.hbs b/templates/default/acme-err-body.txt.hbs
new file mode 100644
index 000000000..3cbfea4a8
--- /dev/null
+++ b/templates/default/acme-err-body.txt.hbs
@@ -0,0 +1,7 @@
+Proxmox Backup Server was not able to renew a TLS certificate.
+
+Error: {{error}}
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/acme-err-subject.txt.hbs b/templates/default/acme-err-subject.txt.hbs
new file mode 100644
index 000000000..3cf4fe452
--- /dev/null
+++ b/templates/default/acme-err-subject.txt.hbs
@@ -0,0 +1 @@
+Could not renew certificate
diff --git a/templates/default/gc-err-body.txt.hbs b/templates/default/gc-err-body.txt.hbs
new file mode 100644
index 000000000..d6c2d0bcc
--- /dev/null
+++ b/templates/default/gc-err-body.txt.hbs
@@ -0,0 +1,8 @@
+Datastore: {{datastore}}
+
+Garbage collection failed: {{error}}
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/gc-err-subject.txt.hbs b/templates/default/gc-err-subject.txt.hbs
new file mode 100644
index 000000000..ebf49f3bd
--- /dev/null
+++ b/templates/default/gc-err-subject.txt.hbs
@@ -0,0 +1 @@
+Garbage Collect Datastore '{{ datastore }}' failed
diff --git a/templates/default/gc-ok-body.txt.hbs b/templates/default/gc-ok-body.txt.hbs
new file mode 100644
index 000000000..d2f7cd815
--- /dev/null
+++ b/templates/default/gc-ok-body.txt.hbs
@@ -0,0 +1,23 @@
+Datastore:            {{datastore}}
+Task ID:              {{status.upid}}
+Index file count:     {{status.index-file-count}}
+
+Removed garbage:      {{human-bytes status.removed-bytes}}
+Removed chunks:       {{status.removed-chunks}}
+Removed bad chunks:   {{status.removed-bad}}
+
+Leftover bad chunks:  {{status.still-bad}}
+Pending removals:     {{human-bytes status.pending-bytes}} (in {{status.pending-chunks}} chunks)
+
+Original Data usage:  {{human-bytes status.index-data-bytes}}
+On-Disk usage:        {{human-bytes status.disk-bytes}} ({{relative-percentage status.disk-bytes status.index-data-bytes}})
+On-Disk chunks:       {{status.disk-chunks}}
+
+Deduplication Factor: {{deduplication-factor}}
+
+Garbage collection successful.
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/gc-ok-subject.txt.hbs b/templates/default/gc-ok-subject.txt.hbs
new file mode 100644
index 000000000..538e37008
--- /dev/null
+++ b/templates/default/gc-ok-subject.txt.hbs
@@ -0,0 +1 @@
+Garbage Collect Datastore '{{ datastore }}' successful
diff --git a/templates/default/package-updates-body.txt.hbs b/templates/default/package-updates-body.txt.hbs
new file mode 100644
index 000000000..62f9c7c4b
--- /dev/null
+++ b/templates/default/package-updates-body.txt.hbs
@@ -0,0 +1,8 @@
+Proxmox Backup Server has the following updates available:
+{{#each updates }}
+    {{Package}}: {{OldVersion}} -> {{Version~}}
+{{/each }}
+
+To upgrade visit the web interface:
+
+
diff --git a/templates/default/package-updates-subject.txt.hbs b/templates/default/package-updates-subject.txt.hbs
new file mode 100644
index 000000000..c8a775d58
--- /dev/null
+++ b/templates/default/package-updates-subject.txt.hbs
@@ -0,0 +1 @@
+New software packages available ({{ hostname }})
diff --git a/templates/default/prune-err-body.txt.hbs b/templates/default/prune-err-body.txt.hbs
new file mode 100644
index 000000000..0973c3d9e
--- /dev/null
+++ b/templates/default/prune-err-body.txt.hbs
@@ -0,0 +1,10 @@
+
+Job ID:       {{jobname}}
+Datastore:    {{store}}
+
+Pruning failed: {{error}}
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/prune-err-subject.txt.hbs b/templates/default/prune-err-subject.txt.hbs
new file mode 100644
index 000000000..836ae7224
--- /dev/null
+++ b/templates/default/prune-err-subject.txt.hbs
@@ -0,0 +1 @@
+Pruning datastore '{{ store }}' failed
diff --git a/templates/default/prune-ok-body.txt.hbs b/templates/default/prune-ok-body.txt.hbs
new file mode 100644
index 000000000..b7e449e30
--- /dev/null
+++ b/templates/default/prune-ok-body.txt.hbs
@@ -0,0 +1,10 @@
+
+Job ID:       {{jobname}}
+Datastore:    {{store}}
+
+Pruning successful.
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/prune-ok-subject.txt.hbs b/templates/default/prune-ok-subject.txt.hbs
new file mode 100644
index 000000000..3227a0629
--- /dev/null
+++ b/templates/default/prune-ok-subject.txt.hbs
@@ -0,0 +1 @@
+Pruning datastore '{{ store }}' successful
diff --git a/templates/default/sync-err-body.txt.hbs b/templates/default/sync-err-body.txt.hbs
new file mode 100644
index 000000000..a56d9d220
--- /dev/null
+++ b/templates/default/sync-err-body.txt.hbs
@@ -0,0 +1,14 @@
+Job ID:             {{job.id}}
+Datastore:          {{job.store}}
+{{#if job.remote~}}
+Remote:             {{job.remote}}
+Remote Store:       {{job.remote-store}}
+{{else~}}
+Local Source Store: {{job.remote-store}}
+{{/if}}
+Synchronization failed: {{error}}
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/sync-err-subject.txt.hbs b/templates/default/sync-err-subject.txt.hbs
new file mode 100644
index 000000000..a1464802f
--- /dev/null
+++ b/templates/default/sync-err-subject.txt.hbs
@@ -0,0 +1,5 @@
+{{#if job.remote~}}
+Sync remote '{{ job.remote }}' datastore '{{ job.remote-store }}' failed
+{{else~}}
+Sync local datastore '{{ job.remote-store }}' failed
+{{/if}}
diff --git a/templates/default/sync-ok-body.txt.hbs b/templates/default/sync-ok-body.txt.hbs
new file mode 100644
index 000000000..25c4b33b8
--- /dev/null
+++ b/templates/default/sync-ok-body.txt.hbs
@@ -0,0 +1,14 @@
+Job ID:             {{job.id}}
+Datastore:          {{job.store}}
+{{#if job.remote~}}
+Remote:             {{job.remote}}
+Remote Store:       {{job.remote-store}}
+{{else~}}
+Local Source Store: {{job.remote-store}}
+{{/if}}
+Synchronization successful.
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/sync-ok-subject.txt.hbs b/templates/default/sync-ok-subject.txt.hbs
new file mode 100644
index 000000000..76616b5c5
--- /dev/null
+++ b/templates/default/sync-ok-subject.txt.hbs
@@ -0,0 +1,5 @@
+{{#if job.remote~}}
+Sync remote '{{ job.remote }}' datastore '{{ job.remote-store }}' successful
+{{else~}}
+Sync local datastore '{{ job.remote-store }}' successful
+{{/if}}
diff --git a/templates/default/tape-backup-err-body.txt.hbs b/templates/default/tape-backup-err-body.txt.hbs
new file mode 100644
index 000000000..cc45c8820
--- /dev/null
+++ b/templates/default/tape-backup-err-body.txt.hbs
@@ -0,0 +1,26 @@
+{{#if id ~}}
+Job ID:     {{id}}
+{{/if~}}
+Datastore:  {{job.store}}
+Tape Pool:  {{job.pool}}
+Tape Drive: {{job.drive}}
+
+{{#if snapshot-list ~}}
+Snapshots included:
+
+{{#each snapshot-list~}}
+{{this}}
+{{/each~}}
+{{/if}}
+{{#if used-tapes }}
+Used Tapes:
+{{#each used-tapes~}}
+{{this}}
+{{/each~}}
+{{/if}}
+Tape Backup failed: {{error}}
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/tape-backup-err-subject.txt.hbs b/templates/default/tape-backup-err-subject.txt.hbs
new file mode 100644
index 000000000..b52d338a9
--- /dev/null
+++ b/templates/default/tape-backup-err-subject.txt.hbs
@@ -0,0 +1,5 @@
+{{#if id~}}
+Tape Backup '{{ id }}' datastore '{{ job.store }}' failed
+{{else~}}
+Tape Backup datastore '{{ job.store }}' failed
+{{/if}}
diff --git a/templates/default/tape-backup-ok-body.txt.hbs b/templates/default/tape-backup-ok-body.txt.hbs
new file mode 100644
index 000000000..ede51d05d
--- /dev/null
+++ b/templates/default/tape-backup-ok-body.txt.hbs
@@ -0,0 +1,27 @@
+{{#if id ~}}
+Job ID:     {{id}}
+{{/if~}}
+Datastore:  {{job.store}}
+Tape Pool:  {{job.pool}}
+Tape Drive: {{job.drive}}
+
+{{#if snapshot-list ~}}
+Snapshots included:
+
+{{#each snapshot-list~}}
+{{this}}
+{{/each~}}
+{{/if}}
+Duration: {{job-duration}}
+{{#if used-tapes }}
+Used Tapes:
+{{#each used-tapes~}}
+{{this}}
+{{/each~}}
+{{/if}}
+Tape Backup successful.
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/tape-backup-ok-subject.txt.hbs b/templates/default/tape-backup-ok-subject.txt.hbs
new file mode 100644
index 000000000..c475c05b3
--- /dev/null
+++ b/templates/default/tape-backup-ok-subject.txt.hbs
@@ -0,0 +1,5 @@
+{{#if id~}}
+Tape Backup '{{ id }}' datastore '{{ job.store }}' successful
+{{else~}}
+Tape Backup datastore '{{ job.store }}' successful
+{{/if}}
diff --git a/templates/default/tape-load-body.txt.hbs b/templates/default/tape-load-body.txt.hbs
new file mode 100644
index 000000000..ddc8a9e15
--- /dev/null
+++ b/templates/default/tape-load-body.txt.hbs
@@ -0,0 +1,15 @@
+{{#if reason~}}
+The {{ device-type }} has the wrong or no tape(s) inserted. Error:
+{{ reason }}
+
+{{/if~}}
+{{#if is-changer~}}
+Please insert the requested media into the changer.
+
+Changer: {{ device }}
+{{else}}
+Please insert the requested media into the backup drive.
+
+Drive: {{ device }}
+{{/if}}
+Media: {{ label-text }}
diff --git a/templates/default/tape-load-subject.txt.hbs b/templates/default/tape-load-subject.txt.hbs
new file mode 100644
index 000000000..10f6a02ec
--- /dev/null
+++ b/templates/default/tape-load-subject.txt.hbs
@@ -0,0 +1 @@
+Load Media '{{ label-text }}' request for {{ device-type }} '{{ device }}'
diff --git a/templates/default/test-body.html.hbs b/templates/default/test-body.html.hbs
new file mode 100644
index 000000000..26a43ddee
--- /dev/null
+++ b/templates/default/test-body.html.hbs
@@ -0,0 +1 @@
+This is a test of the notification target '{{ target }}'.
diff --git a/templates/default/test-body.txt.hbs b/templates/default/test-body.txt.hbs
new file mode 100644
index 000000000..26a43ddee
--- /dev/null
+++ b/templates/default/test-body.txt.hbs
@@ -0,0 +1 @@
+This is a test of the notification target '{{ target }}'.
diff --git a/templates/default/test-subject.txt.hbs b/templates/default/test-subject.txt.hbs
new file mode 100644
index 000000000..cb8e13205
--- /dev/null
+++ b/templates/default/test-subject.txt.hbs
@@ -0,0 +1 @@
+Test notification
diff --git a/templates/default/verify-err-body.txt.hbs b/templates/default/verify-err-body.txt.hbs
new file mode 100644
index 000000000..d07b5ce0d
--- /dev/null
+++ b/templates/default/verify-err-body.txt.hbs
@@ -0,0 +1,14 @@
+
+Job ID:    {{job.id}}
+Datastore: {{job.store}}
+
+Verification failed on these snapshots/groups:
+
+{{#each errors}}
+    {{this~}}
+{{/each}}
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/verify-err-subject.txt.hbs b/templates/default/verify-err-subject.txt.hbs
new file mode 100644
index 000000000..00a2d07f2
--- /dev/null
+++ b/templates/default/verify-err-subject.txt.hbs
@@ -0,0 +1 @@
+Verify Datastore '{{ job.store }}' failed
diff --git a/templates/default/verify-ok-body.txt.hbs b/templates/default/verify-ok-body.txt.hbs
new file mode 100644
index 000000000..7560582eb
--- /dev/null
+++ b/templates/default/verify-ok-body.txt.hbs
@@ -0,0 +1,10 @@
+
+Job ID:    {{job.id}}
+Datastore: {{job.store}}
+
+Verification successful.
+
+
+Please visit the web interface for further details:
+
+
diff --git a/templates/default/verify-ok-subject.txt.hbs b/templates/default/verify-ok-subject.txt.hbs
new file mode 100644
index 000000000..6020874c6
--- /dev/null
+++ b/templates/default/verify-ok-subject.txt.hbs
@@ -0,0 +1 @@
+Verify Datastore '{{ job.store }}' successful
diff --git a/tests/blob_writer.rs b/tests/blob_writer.rs
deleted file mode 100644
index 23a3283d4..000000000
--- a/tests/blob_writer.rs
+++ /dev/null
@@ -1,105 +0,0 @@
-use std::io::Cursor;
-use std::io::{Read, Seek, SeekFrom, Write};
-use std::sync::Arc;
-
-use anyhow::{bail, Error};
-use lazy_static::lazy_static;
-
-use pbs_datastore::{DataBlob, DataBlobReader, DataBlobWriter};
-use pbs_tools::crypt_config::CryptConfig;
-
-lazy_static! {
-    static ref TEST_DATA: Vec<u8> = {
-        let mut data = Vec::new();
-
-        for i in 0..100_000 {
-            data.push((i % 255) as u8);
-        }
-
-        data
-    };
-    static ref CRYPT_CONFIG: Arc<CryptConfig> = {
-        let key = [1u8; 32];
-        Arc::new(CryptConfig::new(key).unwrap())
-    };
-    static ref TEST_DIGEST_PLAIN: [u8; 32] = [
-        83, 154, 96, 195, 167, 204, 38, 142, 204, 224, 130, 201, 24, 71, 2, 188, 130, 155, 177, 6,
-        162, 100, 61, 238, 38, 219, 63, 240, 191, 132, 87, 238
-    ];
-    static ref TEST_DIGEST_ENC: [u8; 32] = [
-        50, 162, 191, 93, 255, 132, 9, 14, 127, 23, 92, 39, 246, 102, 245, 204, 130, 104, 4, 106,
-        182, 239, 218, 14, 80, 17, 150, 188, 239, 253, 198, 117
-    ];
-}
-
-fn verify_test_blob(mut cursor: Cursor<Vec<u8>>, digest: &[u8; 32]) -> Result<(), Error> {
-    // run read tests with different buffer sizes
-    for size in [1, 3, 64 * 1024].iter() {
-        println!("Starting DataBlobReader test (size = {})", size);
-
-        cursor.seek(SeekFrom::Start(0))?;
-        let mut reader = DataBlobReader::new(&mut cursor, Some(CRYPT_CONFIG.clone()))?;
-        let mut buffer = Vec::<u8>::new();
-        // read the whole file
-        //reader.read_to_end(&mut buffer)?;
-        let mut buf = vec![0u8; *size];
-        loop {
-            let count = reader.read(&mut buf)?;
-            if count == 0 {
-                break;
-            }
-            buffer.extend(&buf[..count]);
-        }
-
-        reader.finish()?;
-        if buffer != *TEST_DATA {
-            bail!("blob data is wrong (read buffer size {})", size);
-        }
-    }
-
-    let raw_data = cursor.into_inner();
-
-    let blob = DataBlob::load_from_reader(&mut &raw_data[..])?;
-
-    let data = blob.decode(Some(&CRYPT_CONFIG), Some(digest))?;
-    if data != *TEST_DATA {
-        bail!("blob data is wrong (decode)");
-    }
-    Ok(())
-}
-
-#[test]
-fn test_uncompressed_blob_writer() -> Result<(), Error> {
-    let tmp = Cursor::new(Vec::<u8>::new());
-    let mut blob_writer = DataBlobWriter::new_uncompressed(tmp)?;
-    blob_writer.write_all(&TEST_DATA)?;
-
-    verify_test_blob(blob_writer.finish()?, &TEST_DIGEST_PLAIN)
-}
-
-#[test]
-fn test_compressed_blob_writer() -> Result<(), Error> {
-    let tmp = Cursor::new(Vec::<u8>::new());
-    let mut blob_writer = DataBlobWriter::new_compressed(tmp)?;
-    blob_writer.write_all(&TEST_DATA)?;
-
-    verify_test_blob(blob_writer.finish()?, &TEST_DIGEST_PLAIN)
-}
-
-#[test]
-fn test_encrypted_blob_writer() -> Result<(), Error> {
-    let tmp = Cursor::new(Vec::<u8>::new());
-    let mut blob_writer = DataBlobWriter::new_encrypted(tmp, CRYPT_CONFIG.clone())?;
-    blob_writer.write_all(&TEST_DATA)?;
-
-    verify_test_blob(blob_writer.finish()?, &TEST_DIGEST_ENC)
-}
-
-#[test]
-fn test_encrypted_compressed_blob_writer() -> Result<(), Error> {
-    let tmp = Cursor::new(Vec::<u8>::new());
-    let mut blob_writer = DataBlobWriter::new_encrypted_compressed(tmp, CRYPT_CONFIG.clone())?;
-    blob_writer.write_all(&TEST_DATA)?;
-
-    verify_test_blob(blob_writer.finish()?, &TEST_DIGEST_ENC)
-}
diff --git a/tests/catar.rs b/tests/catar.rs
index 36bb4f3bc..94c565012 100644
--- a/tests/catar.rs
+++ b/tests/catar.rs
@@ -19,7 +19,7 @@ fn run_test(dir_name: &str) -> Result<(), Error> {
         .write(true)
         .truncate(true)
         .open("test-proxmox.catar")?;
-    let writer = pxar::encoder::sync::StandardWriter::new(writer);
+    let writer = pxar::PxarVariant::Unified(pxar::encoder::sync::StandardWriter::new(writer));
 
     let dir = nix::dir::Dir::open(
         dir_name,
@@ -35,11 +35,12 @@ fn run_test(dir_name: &str) -> Result<(), Error> {
     let rt = tokio::runtime::Runtime::new().unwrap();
     rt.block_on(create_archive(
         dir,
-        writer,
+        PxarWriters::new(writer, None),
         Flags::DEFAULT,
         |_| Ok(()),
-        None,
         options,
+        None,
+        None,
     ))?;
 
     Command::new("cmp")
diff --git a/tests/pxar/backup-client-pxar-data.mpxar b/tests/pxar/backup-client-pxar-data.mpxar
new file mode 100644
index 000000000..00f3dc295
Binary files /dev/null and b/tests/pxar/backup-client-pxar-data.mpxar differ
diff --git a/tests/pxar/backup-client-pxar-data.ppxar.didx b/tests/pxar/backup-client-pxar-data.ppxar.didx
new file mode 100644
index 000000000..a646218b5
Binary files /dev/null and b/tests/pxar/backup-client-pxar-data.ppxar.didx differ
diff --git a/tests/pxar/backup-client-pxar-expected.mpxar b/tests/pxar/backup-client-pxar-expected.mpxar
new file mode 100644
index 000000000..ae4a18c89
Binary files /dev/null and b/tests/pxar/backup-client-pxar-expected.mpxar differ
diff --git a/tests/worker-task-abort.rs b/tests/worker-task-abort.rs
index eda86f01f..f4b348563 100644
--- a/tests/worker-task-abort.rs
+++ b/tests/worker-task-abort.rs
@@ -1,30 +1,25 @@
 use anyhow::{bail, Error};
-
-extern crate proxmox_backup;
-
-extern crate nix;
-extern crate tokio;
+use tracing::info;
 
 use proxmox_lang::try_block;
+use proxmox_rest_server::WorkerTask;
 use proxmox_sys::fs::CreateOptions;
-use proxmox_sys::{task_log, WorkerTaskContext};
+use proxmox_worker_task::WorkerTaskContext;
 
 use pbs_api_types::{Authid, UPID};
 
-use proxmox_rest_server::{CommandSocket, WorkerTask};
-
 fn garbage_collection(worker: &WorkerTask) -> Result<(), Error> {
-    task_log!(worker, "start garbage collection");
+    info!("start garbage collection");
 
     for i in 0..50 {
         worker.check_abort()?;
 
-        task_log!(worker, "progress {}", i);
+        info!("progress {i}");
 
         std::thread::sleep(std::time::Duration::from_millis(10));
     }
 
-    task_log!(worker, "end garbage collection");
+    info!("end garbage collection");
 
     Ok(())
 }
@@ -45,10 +40,8 @@ fn worker_task_abort() -> Result<(), Error> {
 
     let rt = tokio::runtime::Runtime::new().unwrap();
     rt.block_on(async move {
-        let mut commando_sock = CommandSocket::new(
-            proxmox_rest_server::our_ctrl_sock(),
-            nix::unistd::Gid::current(),
-        );
+        let mut commando_sock =
+            proxmox_daemon::command_socket::CommandSocket::new(nix::unistd::Gid::current());
 
         let init_result: Result<(), Error> = try_block!({
             proxmox_rest_server::register_task_control_commands(&mut commando_sock)?;
@@ -60,7 +53,7 @@ fn worker_task_abort() -> Result<(), Error> {
             return;
         }
 
-        if let Err(err) = commando_sock.spawn() {
+        if let Err(err) = commando_sock.spawn(proxmox_rest_server::last_worker_future()) {
             eprintln!("unable to spawn command socket - {}", err);
             return;
         }
@@ -75,7 +68,7 @@ fn worker_task_abort() -> Result<(), Error> {
                 println!("WORKER {}", worker);
 
                 let result = garbage_collection(&worker);
-                proxmox_rest_server::request_shutdown();
+                proxmox_daemon::request_shutdown();
 
                 if let Err(err) = result {
                     println!("got expected error: {}", err);
diff --git a/www/LoginView.js b/www/LoginView.js
index 33734f6c1..d4d8e73e6 100644
--- a/www/LoginView.js
+++ b/www/LoginView.js
@@ -274,6 +274,7 @@ Ext.define('PBS.LoginView', {
 			    itemId: 'usernameField',
 			    reference: 'usernameField',
 			    stateId: 'login-username',
+			    inputAttrTpl: 'autocomplete=username',
 			    bind: {
 				visible: "{!openid}",
 				disabled: "{openid}",
@@ -286,6 +287,7 @@ Ext.define('PBS.LoginView', {
 			    name: 'password',
 			    itemId: 'passwordField',
 			    reference: 'passwordField',
+			    inputAttrTpl: 'autocomplete=current-password',
 			    bind: {
 				visible: "{!openid}",
 				disabled: "{openid}",
diff --git a/www/Makefile b/www/Makefile
index 45ea3aae7..45a2a61e2 100644
--- a/www/Makefile
+++ b/www/Makefile
@@ -63,10 +63,13 @@ JSSRC=							\
 	config/SyncView.js				\
 	config/VerifyView.js				\
 	config/PruneView.js				\
+	config/GCView.js				\
 	config/WebauthnView.js				\
 	config/CertificateView.js			\
 	config/NodeOptionView.js			\
 	config/MetricServerView.js			\
+	config/NotificationConfigView.js		\
+	config/PruneAndGC.js				\
 	window/ACLEdit.js				\
 	window/BackupGroupChangeOwner.js		\
 	window/CreateDirectory.js			\
@@ -74,11 +77,13 @@ JSSRC=							\
 	window/NamespaceEdit.js				\
 	window/MaintenanceOptions.js			\
 	window/NotesEdit.js				\
+	window/NotificationMatcherOverride.js		\
 	window/RemoteEdit.js				\
 	window/TrafficControlEdit.js			\
 	window/NotifyOptions.js				\
 	window/SyncJobEdit.js				\
 	window/PruneJobEdit.js				\
+	window/GCJobEdit.js				\
 	window/UserEdit.js				\
 	window/Settings.js				\
 	window/TokenEdit.js				\
@@ -104,7 +109,6 @@ JSSRC=							\
 	Subscription.js					\
 	datastore/Summary.js				\
 	datastore/Notes.js				\
-	datastore/PruneAndGC.js				\
 	datastore/Prune.js				\
 	datastore/Content.js				\
 	datastore/OptionView.js				\
diff --git a/www/NavigationTree.js b/www/NavigationTree.js
index e567045bf..165dbf71c 100644
--- a/www/NavigationTree.js
+++ b/www/NavigationTree.js
@@ -68,6 +68,12 @@ Ext.define('PBS.store.NavigationStore', {
 			path: 'pbsCertificateConfiguration',
 			leaf: true,
 		    },
+		    {
+			text: gettext('Notifications'),
+			iconCls: 'fa fa-bell-o',
+			path: 'pbsNotificationConfigView',
+			leaf: true,
+		    },
 		],
 	    },
 	    {
@@ -260,8 +266,8 @@ Ext.define('PBS.view.main.NavigationTree', {
 		if (maintenance) {
 		    const [type, message] = PBS.Utils.parseMaintenanceMode(maintenance);
 		    qtip = `${type}${message ? ': ' + message : ''}`;
-		    let mainenanceTypeCls = type === 'delete' ? 'destroying' : 'maintenance';
-		    iconCls = `fa fa-database pmx-tree-icon-custom ${mainenanceTypeCls}`;
+		    let maintenanceTypeCls = type === 'delete' ? 'destroying' : 'maintenance';
+		    iconCls = `fa fa-database pmx-tree-icon-custom ${maintenanceTypeCls}`;
 		}
 
 		if (getChildTextAt(j).localeCompare(name) !== 0) {
diff --git a/www/OnlineHelpInfo.js b/www/OnlineHelpInfo.js
index 2402c11d3..556e87e72 100644
--- a/www/OnlineHelpInfo.js
+++ b/www/OnlineHelpInfo.js
@@ -11,6 +11,10 @@ const proxmoxOnlineHelpInfo = {
     "link": "/docs/backup-client.html#client-creating-backups",
     "title": "Creating Backups"
   },
+  "client-change-detection-mode": {
+    "link": "/docs/backup-client.html#client-change-detection-mode",
+    "title": "Change Detection Mode"
+  },
   "client-encryption": {
     "link": "/docs/backup-client.html#client-encryption",
     "title": "Encryption"
@@ -35,6 +39,14 @@ const proxmoxOnlineHelpInfo = {
     "link": "/docs/configuration-files.html#domains-cfg",
     "title": "``domains.cfg``"
   },
+  "notifications-cfg": {
+    "link": "/docs/configuration-files.html#notifications-cfg",
+    "title": "``notifications.cfg``"
+  },
+  "notifications-priv-cfg": {
+    "link": "/docs/configuration-files.html#notifications-priv-cfg",
+    "title": "``notifications-priv.cfg``"
+  },
   "faq-support-table": {
     "link": "/docs/faq.html#faq-support-table",
     "title": "How long will my Proxmox Backup Server version be supported?"
@@ -47,6 +59,14 @@ const proxmoxOnlineHelpInfo = {
     "link": "/docs/file-formats.html#pxar-format",
     "title": "Proxmox File Archive Format (``.pxar``)"
   },
+  "pxar-meta-format": {
+    "link": "/docs/file-formats.html#pxar-meta-format",
+    "title": "Proxmox File Archive Format - Meta (``.mpxar``)"
+  },
+  "ppxar-format": {
+    "link": "/docs/file-formats.html#ppxar-format",
+    "title": "Proxmox File Archive Format - Payload (``.ppxar``)"
+  },
   "data-blob-format": {
     "link": "/docs/file-formats.html#data-blob-format",
     "title": "Data Blob Format (``.blob``)"
@@ -139,6 +159,38 @@ const proxmoxOnlineHelpInfo = {
     "link": "/docs/network-management.html#sysadmin-traffic-control",
     "title": "Traffic Control"
   },
+  "notifications": {
+    "link": "/docs/notifications.html#notifications",
+    "title": "Notifications"
+  },
+  "notification-targets": {
+    "link": "/docs/notifications.html#notification-targets",
+    "title": "Notification Targets"
+  },
+  "notification-targets-sendmail": {
+    "link": "/docs/notifications.html#notification-targets-sendmail",
+    "title": "Sendmail"
+  },
+  "notification-targets-smtp": {
+    "link": "/docs/notifications.html#notification-targets-smtp",
+    "title": "SMTP"
+  },
+  "notification-targets-gotify": {
+    "link": "/docs/notifications.html#notification-targets-gotify",
+    "title": "Gotify"
+  },
+  "notification-matchers": {
+    "link": "/docs/notifications.html#notification-matchers",
+    "title": "Notification Matchers"
+  },
+  "notification-events": {
+    "link": "/docs/notifications.html#notification-events",
+    "title": "Notification Events"
+  },
+  "notification-mode": {
+    "link": "/docs/notifications.html#notification-mode",
+    "title": "Notification Mode"
+  },
   "pve-integration": {
     "link": "/docs/pve-integration.html#pve-integration",
     "title": "`Proxmox VE`_ Integration"
@@ -382,5 +434,9 @@ const proxmoxOnlineHelpInfo = {
   "user-realms-ldap": {
     "link": "/docs/user-management.html#user-realms-ldap",
     "title": "LDAP"
+  },
+  "user-realms-ad": {
+    "link": "/docs/user-management.html#user-realms-ad",
+    "title": "Active Directory"
   }
 };
diff --git a/www/SystemConfiguration.js b/www/SystemConfiguration.js
index e94fe7ca1..23330b6a5 100644
--- a/www/SystemConfiguration.js
+++ b/www/SystemConfiguration.js
@@ -41,7 +41,7 @@ Ext.define('PBS.SystemConfiguration', {
 		    flex: 1,
 		    minHeight: 200,
 		    showApplyBtn: true,
-		    types: ['bond', 'bridge'],
+		    types: ['bond', 'bridge', 'vlan'],
 		    nodename: 'localhost',
 		},
 	    ],
diff --git a/www/Utils.js b/www/Utils.js
index 5357949b6..4853be36c 100644
--- a/www/Utils.js
+++ b/www/Utils.js
@@ -199,12 +199,18 @@ Ext.define('PBS.Utils', {
 	return fingerprint.substring(0, 23);
     },
 
-    render_task_status: function(value, metadata, record) {
-	if (!record.data['last-run-upid']) {
+    render_task_status: function(value, metadata, record, rowIndex, colIndex, store) {
+	// GC tasks use 'upid' for backwards-compat, the rest use 'last-run-upid'
+	if (
+	    !record.data['last-run-upid'] &&
+	    !store.getById('last-run-upid')?.data.value &&
+	    !record.data.upid &&
+	    !store.getById('upid')?.data.value
+	) {
 	    return '-';
 	}
 
-	if (!record.data['last-run-endtime']) {
+	if (!record.data['last-run-endtime'] && !store.getById('last-run-endtime')?.data.value) {
 	    metadata.tdCls = 'x-grid-row-loading';
 	    return '';
 	}
@@ -429,6 +435,23 @@ Ext.define('PBS.Utils', {
 	    zfscreate: [gettext('ZFS Storage'), gettext('Create')],
 	});
 
+	Proxmox.Utils.overrideNotificationFieldName({
+	    'datastore': gettext('Datastore'),
+	    'job-id': gettext('Job ID'),
+	    'media-pool': gettext('Media Pool'),
+	});
+
+	Proxmox.Utils.overrideNotificationFieldValue({
+	    'acme': gettext('ACME certificate renewal'),
+	    'gc': gettext('Garbage collection'),
+	    'package-updates': gettext('Package updates are available'),
+	    'prune': gettext('Prune job'),
+	    'sync': gettext('Sync job'),
+	    'tape-backup': gettext('Tape backup notifications'),
+	    'tape-load': gettext('Tape loading request'),
+	    'verify': gettext('Verification job'),
+	});
+
 	Proxmox.Schema.overrideAuthDomains({
 	    pbs: {
 		name: 'Proxmox Backup authentication server',
@@ -438,6 +461,28 @@ Ext.define('PBS.Utils', {
 		sync: false,
 	    },
 	});
+
+	// TODO: use `overrideEndpointTypes` later - not done right now to avoid
+	// breakage if widget-toolkit is not updated yet.
+	Proxmox.Schema.notificationEndpointTypes = {
+	    sendmail: {
+		name: 'Sendmail',
+		ipanel: 'pmxSendmailEditPanel',
+		iconCls: 'fa-envelope-o',
+		defaultMailAuthor: 'Proxmox Backup Server - $hostname',
+	    },
+	    smtp: {
+		name: 'SMTP',
+		ipanel: 'pmxSmtpEditPanel',
+		iconCls: 'fa-envelope-o',
+		defaultMailAuthor: 'Proxmox Backup Server - $hostname',
+	    },
+	    gotify: {
+		name: 'Gotify',
+		ipanel: 'pmxGotifyEditPanel',
+		iconCls: 'fa-bell-o',
+	    },
+	};
     },
 
     // Convert an ArrayBuffer to a base64url encoded string.
@@ -614,6 +659,9 @@ Ext.define('PBS.Utils', {
 	    if (key === 'bytes-read' || key === 'bytes-written') {
 		val = Proxmox.Utils.format_size(val);
 	    }
+	    if (key === 'drive-activity') {
+		val = PBS.Utils.renderDriveActivity(val);
+	    }
 	    list.push({ key: key, value: val });
 	}
 
@@ -647,8 +695,37 @@ Ext.define('PBS.Utils', {
 	}).show();
     },
 
-    renderDriveState: function(value, md) {
+    tapeDriveActivities: {
+	'no-activity': gettext('No Activity'),
+	'cleaning': gettext('Cleaning'),
+	'loading': gettext('Loading'),
+	'unloading': gettext('Unloading'),
+	'other': gettext('Other Activity'),
+	'reading': gettext('Reading data'),
+	'writing': gettext('Writing data'),
+	'locating': gettext('Locating'),
+	'rewinding': gettext('Rewinding'),
+	'erasing': gettext('Erasing'),
+	'formatting': gettext('Formatting'),
+	'calibrating': gettext('Calibrating'),
+	'other-dt': gettext('Other DT Activity'),
+	'microcode-update': gettext('Updating Microcode'),
+	'reading-encrypted': gettext('Reading encrypted data'),
+	'writing-encrypted': gettext('Writing encrypted data'),
+    },
+
+    renderDriveActivity: function(value) {
 	if (!value) {
+	    return Proxmox.Utils.unknownText;
+	}
+	return PBS.Utils.tapeDriveActivities[value] ?? value;
+    },
+
+    renderDriveState: function(value, md, rec) {
+	if (!value) {
+	    if (rec?.data?.activity && rec?.data?.activity !== 'no-activity') {
+		return PBS.Utils.renderDriveActivity(rec.data.activity);
+	    }
 	    return gettext('Idle');
 	}
 
diff --git a/www/config/GCView.js b/www/config/GCView.js
new file mode 100644
index 000000000..a6e79fb34
--- /dev/null
+++ b/www/config/GCView.js
@@ -0,0 +1,231 @@
+Ext.define('pbs-gc-jobs-status', {
+    extend: 'Ext.data.Model',
+    fields: [
+	'store', 'upid', 'removed-bytes', 'pending-bytes', 'schedule',
+	'next-run', 'last-run-endtime', 'last-run-state', 'duration',
+    ],
+    idProperty: 'store',
+    proxy: {
+	type: 'proxmox',
+	url: '/api2/json/admin/gc',
+    },
+});
+
+Ext.define('PBS.config.GCJobView', {
+    extend: 'Ext.grid.GridPanel',
+    alias: 'widget.pbsGCJobView',
+
+    stateful: true,
+    allowDeselect: false,
+
+    title: gettext('Garbage Collect Jobs'),
+
+    controller: {
+	xclass: 'Ext.app.ViewController',
+
+	init: function(view) {
+	    let params = {};
+	    let store = view.getStore();
+	    let proxy = store.rstore.getProxy();
+	    if (view.datastore) {
+		params.store = view.datastore;
+
+		// after the store is loaded, select the row to enable the Edit, etc. buttons
+		store.rstore.proxy.on({
+		    'afterload': {
+			fn: () => view.getSelectionModel().select(0),
+			single: true,
+		    },
+		});
+
+		// do not highlight the selected row
+		view.items.items[0].selectedItemCls = '';
+		view.items.items[0].overItemCls = '';
+	    }
+	    proxy.setExtraParams(params);
+	    Proxmox.Utils.monStoreErrors(view, store.rstore);
+	},
+
+	getDatastoreName: function() {
+	    return this.getView().getSelection()[0]?.data.store;
+	},
+
+	getData: function() {
+	    let view = this.getView();
+	    let datastore = this.getDatastoreName();
+	    return view.getStore().getById(datastore).data;
+	},
+
+	editGCJob: function() {
+	    let data = this.getData();
+	    Ext.create('PBS.window.GCJobEdit', {
+		datastore: data.store,
+		id: data.store,
+		schedule: data.schedule,
+		listeners: {
+		    destroy: () => this.reload(),
+		},
+	    }).show();
+	},
+
+	garbageCollect: function() {
+	    let datastore = this.getDatastoreName();
+	    Proxmox.Utils.API2Request({
+		url: `/admin/datastore/${datastore}/gc`,
+		method: 'POST',
+		failure: function(response) {
+		    Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+		},
+		success: function(response, options) {
+		    Ext.create('Proxmox.window.TaskViewer', {
+			upid: response.result.data,
+		    }).show();
+		},
+	    });
+	},
+
+	showTaskLog: function() {
+	    let me = this;
+
+	    let upid = this.getData().upid;
+	    if (!upid) return;
+
+	    Ext.create('Proxmox.window.TaskViewer', { upid }).show();
+	},
+
+	startStore: function() { this.getView().getStore().rstore.startUpdate(); },
+	stopStore: function() { this.getView().getStore().rstore.stopUpdate(); },
+	reload: function() { this.getView().getStore().rstore.load(); },
+
+
+	filterState: function(view, state) {
+	    delete state.height;
+	},
+    },
+
+    listeners: {
+	activate: 'startStore',
+	beforedestroy: 'stopStore',
+	deactivate: 'stopStore',
+	itemdblclick: 'editGCJob',
+	beforestatesave: 'filterState',
+    },
+
+    store: {
+	type: 'diff',
+	autoDestroy: true,
+	autoDestroyRstore: true,
+	sorters: 'store',
+	rstore: {
+	    type: 'update',
+	    storeid: 'pbs-gc-jobs-status',
+	    model: 'pbs-gc-jobs-status',
+	    interval: 5000,
+	},
+    },
+
+    tbar: [
+	{
+	    xtype: 'proxmoxButton',
+	    text: gettext('Edit'),
+	    handler: 'editGCJob',
+	    enableFn: (rec) => !!rec,
+	    disabled: true,
+	},
+	'-',
+	{
+	    xtype: 'proxmoxButton',
+	    text: gettext('Show Log'),
+	    handler: 'showTaskLog',
+	    enableFn: (rec) => !!rec.data.upid,
+	    disabled: true,
+	},
+	{
+	    xtype: 'proxmoxButton',
+	    text: gettext('Run now'),
+	    handler: 'garbageCollect',
+	    enableFn: (rec) => !!rec,
+	    disabled: true,
+	},
+    ],
+
+    columns: [
+	{
+	    header: gettext('Datastore'),
+	    dataIndex: 'store',
+	    renderer: Ext.String.htmlEncode,
+	    hideable: false,
+	    minWidth: 120,
+	    maxWidth: 300,
+	    flex: 2,
+	},
+	{
+	    header: gettext('Schedule'),
+	    dataIndex: 'schedule',
+	    hideable: false,
+	    renderer: (value) => value ? value : Proxmox.Utils.NoneText,
+	    minWidth: 85,
+	    flex: 1,
+	},
+	{
+	    header: gettext('Last GC'),
+	    dataIndex: 'last-run-endtime',
+	    renderer: PBS.Utils.render_optional_timestamp,
+	    minWidth: 150,
+	    flex: 1,
+	},
+	{
+	    text: gettext('Duration'),
+	    dataIndex: 'duration',
+	    renderer: Proxmox.Utils.render_duration,
+	    minWidth: 80,
+	    flex: 1,
+	},
+	{
+	    header: gettext('Last Status'),
+	    dataIndex: 'last-run-state',
+	    renderer: PBS.Utils.render_task_status,
+	    minWidth: 80,
+	    flex: 1,
+	},
+	{
+	    header: gettext('Next Run'),
+	    dataIndex: 'next-run',
+	    renderer: PBS.Utils.render_next_task_run,
+	    minWidth: 150,
+	    flex: 1,
+	},
+	{
+	    header: gettext('Removed Data'),
+	    dataIndex: 'removed-bytes',
+	    renderer: (value, meta, record) => record.data.upid !== null
+		? Proxmox.Utils.format_size(value, true) : "-",
+	    minWidth: 85,
+	    flex: 1,
+	},
+	{
+	    header: gettext('Pending Data'),
+	    dataIndex: 'pending-bytes',
+	    renderer: (value, meta, record) => record.data.upid !== null
+		? Proxmox.Utils.format_size(value, true) : "-",
+	    minWidth: 80,
+	    flex: 3,
+	},
+    ],
+
+    initComponent: function() {
+	let me = this;
+	let isSingleDatastore = !!me.datastore;
+
+	me.stateId = isSingleDatastore ? 'grid-gc-jobs-single' : 'grid-gc-jobs-v1';
+
+	for (let column of me.columns) {
+	    column.sortable = !isSingleDatastore;
+	    if (column.dataIndex === 'store') {
+		column.hidden = isSingleDatastore;
+	    }
+	}
+
+	me.callParent();
+    },
+});
diff --git a/www/config/NotificationConfigView.js b/www/config/NotificationConfigView.js
new file mode 100644
index 000000000..904cee71d
--- /dev/null
+++ b/www/config/NotificationConfigView.js
@@ -0,0 +1,11 @@
+Ext.define('PBS.config.NotificationConfigView', {
+    extend: 'Proxmox.panel.NotificationConfigView',
+    alias: ['widget.pbsNotificationConfigView'],
+    mixins: ['Proxmox.Mixin.CBind'],
+
+    cbindData: function(_initialConfig) {
+        return {
+            baseUrl: '/config/notifications',
+        };
+    },
+});
diff --git a/www/config/PruneAndGC.js b/www/config/PruneAndGC.js
new file mode 100644
index 000000000..ab92c8fe4
--- /dev/null
+++ b/www/config/PruneAndGC.js
@@ -0,0 +1,60 @@
+Ext.define('PBS.config.PruneAndGC', {
+    extend: 'Ext.panel.Panel',
+    alias: 'widget.pbsPruneAndGC',
+    title: gettext('Prune & GC Jobs'),
+
+    mixins: ['Proxmox.Mixin.CBind'],
+
+    layout: {
+	type: 'vbox',
+	align: 'stretch',
+	multi: true,
+    },
+    defaults: {
+	collapsible: false,
+	margin: '7 10 3 10',
+    },
+    scrollable: true,
+    items: [
+	{
+	    xtype: 'pbsGCJobView',
+	    itemId: 'gcjobs',
+	    nodename: 'localhost',
+	    cbind: {
+		datastore: '{datastore}',
+	    },
+	    minHeight: 125, // shows at least one line of content
+	},
+	{
+	    xtype: 'splitter',
+	    performCollapse: false,
+	},
+	{
+	    xtype: 'pbsPruneJobView',
+	    nodename: 'localhost',
+	    itemId: 'prunejobs',
+	    cbind: {
+		datastore: '{datastore}',
+	    },
+	    flex: 1,
+	    minHeight: 160, // shows at least one line of content
+	},
+    ],
+    initComponent: function() {
+	let me = this;
+
+	let subPanelIds = me.items.map(el => el.itemId).filter(id => !!id);
+
+	me.callParent();
+
+	for (const itemId of subPanelIds) {
+	    let component = me.getComponent(itemId);
+	    component.relayEvents(me, ['activate', 'deactivate', 'destroy']);
+	}
+    },
+
+    cbindData: function(initialConfig) {
+	let me = this;
+	me.datastore = initialConfig.datastore ? initialConfig.datastore : undefined;
+    },
+});
diff --git a/www/config/SyncView.js b/www/config/SyncView.js
index c6458a9eb..4669a23e2 100644
--- a/www/config/SyncView.js
+++ b/www/config/SyncView.js
@@ -3,7 +3,7 @@ Ext.define('pbs-sync-jobs-status', {
     fields: [
 	'id', 'owner', 'remote', 'remote-store', 'remote-ns', 'store', 'ns',
 	'schedule', 'group-filter', 'next-run', 'last-run-upid', 'last-run-state',
-	'last-run-endtime', 'transfer-last',
+	'last-run-endtime', 'transfer-last', 'max-depth',
 	{
 	    name: 'duration',
 	    calculate: function(data) {
@@ -224,9 +224,9 @@ Ext.define('PBS.config.SyncJobView', {
 	    renderer: PBS.Utils.render_optional_namespace,
 	},
 	{
-	    header: gettext('Max. Recursion'),
+	    header: gettext('Max. Depth'),
 	    dataIndex: 'max-depth',
-	    width: 10,
+	    width: 100,
 	    sortable: true,
 	},
 	{
diff --git a/www/config/UserView.js b/www/config/UserView.js
index 19dce06f9..812cad390 100644
--- a/www/config/UserView.js
+++ b/www/config/UserView.js
@@ -186,6 +186,7 @@ Ext.define('PBS.config.UserView', {
 	    xtype: 'proxmoxButton',
 	    text: gettext('Unlock TFA'),
 	    handler: 'unlockTfa',
+	    disabled: true,
 	    enableFn: ({ data }) =>
 	        data['totp-locked'] || (data['tfa-locked-until'] > (new Date().getTime() / 1000)),
 	},
diff --git a/www/datastore/Content.js b/www/datastore/Content.js
index c2403ff9c..e11b14b54 100644
--- a/www/datastore/Content.js
+++ b/www/datastore/Content.js
@@ -786,6 +786,9 @@ Ext.define('PBS.DataStoreContent', {
 		'backup-time': (time.getTime()/1000).toFixed(0),
 		'backup-type': type,
 	    };
+	    if (rec.data.filename.endsWith(".mpxar.didx")) {
+		extraParams['archive-name'] = rec.data.filename;
+	    }
 	    if (view.namespace && view.namespace !== '') {
 		extraParams.ns = view.namespace;
 	    }
@@ -1050,7 +1053,7 @@ Ext.define('PBS.DataStoreContent', {
 		    tooltip: gettext('Browse'),
 		    getClass: (v, m, { data }) => {
 			if (
-			    (data.ty === 'file' && data.filename.endsWith('pxar.didx')) ||
+			    (data.ty === 'file' && (data.filename.endsWith('.pxar.didx') || data.filename.endsWith('.mpxar.didx'))) ||
 			    (data.ty === 'ns' && !data.root)
 			) {
 			    return 'fa fa-folder-open-o';
@@ -1058,7 +1061,9 @@ Ext.define('PBS.DataStoreContent', {
 			return 'pmx-hidden';
 		    },
 		    isActionDisabled: (v, r, c, i, { data }) =>
-			!(data.ty === 'file' && data.filename.endsWith('pxar.didx') && data['crypt-mode'] < 3) && data.ty !== 'ns',
+			!(data.ty === 'file' &&
+			(data.filename.endsWith('.pxar.didx') || data.filename.endsWith('.mpxar.didx')) &&
+			data['crypt-mode'] < 3) && data.ty !== 'ns',
 		},
 	    ],
 	},
diff --git a/www/datastore/DataStoreList.js b/www/datastore/DataStoreList.js
index b496bcbc5..fc68cfc10 100644
--- a/www/datastore/DataStoreList.js
+++ b/www/datastore/DataStoreList.js
@@ -231,17 +231,16 @@ Ext.define('PBS.datastore.DataStores', {
 	    xtype: 'pbsDataStoreList',
 	    iconCls: 'fa fa-book',
 	},
-
+	{
+	    iconCls: 'fa fa-trash-o',
+	    itemId: 'prunegc',
+	    xtype: 'pbsPruneAndGC',
+	},
 	{
 	    iconCls: 'fa fa-refresh',
 	    itemId: 'syncjobs',
 	    xtype: 'pbsSyncJobView',
 	},
-	{
-	    iconCls: 'fa fa-trash-o',
-	    itemId: 'prunejobs',
-	    xtype: 'pbsPruneJobView',
-	},
 	{
 	    iconCls: 'fa fa-check-circle',
 	    itemId: 'verifyjobs',
diff --git a/www/datastore/Notes.js b/www/datastore/Notes.js
index 2928b7ecc..b99147284 100644
--- a/www/datastore/Notes.js
+++ b/www/datastore/Notes.js
@@ -9,7 +9,7 @@ Ext.define('PBS.DataStoreNotes', {
     scrollable: true,
     animCollapse: false,
 
-    cbindData: function(initalConfig) {
+    cbindData: function(initialConfig) {
 	let me = this;
 	me.url = `/api2/extjs/config/datastore/${me.datastore}`;
 	return { };
diff --git a/www/datastore/OptionView.js b/www/datastore/OptionView.js
index 6f1be969d..e1f38af6f 100644
--- a/www/datastore/OptionView.js
+++ b/www/datastore/OptionView.js
@@ -159,6 +159,21 @@ Ext.define('PBS.Datastore.Options', {
     },
 
     rows: {
+	"notification-mode": {
+	    required: true,
+	    defaultValue: 'legacy-sendmail',
+	    header: gettext('Notification mode'),
+	    renderer: function(value) {
+		if (value === 'notification-system') {
+		    return gettext('Notification system');
+		} else {
+		    return gettext('Email (legacy)');
+		}
+	    },
+	    editor: {
+		xtype: 'pbsNotifyOptionEdit',
+	    },
+	},
 	"notify": {
 	    required: true,
 	    header: gettext('Notify'),
diff --git a/www/datastore/Panel.js b/www/datastore/Panel.js
index fd1b46113..ad9fc10fe 100644
--- a/www/datastore/Panel.js
+++ b/www/datastore/Panel.js
@@ -3,7 +3,7 @@ Ext.define('PBS.DataStorePanel', {
     alias: 'widget.pbsDataStorePanel',
     mixins: ['Proxmox.Mixin.CBind'],
 
-    cbindData: function(initalConfig) {
+    cbindData: function(initialConfig) {
 	let me = this;
 	return {
 	    aclPath: `/datastore/${me.datastore}`,
@@ -58,8 +58,7 @@ Ext.define('PBS.DataStorePanel', {
 	    },
 	},
 	{
-	    title: gettext('Prune & GC'),
-	    xtype: 'pbsDatastorePruneAndGC',
+	    xtype: 'pbsPruneAndGC',
 	    itemId: 'prunegc',
 	    iconCls: 'fa fa-trash-o',
 	    cbind: {
diff --git a/www/datastore/Prune.js b/www/datastore/Prune.js
index 81f6927b6..5752907e3 100644
--- a/www/datastore/Prune.js
+++ b/www/datastore/Prune.js
@@ -52,6 +52,7 @@ Ext.define('PBS.Datastore.PruneInputPanel', {
 	if (me.ns && me.ns !== '') {
 	    values.ns = me.ns;
 	}
+	values["use-task"] = true;
 	return values;
     },
 
diff --git a/www/datastore/PruneAndGC.js b/www/datastore/PruneAndGC.js
deleted file mode 100644
index aab98dadf..000000000
--- a/www/datastore/PruneAndGC.js
+++ /dev/null
@@ -1,133 +0,0 @@
-Ext.define('PBS.Datastore.GCOptions', {
-    extend: 'Proxmox.grid.ObjectGrid',
-    alias: 'widget.pbsDatastoreGCOpts',
-    mixins: ['Proxmox.Mixin.CBind'],
-
-    onlineHelp: 'maintenance_pruning',
-
-    cbindData: function(initial) {
-	let me = this;
-
-	me.datastore = encodeURIComponent(me.datastore);
-	me.url = `/api2/json/config/datastore/${me.datastore}`;
-	me.editorConfig = {
-	    url: `/api2/extjs/config/datastore/${me.datastore}`,
-	};
-	return {};
-    },
-
-    controller: {
-	xclass: 'Ext.app.ViewController',
-
-	edit: function() { this.getView().run_editor(); },
-
-	garbageCollect: function() {
-	    let me = this;
-	    let view = me.getView();
-	    Proxmox.Utils.API2Request({
-		url: `/admin/datastore/${view.datastore}/gc`,
-		method: 'POST',
-		failure: function(response) {
-		    Ext.Msg.alert(gettext('Error'), response.htmlStatus);
-		},
-		success: function(response, options) {
-		    Ext.create('Proxmox.window.TaskViewer', {
-			upid: response.result.data,
-		    }).show();
-		},
-	    });
-	},
-    },
-
-    tbar: [
-	{
-	    xtype: 'proxmoxButton',
-	    text: gettext('Edit'),
-	    disabled: true,
-	    handler: 'edit',
-	},
-	'-',
-	{
-	    xtype: 'proxmoxButton',
-	    text: gettext('Start Garbage Collection'),
-	    selModel: null,
-	    handler: 'garbageCollect',
-	},
-    ],
-
-    listeners: {
-	activate: function() { this.rstore.startUpdate(); },
-	destroy: function() { this.rstore.stopUpdate(); },
-	deactivate: function() { this.rstore.stopUpdate(); },
-	itemdblclick: 'edit',
-    },
-
-    rows: {
-	"gc-schedule": {
-	    required: true,
-	    defaultValue: Proxmox.Utils.NoneText,
-	    header: gettext('Garbage Collection Schedule'),
-	    editor: {
-		xtype: 'proxmoxWindowEdit',
-		title: gettext('GC Schedule'),
-		onlineHelp: 'maintenance_gc',
-		items: {
-		    xtype: 'pbsCalendarEvent',
-		    name: 'gc-schedule',
-		    fieldLabel: gettext("GC Schedule"),
-		    emptyText: Proxmox.Utils.noneText,
-		    deleteEmpty: true,
-		},
-	    },
-	},
-    },
-});
-
-Ext.define('PBS.Datastore.PruneAndGC', {
-    extend: 'Ext.panel.Panel',
-    alias: 'widget.pbsDatastorePruneAndGC',
-    mixins: ['Proxmox.Mixin.CBind'],
-
-    layout: {
-	type: 'vbox',
-	align: 'stretch',
-	multi: true,
-    },
-    defaults: {
-	collapsible: false,
-	margin: '7 10 3 10',
-    },
-    items: [
-	{
-	    xtype: 'pbsDatastoreGCOpts',
-	    title: gettext('Garbage Collection'),
-	    itemId: 'datastore-gc',
-	    nodename: 'localhost',
-	    cbind: {
-		datastore: '{datastore}',
-	    },
-	},
-	{
-	    xtype: 'pbsPruneJobView',
-	    nodename: 'localhost',
-	    itemId: 'datastore-prune-jobs',
-	    flex: 1,
-	    minHeight: 200,
-	    cbind: {
-		datastore: '{datastore}',
-	    },
-	},
-    ],
-    initComponent: function() {
-	let me = this;
-
-	let subPanelIds = me.items.map(el => el.itemId);
-
-	me.callParent();
-
-	for (const itemId of subPanelIds) {
-	    let component = me.getComponent(itemId);
-	    component.relayEvents(me, ['activate', 'deactivate', 'destroy']);
-	}
-    },
-});
diff --git a/www/form/NamespaceMaxDepth.js b/www/form/NamespaceMaxDepth.js
index cfe80de05..64f8d512b 100644
--- a/www/form/NamespaceMaxDepth.js
+++ b/www/form/NamespaceMaxDepth.js
@@ -37,10 +37,10 @@ Ext.define('PBS.form.NamespaceMaxDepthReduced', {
 
     calcMaxPrefixLength: function(ns1, ns2) {
 	let maxPrefixLength = 0;
-	if (ns1 !== undefined && ns1 !== null) {
+	if (ns1 !== undefined && ns1 !== null && typeof ns1 === 'string') {
 	    maxPrefixLength = (ns1.match(/[/]/g) || []).length + (ns1 === '' ? 0 : 1);
 	}
-	if (ns2 !== undefined && ns2 !== null) {
+	if (ns2 !== undefined && ns2 !== null && typeof ns2 === 'string') {
 	    let ns2PrefixLength = (ns2.match(/[/]/g) || []).length + (ns2 === '' ? 0 : 1);
 	    if (ns2PrefixLength > maxPrefixLength) {
 		maxPrefixLength = ns2PrefixLength;
diff --git a/www/form/PermissionPathSelector.js b/www/form/PermissionPathSelector.js
index 3f8b53766..8dcea1954 100644
--- a/www/form/PermissionPathSelector.js
+++ b/www/form/PermissionPathSelector.js
@@ -19,6 +19,7 @@ Ext.define('PBS.data.PermissionPathsStore', {
 	{ 'value': '/system/network' },
 	{ 'value': '/system/network/dns' },
 	{ 'value': '/system/network/interfaces' },
+	{ 'value': '/system/notifications' },
 	{ 'value': '/system/services' },
 	{ 'value': '/system/status' },
 	{ 'value': '/system/tasks' },
diff --git a/www/tape/ChangerStatus.js b/www/tape/ChangerStatus.js
index fdafc459e..e18af90e4 100644
--- a/www/tape/ChangerStatus.js
+++ b/www/tape/ChangerStatus.js
@@ -1,6 +1,6 @@
 Ext.define('pbs-slot-model', {
     extend: 'Ext.data.Model',
-    fields: ['entry-id', 'label-text', 'is-labeled', ' model', 'name', 'vendor', 'serial', 'state', 'status', 'pool',
+    fields: ['entry-id', 'label-text', 'is-labeled', ' model', 'name', 'vendor', 'serial', 'state', 'status', 'pool', 'activity',
 	{
 	    name: 'is-blocked',
 	    calculate: function(data) {
@@ -488,7 +488,7 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
 		});
 		let drives_fut = Proxmox.Async.api2({
 		    timeout: 5*60*1000,
-		    url: `/api2/extjs/tape/drive?changer=${encodeURIComponent(changer)}`,
+		    url: `/api2/extjs/tape/drive?query-activity=true&changer=${encodeURIComponent(changer)}`,
 		});
 
 		let tapes_fut = Proxmox.Async.api2({
@@ -852,6 +852,13 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
 				    flex: 1,
 				    renderer: Ext.htmlEncode,
 				},
+				{
+				    text: gettext('Activity'),
+				    dataIndex: 'activity',
+				    renderer: PBS.Utils.renderDriveActivity,
+				    hidden: true,
+				    flex: 1,
+				},
 				{
 				    text: gettext('State'),
 				    dataIndex: 'state',
diff --git a/www/tape/DriveStatus.js b/www/tape/DriveStatus.js
index 2c55fc97f..007273f67 100644
--- a/www/tape/DriveStatus.js
+++ b/www/tape/DriveStatus.js
@@ -45,7 +45,7 @@ Ext.define('PBS.TapeManagement.DriveStatus', {
 	onLoad: function() {
 	    let me = this;
 	    let statusgrid = me.lookup('statusgrid');
-	    let online = statusgrid.getObjectValue('file-number') !== undefined;
+	    let online = statusgrid.getObjectValue('file-number') !== undefined || statusgrid.getObjectValue('manufactured');
 	    let vm = me.getViewModel();
 	    vm.set('online', online);
 	    let title = online ? gettext('Status') : gettext('Status (No Tape loaded)');
@@ -348,6 +348,10 @@ Ext.define('PBS.TapeManagement.DriveStatusGrid', {
 	    header: gettext('Compression'),
 	    renderer: Proxmox.Utils.format_boolean,
 	},
+	'drive-activity': {
+	    header: gettext('Drive Activity'),
+	    renderer: PBS.Utils.renderDriveActivity,
+	},
 	'file-number': {
 	    header: gettext('Tape Position'),
 	    renderer: function(value, mD, r, rI, cI, store) {
diff --git a/www/tape/TapeInventory.js b/www/tape/TapeInventory.js
index 47d19acc0..305134e3e 100644
--- a/www/tape/TapeInventory.js
+++ b/www/tape/TapeInventory.js
@@ -16,6 +16,7 @@ Ext.define('pbs-model-tapes', {
 	'seq-nr',
 	'status',
 	'uuid',
+	'bytes-used',
     ],
     idProperty: 'uuid',
     proxy: {
@@ -326,5 +327,11 @@ Ext.define('PBS.TapeManagement.TapeInventory', {
 	    flex: 1,
 	    hidden: true,
 	},
+	{
+	    text: gettext("Bytes Used"),
+	    dataIndex: 'bytes-used',
+	    flex: 1,
+	    renderer: Proxmox.Utils.render_size,
+	},
     ],
 });
diff --git a/www/tape/window/TapeBackup.js b/www/tape/window/TapeBackup.js
index 941b6b836..7a45e388c 100644
--- a/www/tape/window/TapeBackup.js
+++ b/www/tape/window/TapeBackup.js
@@ -8,6 +8,15 @@ Ext.define('PBS.TapeManagement.TapeBackupWindow', {
     showTaskViewer: true,
     isCreate: true,
 
+    viewModel: {
+	data: {
+	    notificationMode: 'notification-system',
+	},
+	formulas: {
+	    notificationSystemSelected: (get) => get('notificationMode') === 'notification-system',
+	},
+    },
+
     items: [
 	{
 	    xtype: 'inputpanel',
@@ -39,6 +48,7 @@ Ext.define('PBS.TapeManagement.TapeBackupWindow', {
 		    fieldLabel: gettext('Max Depth'),
 		    disabled: true,
 		    name: 'max-depth',
+		    deleteEmpty: false,
 		},
 		{
 		    xtype: 'pbsMediaPoolSelector',
@@ -81,6 +91,19 @@ Ext.define('PBS.TapeManagement.TapeBackupWindow', {
 		    name: 'eject-media',
 		    fieldLabel: gettext('Eject Media'),
 		},
+		{
+		    xtype: 'proxmoxKVComboBox',
+		    labelWidth: 150,
+		    comboItems: [
+			['legacy-sendmail', gettext('Email (legacy)')],
+			['notification-system', gettext('Notification system')],
+		    ],
+		    fieldLabel: gettext('Notification mode'),
+		    name: 'notification-mode',
+		    bind: {
+			value: '{notificationMode}',
+		    },
+		},
 		{
 		    xtype: 'pmxUserSelector',
 		    labelWidth: 150,
@@ -90,6 +113,9 @@ Ext.define('PBS.TapeManagement.TapeBackupWindow', {
 		    value: null,
 		    allowBlank: true,
 		    skipEmptyText: true,
+		    bind: {
+			disabled: "{notificationSystemSelected}",
+		    },
 		    renderer: Ext.String.htmlEncode,
 		},
 	    ],
diff --git a/www/tape/window/TapeBackupJob.js b/www/tape/window/TapeBackupJob.js
index abbbaa0b2..12623712a 100644
--- a/www/tape/window/TapeBackupJob.js
+++ b/www/tape/window/TapeBackupJob.js
@@ -46,6 +46,23 @@ Ext.define('PBS.TapeManagement.BackupJobEdit', {
 	},
     },
 
+    viewModel: {
+	data: {
+	    notificationMode: '__default__',
+	},
+	formulas: {
+	    notificationSystemSelected: (get) => get('notificationMode') === 'notification-system',
+	},
+    },
+
+    initComponent: function() {
+	let me = this;
+	// Automatically select the new system for new jobs
+	let mode = me.isCreate ? "notification-system" : "__default__";
+	me.getViewModel().set('notificationMode', mode);
+	me.callParent();
+    },
+
     items: {
 	xtype: 'tabpanel',
 	bodyPadding: 10,
@@ -109,6 +126,19 @@ Ext.define('PBS.TapeManagement.BackupJobEdit', {
 			fieldLabel: gettext('Drive'),
 			name: 'drive',
 		    },
+		    {
+			xtype: 'proxmoxKVComboBox',
+			comboItems: [
+			    ['__default__', `${Proxmox.Utils.defaultText}  (Email)`],
+			    ['legacy-sendmail', gettext('Email (legacy)')],
+			    ['notification-system', gettext('Notification system')],
+			],
+			fieldLabel: gettext('Notification mode'),
+			name: 'notification-mode',
+			bind: {
+			    value: '{notificationMode}',
+			},
+		    },
 		    {
 			xtype: 'pmxUserSelector',
 			name: 'notify-user',
@@ -117,6 +147,9 @@ Ext.define('PBS.TapeManagement.BackupJobEdit', {
 			allowBlank: true,
 			value: null,
 			renderer: Ext.String.htmlEncode,
+			bind: {
+			    disabled: "{notificationSystemSelected}",
+			},
 		    },
 		],
 
diff --git a/www/tape/window/TapeRestore.js b/www/tape/window/TapeRestore.js
index c686c9586..61a2fcd75 100644
--- a/www/tape/window/TapeRestore.js
+++ b/www/tape/window/TapeRestore.js
@@ -33,6 +33,7 @@ Ext.define('PBS.TapeManagement.TapeRestoreWindow', {
 	data: {
 	    uuid: "",
 	    singleDatastore: true,
+	    notificationMode: 'notification-system',
 	},
 	formulas: {
 	    singleSelectorLabel: get =>
@@ -40,6 +41,7 @@ Ext.define('PBS.TapeManagement.TapeRestoreWindow', {
 	    singleSelectorEmptyText: get => get('singleDatastore') ? '' : Proxmox.Utils.NoneText,
 	    singleSelectorLabelNs: get =>
 		get('singleDatastore') ? gettext('Target Namespace') : gettext('Default Namespace'),
+	    notificationSystemSelected: (get) => get('notificationMode') === 'notification-system',
 	},
     },
 
@@ -428,6 +430,18 @@ Ext.define('PBS.TapeManagement.TapeRestoreWindow', {
 			return values;
 		    },
 		    column1: [
+			{
+			    xtype: 'proxmoxKVComboBox',
+			    comboItems: [
+				['legacy-sendmail', gettext('Email (legacy)')],
+				['notification-system', gettext('Notification system')],
+			    ],
+			    fieldLabel: gettext('Notification mode'),
+			    name: 'notification-mode',
+			    bind: {
+				value: '{notificationMode}',
+			    },
+			},
 			{
 			    xtype: 'pmxUserSelector',
 			    name: 'notify-user',
@@ -437,6 +451,9 @@ Ext.define('PBS.TapeManagement.TapeRestoreWindow', {
 			    allowBlank: true,
 			    skipEmptyText: true,
 			    renderer: Ext.String.htmlEncode,
+			    bind: {
+				disabled: "{notificationSystemSelected}",
+			    },
 			},
 			{
 			    xtype: 'pbsAuthidSelector',
diff --git a/www/window/DataStoreEdit.js b/www/window/DataStoreEdit.js
index aecf6b8de..b61154606 100644
--- a/www/window/DataStoreEdit.js
+++ b/www/window/DataStoreEdit.js
@@ -39,6 +39,9 @@ Ext.define('PBS.DataStoreEdit', {
 		title: gettext('General'),
 		xtype: 'inputpanel',
 		onlineHelp: 'datastore_intro',
+		cbind: {
+		    isCreate: '{isCreate}',
+		},
 		column1: [
 		    {
 			xtype: 'pmxDisplayEditField',
@@ -90,6 +93,16 @@ Ext.define('PBS.DataStoreEdit', {
 			fieldLabel: gettext('Comment'),
 		    },
 		],
+
+		onGetValues: function(values) {
+		    let me = this;
+
+		    if (me.isCreate) {
+			// New datastores default to using the notification system
+			values['notification-mode'] = 'notification-system';
+		    }
+		    return values;
+		},
 	    },
 	    {
 		title: gettext('Prune Options'),
diff --git a/www/window/GCJobEdit.js b/www/window/GCJobEdit.js
new file mode 100644
index 000000000..5d6e64b95
--- /dev/null
+++ b/www/window/GCJobEdit.js
@@ -0,0 +1,28 @@
+Ext.define('PBS.window.GCJobEdit', {
+    extend: 'Proxmox.window.Edit',
+    alias: 'widget.pbsGCJobEdit',
+    mixins: ['Proxmox.Mixin.CBind'],
+
+    userid: undefined,
+    onlineHelp: 'maintenance_gc',
+    isAdd: false,
+
+    subject: gettext('Garbage Collect Schedule'),
+
+    cbindData: function(initial) {
+	let me = this;
+
+	me.datastore = encodeURIComponent(me.datastore);
+	me.url = `/api2/extjs/config/datastore/${me.datastore}`;
+	me.method = 'PUT';
+	me.autoLoad = true;
+	return {};
+    },
+
+    items: {
+	xtype: 'pbsCalendarEvent',
+	name: 'gc-schedule',
+	fieldLabel: gettext("GC Schedule"),
+	emptyText: gettext('none (disabled)'),
+    },
+});
diff --git a/www/window/NotificationMatcherOverride.js b/www/window/NotificationMatcherOverride.js
new file mode 100644
index 000000000..bc7b7c1d1
--- /dev/null
+++ b/www/window/NotificationMatcherOverride.js
@@ -0,0 +1,1105 @@
+// Override some components from widget toolkit.
+// This was done so that we can already use the improved UI for editing
+// match rules without waiting for the needed API calls in PVE to be merged
+//
+// This can and *should* be removed once these changes have landed in
+// widget toolkit:
+// https://lists.proxmox.com/pipermail/pve-devel/2024-April/063539.html
+
+
+Ext.define('pbs-notification-fields', {
+    extend: 'Ext.data.Model',
+    fields: ['name', 'description'],
+    idProperty: 'name',
+});
+
+Ext.define('pbs-notification-field-values', {
+    extend: 'Ext.data.Model',
+    fields: ['value', 'comment', 'field'],
+    idProperty: 'value',
+});
+
+Ext.define('PBS.panel.NotificationRulesEditPanel', {
+    override: 'Proxmox.panel.NotificationRulesEditPanel',
+    extend: 'Proxmox.panel.InputPanel',
+    xtype: 'pmxNotificationMatchRulesEditPanel',
+    mixins: ['Proxmox.Mixin.CBind'],
+
+    controller: {
+	xclass: 'Ext.app.ViewController',
+
+	// we want to also set the empty value, but 'bind' does not do that so
+	// we have to set it then (and only then) to get the correct value in
+	// the tree
+	control: {
+	    'field': {
+		change: function(cmp) {
+		    let me = this;
+		    let vm = me.getViewModel();
+		    if (cmp.field) {
+			let record = vm.get('selectedRecord');
+			if (!record) {
+			    return;
+			}
+			let data = Ext.apply({}, record.get('data'));
+			let value = cmp.getValue();
+			// only update if the value is empty (or empty array)
+			if (!value || !value.length) {
+			    data[cmp.field] = value;
+			    record.set({ data });
+			}
+		    }
+		},
+	    },
+	},
+    },
+
+    viewModel: {
+	data: {
+	    selectedRecord: null,
+	    matchFieldType: 'exact',
+	    matchFieldField: '',
+	    matchFieldValue: '',
+	    rootMode: 'all',
+	},
+
+	formulas: {
+	    nodeType: {
+		get: function(get) {
+		    let record = get('selectedRecord');
+		    return record?.get('type');
+		},
+		set: function(value) {
+		    let me = this;
+		    let record = me.get('selectedRecord');
+
+		    let data;
+
+		    switch (value) {
+			case 'match-severity':
+			    data = {
+				value: ['info', 'notice', 'warning', 'error', 'unknown'],
+			    };
+			    break;
+			case 'match-field':
+			    data = {
+				type: 'exact',
+				field: '',
+				value: '',
+			    };
+			    break;
+			case 'match-calendar':
+			    data = {
+				value: '',
+			    };
+			    break;
+		    }
+
+		    let node = {
+			type: value,
+			data,
+		    };
+		    record.set(node);
+		},
+	    },
+	    showMatchingMode: function(get) {
+		let record = get('selectedRecord');
+		if (!record) {
+		    return false;
+		}
+		return record.isRoot();
+	    },
+	    showMatcherType: function(get) {
+		let record = get('selectedRecord');
+		if (!record) {
+		    return false;
+		}
+		return !record.isRoot();
+	    },
+
+	    rootMode: {
+		bind: {
+		    bindTo: '{selectedRecord}',
+		    deep: true,
+		},
+		set: function(value) {
+		    let me = this;
+		    let record = me.get('selectedRecord');
+		    let currentData = record.get('data');
+		    let invert = false;
+		    if (value.startsWith('not')) {
+			value = value.substring(3);
+			invert = true;
+		    }
+		    record.set({
+			data: {
+			    ...currentData,
+			    value,
+			    invert,
+			},
+		    });
+		},
+		get: function(record) {
+		    let prefix = record?.get('data').invert ? 'not' : '';
+		    return prefix + record?.get('data')?.value;
+		},
+	    },
+	},
+    },
+
+    column1: [
+	{
+	    xtype: 'pbsNotificationMatchRuleTree',
+	    cbind: {
+		isCreate: '{isCreate}',
+	    },
+	},
+    ],
+    column2: [
+	{
+	    xtype: 'pbsNotificationMatchRuleSettings',
+	    cbind: {
+		baseUrl: '{baseUrl}',
+	    },
+	},
+
+    ],
+
+    onGetValues: function(values) {
+	let me = this;
+
+	let deleteArrayIfEmpty = (field) => {
+	    if (Ext.isArray(values[field])) {
+		if (values[field].length === 0) {
+		    delete values[field];
+		    if (!me.isCreate) {
+			Proxmox.Utils.assemble_field_data(values, { 'delete': field });
+		    }
+		}
+	    }
+	};
+	deleteArrayIfEmpty('match-field');
+	deleteArrayIfEmpty('match-severity');
+	deleteArrayIfEmpty('match-calendar');
+
+	return values;
+    },
+});
+
+Ext.define('PBS.panel.NotificationMatchRuleTree', {
+    extend: 'Ext.panel.Panel',
+    xtype: 'pbsNotificationMatchRuleTree',
+    mixins: ['Proxmox.Mixin.CBind'],
+    border: false,
+
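+    // Derive display text and icon class for a tree node from its type and
+    // data; nodes with missing or invalid data get the 'internal-error'
+    // class appended to mark them.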
+    getNodeTextAndIcon: function(type, data) {
+	let text;
+	let iconCls;
+
+	switch (type) {
+	    case 'match-severity': {
+		let v = data.value;
+		if (Ext.isArray(data.value)) {
+		    v = data.value.join(', ');
+		}
+		text = Ext.String.format(gettext("Match severity: {0}"), v);
+		iconCls = 'fa fa-exclamation';
+		if (!v) {
+		    iconCls += ' internal-error';
+		}
+	    } break;
+	    case 'match-field': {
+		let field = data.field;
+		let value = data.value;
+		text = Ext.String.format(gettext("Match field: {0}={1}"), field, value);
+		iconCls = 'fa fa-square-o';
+		if (!field || !value || (Ext.isArray(value) && !value.length)) {
+		    iconCls += ' internal-error';
+		}
+	    } break;
+	    case 'match-calendar': {
+		let v = data.value;
+		text = Ext.String.format(gettext("Match calendar: {0}"), v);
+		iconCls = 'fa fa-calendar-o';
+		if (!v || !v.length) {
+		    iconCls += ' internal-error';
+		}
+	    } break;
+	    case 'mode':
+		if (data.value === 'all') {
+		    text = gettext("All");
+		} else if (data.value === 'any') {
+		    text = gettext("Any");
+		}
+		if (data.invert) {
+		    text = `!${text}`;
+		}
+		iconCls = 'fa fa-filter';
+
+		break;
+	}
+
+	return [text, iconCls];
+    },
+
+    initComponent: function() {
+	let me = this;
+
+	let treeStore = Ext.create('Ext.data.TreeStore', {
+	    root: {
+		expanded: true,
+		expandable: false,
+		text: '',
+		type: 'mode',
+		data: {
+		    value: 'all',
+		    invert: false,
+		},
+		children: [],
+		iconCls: 'fa fa-filter',
+	    },
+	});
+
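+	// The tree store above only drives the visual editor; the hidden
+	// fields below hold the actual 'match-field', 'match-severity',
+	// 'match-calendar', 'mode' and 'invert-match' values that are
+	// submitted to the backend.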
+	let realMatchFields = Ext.create({
+	    xtype: 'hiddenfield',
+	    setValue: function(value) {
+		this.value = value;
+		this.checkChange();
+	    },
+	    getValue: function() {
+		return this.value;
+	    },
+	    getErrors: function() {
+		for (const matcher of this.value ?? []) {
+		    let matches = matcher.match(/^([^:]+):([^=]+)=(.+)$/);
+		    if (!matches) {
+			return [""]; // fake error for validation
+		    }
+		}
+		return [];
+	    },
+	    getSubmitValue: function() {
+		let value = this.value;
+		if (!value) {
+		    value = [];
+		}
+		return value;
+	    },
+	    name: 'match-field',
+	});
+
+	let realMatchSeverity = Ext.create({
+	    xtype: 'hiddenfield',
+	    setValue: function(value) {
+		this.value = value;
+		this.checkChange();
+	    },
+	    getValue: function() {
+		return this.value;
+	    },
+	    getErrors: function() {
+		for (const severities of this.value ?? []) {
+		    if (!severities) {
+			return [""]; // fake error for validation
+		    }
+		}
+		return [];
+	    },
+	    getSubmitValue: function() {
+		let value = this.value;
+		if (!value) {
+		    value = [];
+		}
+		return value;
+	    },
+	    name: 'match-severity',
+	});
+
+	let realMode = Ext.create({
+	    xtype: 'hiddenfield',
+	    name: 'mode',
+	    setValue: function(value) {
+		this.value = value;
+		this.checkChange();
+	    },
+	    getValue: function() {
+		return this.value;
+	    },
+	    getSubmitValue: function() {
+		let value = this.value;
+		return value;
+	    },
+	});
+
+	let realMatchCalendar = Ext.create({
+	    xtype: 'hiddenfield',
+	    name: 'match-calendar',
+
+	    setValue: function(value) {
+		this.value = value;
+		this.checkChange();
+	    },
+	    getValue: function() {
+		return this.value;
+	    },
+	    getErrors: function() {
+		for (const timespan of this.value ?? []) {
+		    if (!timespan) {
+			return [""]; // fake error for validation
+		    }
+		}
+		return [];
+	    },
+	    getSubmitValue: function() {
+		let value = this.value;
+		return value;
+	    },
+	});
+
+	let realInvertMatch = Ext.create({
+	    xtype: 'proxmoxcheckbox',
+	    name: 'invert-match',
+	    hidden: true,
+	    deleteEmpty: !me.isCreate,
+	});
+
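+	// Serialize the current tree contents back into the hidden fields
+	// whenever the tree data changes, refreshing each node's text and
+	// icon along the way. Change events on the hidden fields are
+	// suspended to avoid re-triggering the listeners below.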
+	let storeChanged = function(store) {
+	    store.suspendEvent('datachanged');
+
+	    let matchFieldStmts = [];
+	    let matchSeverityStmts = [];
+	    let matchCalendarStmts = [];
+	    let modeStmt = 'all';
+	    let invertMatchStmt = false;
+
+	    store.each(function(model) {
+		let type = model.get('type');
+		let data = model.get('data');
+
+		switch (type) {
+		    case 'match-field':
+			matchFieldStmts.push(`${data.type}:${data.field ?? ''}=${data.value ?? ''}`);
+			break;
+		    case 'match-severity':
+			if (Ext.isArray(data.value)) {
+			    matchSeverityStmts.push(data.value.join(','));
+			} else {
+			    matchSeverityStmts.push(data.value);
+			}
+			break;
+		    case 'match-calendar':
+			matchCalendarStmts.push(data.value);
+			break;
+		    case 'mode':
+			modeStmt = data.value;
+			invertMatchStmt = data.invert;
+			break;
+		}
+
+		let [text, iconCls] = me.getNodeTextAndIcon(type, data);
+		model.set({
+		    text,
+		    iconCls,
+		});
+	    });
+
+	    realMatchFields.suspendEvent('change');
+	    realMatchFields.setValue(matchFieldStmts);
+	    realMatchFields.resumeEvent('change');
+
+	    realMatchCalendar.suspendEvent('change');
+	    realMatchCalendar.setValue(matchCalendarStmts);
+	    realMatchCalendar.resumeEvent('change');
+
+	    realMode.suspendEvent('change');
+	    realMode.setValue(modeStmt);
+	    realMode.resumeEvent('change');
+
+	    realInvertMatch.suspendEvent('change');
+	    realInvertMatch.setValue(invertMatchStmt);
+	    realInvertMatch.resumeEvent('change');
+
+	    realMatchSeverity.suspendEvent('change');
+	    realMatchSeverity.setValue(matchSeverityStmts);
+	    realMatchSeverity.resumeEvent('change');
+
+	    store.resumeEvent('datachanged');
+	};
+
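+	// The listeners below do the reverse: whenever a hidden field changes
+	// (e.g. when the edit window loads an existing matcher), the
+	// corresponding tree nodes are rebuilt from the parsed values.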
+	realMatchFields.addListener('change', function(field, value) {
+	    let parseMatchField = function(filter) {
+		let [, type, matchedField, matchedValue] =
+		    filter.match(/^(?:(regex|exact):)?([A-Za-z0-9_][A-Za-z0-9._-]*)=(.+)$/);
+		if (type === undefined) {
+		    type = "exact";
+		}
+
+		if (type === 'exact') {
+		    matchedValue = matchedValue.split(',');
+		}
+
+		return {
+		    type: 'match-field',
+		    data: {
+			type,
+			field: matchedField,
+			value: matchedValue,
+		    },
+		    leaf: true,
+		};
+	    };
+
+	    for (let node of treeStore.queryBy(
+		record => record.get('type') === 'match-field',
+	    ).getRange()) {
+		node.remove(true);
+	    }
+
+	    if (!value) {
+		return;
+	    }
+	    let records = value.map(parseMatchField);
+
+	    let rootNode = treeStore.getRootNode();
+
+	    for (let record of records) {
+		rootNode.appendChild(record);
+	    }
+	});
+
+	realMatchSeverity.addListener('change', function(field, value) {
+	    let parseSeverity = function(severities) {
+		return {
+		    type: 'match-severity',
+		    data: {
+			value: severities.split(','),
+		    },
+		    leaf: true,
+		};
+	    };
+
+	    for (let node of treeStore.queryBy(
+		record => record.get('type') === 'match-severity').getRange()) {
+		node.remove(true);
+	    }
+
+	    if (!value) {
+		return;
+	    }
+	    let records = value.map(parseSeverity);
+	    let rootNode = treeStore.getRootNode();
+
+	    for (let record of records) {
+		rootNode.appendChild(record);
+	    }
+	});
+
+	realMatchCalendar.addListener('change', function(field, value) {
+	    let parseCalendar = function(timespan) {
+		return {
+		    type: 'match-calendar',
+		    data: {
+			value: timespan,
+		    },
+		    leaf: true,
+		};
+	    };
+
+	    for (let node of treeStore.queryBy(
+		record => record.get('type') === 'match-calendar').getRange()) {
+		node.remove(true);
+	    }
+
+	    if (!value) {
+		return;
+	    }
+	    let records = value.map(parseCalendar);
+	    let rootNode = treeStore.getRootNode();
+
+	    for (let record of records) {
+		rootNode.appendChild(record);
+	    }
+	});
+
+	realMode.addListener('change', function(field, value) {
+	    let data = treeStore.getRootNode().get('data');
+	    treeStore.getRootNode().set('data', {
+		...data,
+		value,
+	    });
+	});
+
+	realInvertMatch.addListener('change', function(field, value) {
+	    let data = treeStore.getRootNode().get('data');
+	    treeStore.getRootNode().set('data', {
+		...data,
+		invert: value,
+	    });
+	});
+
+	treeStore.addListener('datachanged', storeChanged);
+
+	let treePanel = Ext.create({
+	    xtype: 'treepanel',
+	    store: treeStore,
+	    minHeight: 300,
+	    maxHeight: 300,
+	    scrollable: true,
+
+	    bind: {
+		selection: '{selectedRecord}',
+	    },
+	});
+
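+	// Append a new 'match-field' leaf with empty defaults and select it,
+	// so the settings panel on the right shows the new node right away.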
+	let addNode = function() {
+	    let node = {
+		type: 'match-field',
+		data: {
+		    type: 'exact',
+		    field: '',
+		    value: '',
+		},
+		leaf: true,
+	    };
+	    treeStore.getRootNode().appendChild(node);
+	    treePanel.setSelection(treeStore.getRootNode().lastChild);
+	};
+
+	let deleteNode = function() {
+	    let selection = treePanel.getSelection();
+	    for (let selected of selection) {
+		if (!selected.isRoot()) {
+		    selected.remove(true);
+		}
+	    }
+	};
+
+	Ext.apply(me, {
+	    items: [
+		realMatchFields,
+		realMode,
+		realMatchSeverity,
+		realInvertMatch,
+		realMatchCalendar,
+		treePanel,
+		{
+		    xtype: 'button',
+		    margin: '5 5 5 0',
+		    text: gettext('Add'),
+		    iconCls: 'fa fa-plus-circle',
+		    handler: addNode,
+		},
+		{
+		    xtype: 'button',
+		    margin: '5 5 5 0',
+		    text: gettext('Remove'),
+		    iconCls: 'fa fa-minus-circle',
+		    handler: deleteNode,
+		},
+	    ],
+	});
+	me.callParent();
+    },
+});
+
+Ext.define('PBS.panel.NotificationMatchRuleSettings', {
+    extend: 'Ext.panel.Panel',
+    xtype: 'pbsNotificationMatchRuleSettings',
+    mixins: ['Proxmox.Mixin.CBind'],
+    border: false,
+    layout: 'anchor',
+
+    items: [
+	{
+	    xtype: 'proxmoxKVComboBox',
+	    name: 'mode',
+	    fieldLabel: gettext('Match if'),
+	    allowBlank: false,
+	    isFormField: false,
+
+	    matchFieldWidth: false,
+
+	    comboItems: [
+		['all', gettext('All rules match')],
+		['any', gettext('Any rule matches')],
+		['notall', gettext('At least one rule does not match')],
+		['notany', gettext('No rule matches')],
+	    ],
+	    // Hide initially to avoid glitches when opening the window
+	    hidden: true,
+	    bind: {
+		hidden: '{!showMatchingMode}',
+		disabled: '{!showMatchingMode}',
+		value: '{rootMode}',
+	    },
+	},
+	{
+	    xtype: 'proxmoxKVComboBox',
+	    fieldLabel: gettext('Node type'),
+	    isFormField: false,
+	    allowBlank: false,
+	    // Hide initially to avoid glitches when opening the window
+	    hidden: true,
+	    bind: {
+		value: '{nodeType}',
+		hidden: '{!showMatcherType}',
+		disabled: '{!showMatcherType}',
+	    },
+
+	    comboItems: [
+		['match-field', gettext('Match Field')],
+		['match-severity', gettext('Match Severity')],
+		['match-calendar', gettext('Match Calendar')],
+	    ],
+	},
+	{
+	    xtype: 'pbsNotificationMatchFieldSettings',
+	    cbind: {
+		baseUrl: '{baseUrl}',
+	    },
+	},
+	{
+	    xtype: 'pbsNotificationMatchSeveritySettings',
+	},
+	{
+	    xtype: 'pbsNotificationMatchCalendarSettings',
+	},
+    ],
+});
+
+Ext.define('PBS.panel.MatchCalendarSettings', {
+    extend: 'Ext.panel.Panel',
+    xtype: 'pbsNotificationMatchCalendarSettings',
+    border: false,
+    layout: 'anchor',
+    // Hide initially to avoid glitches when opening the window
+    hidden: true,
+    bind: {
+	hidden: '{!typeIsMatchCalendar}',
+    },
+    viewModel: {
+	// parent is set in `initComponent`
+	formulas: {
+	    typeIsMatchCalendar: {
+		bind: {
+		    bindTo: '{selectedRecord}',
+		    deep: true,
+		},
+		get: function(record) {
+		    return record?.get('type') === 'match-calendar';
+		},
+	    },
+
+	    matchCalendarValue: {
+		bind: {
+		    bindTo: '{selectedRecord}',
+		    deep: true,
+		},
+		set: function(value) {
+		    let me = this;
+		    let record = me.get('selectedRecord');
+		    let currentData = record.get('data');
+		    record.set({
+			data: {
+			    ...currentData,
+			    value: value,
+			},
+		    });
+		},
+		get: function(record) {
+		    return record?.get('data')?.value;
+		},
+	    },
+	},
+    },
+    items: [
+	{
+	    xtype: 'proxmoxKVComboBox',
+	    fieldLabel: gettext('Timespan to match'),
+	    isFormField: false,
+	    allowBlank: false,
+	    editable: true,
+	    displayField: 'key',
+	    field: 'value',
+	    bind: {
+		value: '{matchCalendarValue}',
+		disabled: '{!typeIsMatchCalendar}',
+	    },
+
+	    comboItems: [
+		['mon 8-12', ''],
+		['tue..fri,sun 0:00-23:59', ''],
+	    ],
+	},
+    ],
+
+    initComponent: function() {
+	let me = this;
+	Ext.apply(me.viewModel, {
+	    parent: me.up('pmxNotificationMatchRulesEditPanel').getViewModel(),
+	});
+	me.callParent();
+    },
+});
+
+Ext.define('PBS.panel.MatchSeveritySettings', {
+    extend: 'Ext.panel.Panel',
+    xtype: 'pbsNotificationMatchSeveritySettings',
+    border: false,
+    layout: 'anchor',
+    // Hide initially to avoid glitches when opening the window
+    hidden: true,
+    bind: {
+	hidden: '{!typeIsMatchSeverity}',
+    },
+    viewModel: {
+	// parent is set in `initComponent`
+	formulas: {
+	    typeIsMatchSeverity: {
+		bind: {
+		    bindTo: '{selectedRecord}',
+		    deep: true,
+		},
+		get: function(record) {
+		    return record?.get('type') === 'match-severity';
+		},
+	    },
+	    matchSeverityValue: {
+		bind: {
+		    bindTo: '{selectedRecord}',
+		    deep: true,
+		},
+		set: function(value) {
+		    let record = this.get('selectedRecord');
+		    let currentData = record.get('data');
+		    record.set({
+			data: {
+			    ...currentData,
+			    value: value,
+			},
+		    });
+		},
+		get: function(record) {
+		    return record?.get('data')?.value;
+		},
+	    },
+	},
+    },
+    items: [
+	{
+	    xtype: 'proxmoxKVComboBox',
+	    fieldLabel: gettext('Severities to match'),
+	    isFormField: false,
+	    allowBlank: true,
+	    multiSelect: true,
+	    field: 'value',
+	    // Hide initially to avoid glitches when opening the window
+	    hidden: true,
+	    bind: {
+		value: '{matchSeverityValue}',
+		hidden: '{!typeIsMatchSeverity}',
+		disabled: '{!typeIsMatchSeverity}',
+	    },
+
+	    comboItems: [
+		['info', gettext('Info')],
+		['notice', gettext('Notice')],
+		['warning', gettext('Warning')],
+		['error', gettext('Error')],
+		['unknown', gettext('Unknown')],
+	    ],
+	},
+    ],
+
+    initComponent: function() {
+	let me = this;
+	Ext.apply(me.viewModel, {
+	    parent: me.up('pmxNotificationMatchRulesEditPanel').getViewModel(),
+	});
+	me.callParent();
+    },
+});
+
+Ext.define('PBS.panel.MatchFieldSettings', {
+    extend: 'Ext.panel.Panel',
+    xtype: 'pbsNotificationMatchFieldSettings',
+    border: false,
+    layout: 'anchor',
+    // Hide initially to avoid glitches when opening the window
+    hidden: true,
+    bind: {
+	hidden: '{!typeIsMatchField}',
+    },
+    controller: {
+	xclass: 'Ext.app.ViewController',
+
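+	// When the selected field changes, restrict the value combobox's
+	// store to the values belonging to that field.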
+	control: {
+	    'field[reference=fieldSelector]': {
+		change: function(field) {
+		    let view = this.getView();
+		    let valueField = view.down('field[reference=valueSelector]');
+		    let store = valueField.getStore();
+		    let val = field.getValue();
+
+		    if (val) {
+			store.setFilters([
+			    {
+				property: 'field',
+				value: val,
+			    },
+			]);
+		    }
+		},
+	    },
+	},
+    },
+    viewModel: {
+	// parent is set in `initComponent`
+	formulas: {
+	    typeIsMatchField: {
+		bind: {
+		    bindTo: '{selectedRecord}',
+		    deep: true,
+		},
+		get: function(record) {
+		    return record?.get('type') === 'match-field';
+		},
+	    },
+	    isRegex: function(get) {
+		return get('matchFieldType') === 'regex';
+	    },
+	    matchFieldType: {
+		bind: {
+		    bindTo: '{selectedRecord}',
+		    deep: true,
+		},
+		set: function(value) {
+		    let record = this.get('selectedRecord');
+		    let currentData = record.get('data');
+
+		    let newValue = [];
+
+		    // Build equivalent regular expression if switching
+		    // to 'regex' mode
+		    if (value === 'regex') {
+			let regexVal = "^";
+			if (currentData.value) {
+			    regexVal += `(${currentData.value.join('|')})`;
+			}
+			regexVal += "$";
+			newValue.push(regexVal);
+		    }
+
+		    record.set({
+			data: {
+			    ...currentData,
+			    type: value,
+			    value: newValue,
+			},
+		    });
+		},
+		get: function(record) {
+		    return record?.get('data')?.type;
+		},
+	    },
+	    matchFieldField: {
+		bind: {
+		    bindTo: '{selectedRecord}',
+		    deep: true,
+		},
+		set: function(value) {
+		    let record = this.get('selectedRecord');
+		    let currentData = record.get('data');
+
+		    record.set({
+			data: {
+			    ...currentData,
+			    field: value,
+			    // Reset value if field changes
+			    value: [],
+			},
+		    });
+		},
+		get: function(record) {
+		    return record?.get('data')?.field;
+		},
+	    },
+	    matchFieldValue: {
+		bind: {
+		    bindTo: '{selectedRecord}',
+		    deep: true,
+		},
+		set: function(value) {
+		    let record = this.get('selectedRecord');
+		    let currentData = record.get('data');
+		    record.set({
+			data: {
+			    ...currentData,
+			    value: value,
+			},
+		    });
+		},
+		get: function(record) {
+		    return record?.get('data')?.value;
+		},
+	    },
+	},
+    },
+
+    initComponent: function() {
+	let me = this;
+
+	let store = Ext.create('Ext.data.Store', {
+	    model: 'pbs-notification-fields',
+	    autoLoad: true,
+	    proxy: {
+		type: 'proxmox',
+		url: `/api2/json/${me.baseUrl}/matcher-fields`,
+	    },
+	    listeners: {
+		'load': function() {
+		    this.each(function(record) {
+			record.set({
+			    description:
+				Proxmox.Utils.formatNotificationFieldName(
+				    record.get('name'),
+				),
+			});
+		    });
+
+		    // Commit changes so that the description field is not marked
+		    // as dirty
+		    this.commitChanges();
+		},
+	    },
+	});
+
+	let valueStore = Ext.create('Ext.data.Store', {
+	    model: 'pbs-notification-field-values',
+	    autoLoad: true,
+	    proxy: {
+		type: 'proxmox',
+
+		url: `/api2/json/${me.baseUrl}/matcher-field-values`,
+	    },
+	    listeners: {
+		'load': function() {
+		    this.each(function(record) {
+			if (record.get('field') === 'type') {
+			    record.set({
+				comment:
+				    Proxmox.Utils.formatNotificationFieldValue(
+					record.get('value'),
+				    ),
+			    });
+			}
+		    }, this, true);
+
+		    // Commit changes so that the comment field is not marked
+		    // as dirty
+		    this.commitChanges();
+		},
+	    },
+	});
+
+	Ext.apply(me.viewModel, {
+	    parent: me.up('pmxNotificationMatchRulesEditPanel').getViewModel(),
+	});
+	Ext.apply(me, {
+	    items: [
+		{
+		    fieldLabel: gettext('Match Type'),
+		    xtype: 'proxmoxKVComboBox',
+		    reference: 'type',
+		    isFormField: false,
+		    allowBlank: false,
+		    submitValue: false,
+		    field: 'type',
+
+		    bind: {
+			value: '{matchFieldType}',
+		    },
+
+		    comboItems: [
+			['exact', gettext('Exact')],
+			['regex', gettext('Regex')],
+		    ],
+		},
+		{
+		    fieldLabel: gettext('Field'),
+		    reference: 'fieldSelector',
+		    xtype: 'proxmoxComboGrid',
+		    isFormField: false,
+		    submitValue: false,
+		    allowBlank: false,
+		    editable: false,
+		    store: store,
+		    queryMode: 'local',
+		    valueField: 'name',
+		    displayField: 'description',
+		    field: 'field',
+		    bind: {
+			value: '{matchFieldField}',
+		    },
+		    listConfig: {
+			columns: [
+			    {
+				header: gettext('Description'),
+				dataIndex: 'description',
+				flex: 2,
+			    },
+			    {
+				header: gettext('Field Name'),
+				dataIndex: 'name',
+				flex: 1,
+			    },
+			],
+		    },
+		},
+		{
+		    fieldLabel: gettext('Value'),
+		    reference: 'valueSelector',
+		    xtype: 'proxmoxComboGrid',
+		    autoSelect: false,
+		    editable: false,
+		    isFormField: false,
+		    submitValue: false,
+		    allowBlank: false,
+		    showClearTrigger: true,
+		    field: 'value',
+		    store: valueStore,
+		    valueField: 'value',
+		    displayField: 'value',
+		    notFoundIsValid: false,
+		    multiSelect: true,
+		    bind: {
+			value: '{matchFieldValue}',
+			hidden: '{isRegex}',
+		    },
+		    listConfig: {
+			columns: [
+			    {
+				header: gettext('Value'),
+				dataIndex: 'value',
+				flex: 1,
+			    },
+			    {
+				header: gettext('Comment'),
+				dataIndex: 'comment',
+				flex: 2,
+			    },
+			],
+		    },
+		},
+		{
+		    fieldLabel: gettext('Regex'),
+		    xtype: 'proxmoxtextfield',
+		    editable: true,
+		    isFormField: false,
+		    submitValue: false,
+		    allowBlank: false,
+		    field: 'value',
+		    bind: {
+			value: '{matchFieldValue}',
+			hidden: '{!isRegex}',
+		    },
+		},
+	    ],
+	});
+	me.callParent();
+    },
+});
diff --git a/www/window/NotifyOptions.js b/www/window/NotifyOptions.js
index 11765b43b..6590df4a8 100644
--- a/www/window/NotifyOptions.js
+++ b/www/window/NotifyOptions.js
@@ -27,7 +27,7 @@ Ext.define('PBS.window.NotifyOptions', {
     xtype: 'pbsNotifyOptionEdit',
     mixins: ['Proxmox.Mixin.CBind'],
 
-    onlineHelp: 'maintenance_notification',
+    onlineHelp: 'notification_mode',
 
     user: undefined,
     tokenname: undefined,
@@ -44,6 +44,15 @@ Ext.define('PBS.window.NotifyOptions', {
 	labelWidth: 120,
     },
 
+    viewModel: {
+	data: {
+	    notificationMode: '__default__',
+	},
+	formulas: {
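+	    // true when the new notification system is selected; used to
+	    // disable the legacy per-type notify selectors below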
+	    notificationSystemSelected: (get) => get('notificationMode') === 'notification-system',
+	},
+    },
+
     items: {
 	xtype: 'inputpanel',
 	onGetValues: function(values) {
@@ -54,12 +63,30 @@ Ext.define('PBS.window.NotifyOptions', {
 	    }
 	    values.notify = PBS.Utils.printPropertyString(notify);
 
+	    if (values.delete && !Ext.isArray(values.delete)) {
+		values.delete = values.delete.split(',');
+	    }
+
 	    PBS.Utils.delete_if_default(values, 'notify', '');
 	    PBS.Utils.delete_if_default(values, 'notify-user', '');
 
 	    return values;
 	},
 	items: [
+	    {
+		xtype: 'proxmoxKVComboBox',
+		comboItems: [
+		    ['__default__', `${Proxmox.Utils.defaultText} (Email)`],
+		    ['legacy-sendmail', gettext('Email (legacy)')],
+		    ['notification-system', gettext('Notification system')],
+		],
+		deleteEmpty: true,
+		fieldLabel: gettext('Notification mode'),
+		name: 'notification-mode',
+		bind: {
+		    value: '{notificationMode}',
+		},
+	    },
 	    {
 		xtype: 'pmxUserSelector',
 		name: 'notify-user',
@@ -69,6 +96,9 @@ Ext.define('PBS.window.NotifyOptions', {
 		allowBlank: true,
 		renderer: Ext.String.htmlEncode,
 		deleteEmpty: true,
+		bind: {
+		    disabled: "{notificationSystemSelected}",
+		},
 	    },
 	    {
 		xtype: 'pbsNotifyType',
@@ -76,6 +106,9 @@ Ext.define('PBS.window.NotifyOptions', {
 		fieldLabel: gettext('Verification Jobs'),
 		value: '__default__',
 		deleteEmpty: false,
+		bind: {
+		    disabled: "{notificationSystemSelected}",
+		},
 	    },
 	    {
 		xtype: 'pbsNotifyType',
@@ -83,6 +116,9 @@ Ext.define('PBS.window.NotifyOptions', {
 		fieldLabel: gettext('Sync Jobs'),
 		value: '__default__',
 		deleteEmpty: false,
+		bind: {
+		    disabled: "{notificationSystemSelected}",
+		},
 	    },
 	    {
 		xtype: 'pbsNotifyErrorDefaultType',
@@ -90,6 +126,9 @@ Ext.define('PBS.window.NotifyOptions', {
 		fieldLabel: gettext('Prune Jobs'),
 		value: '__default__',
 		deleteEmpty: false,
+		bind: {
+		    disabled: "{notificationSystemSelected}",
+		},
 	    },
 	    {
 		xtype: 'pbsNotifyType',
@@ -97,6 +136,9 @@ Ext.define('PBS.window.NotifyOptions', {
 		fieldLabel: gettext('Garbage Collection'),
 		value: '__default__',
 		deleteEmpty: false,
+		bind: {
+		    disabled: "{notificationSystemSelected}",
+		},
 	    },
 	],
     },
@@ -107,6 +149,8 @@ Ext.define('PBS.window.NotifyOptions', {
 	let options = {
 	    'notify-user': values['notify-user'],
 	    'verify-new': values['verify-new'],
+	    'notification-mode': values['notification-mode']
+		? values['notification-mode'] : '__default__',
 	};
 
 	let notify = {};
diff --git a/www/window/Settings.js b/www/window/Settings.js
index 763cefabc..c785dd881 100644
--- a/www/window/Settings.js
+++ b/www/window/Settings.js
@@ -96,7 +96,7 @@ Ext.define('PBS.window.Settings', {
 		click: function() {
 		    let blacklist = ['login-username'];
 		    let sp = Ext.state.Manager.getProvider();
-		    for (const state of Object.values(sp.state)) {
+		    for (const state of Object.keys(sp.state)) {
 			if (blacklist.indexOf(state) !== -1) {
 			    continue;
 			}
diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js
index e06fdd81a..6543995e8 100644
--- a/www/window/SyncJobEdit.js
+++ b/www/window/SyncJobEdit.js
@@ -132,8 +132,8 @@ Ext.define('PBS.window.SyncJobEdit', {
 			name: 'schedule',
 			emptyText: gettext('none (disabled)'),
 			cbind: {
-				deleteEmpty: '{!isCreate}',
-				value: '{scheduleValue}',
+			    deleteEmpty: '{!isCreate}',
+			    value: '{scheduleValue}',
 			},
 		    },
 		    {