Merge commit '6c44f3e584ceefdb24dd7ae016965542229200f2'
This commit is contained in: b0598bc0ae

Cargo.toml (68 lines changed)
@@ -1,5 +1,5 @@
 [workspace.package]
-version = "3.1.5"
+version = "3.2.8"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -13,6 +13,7 @@ authors = [
 edition = "2021"
 license = "AGPL-3"
 repository = "https://git.proxmox.com/?p=proxmox-backup.git"
+rust-version = "1.80"
 
 [package]
 name = "proxmox-backup"
@@ -53,39 +54,48 @@ path = "src/lib.rs"
 
 [workspace.dependencies]
 # proxmox workspace
-proxmox-apt = "0.10.5"
+proxmox-apt = { version = "0.11", features = [ "cache" ] }
+proxmox-apt-api-types = "1.0.1"
 proxmox-async = "0.4"
-proxmox-auth-api = "0.3"
+proxmox-auth-api = "0.4"
 proxmox-borrow = "1"
 proxmox-compression = "0.2"
+proxmox-config-digest = "0.1.0"
+proxmox-daemon = "0.1.0"
 proxmox-fuse = "0.1.3"
 proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
 proxmox-human-byte = "0.1"
 proxmox-io = "1.0.1" # tools and client use "tokio" feature
 proxmox-lang = "1.1"
+proxmox-log = "0.2.4"
 proxmox-ldap = "0.2.1"
-proxmox-metrics = "0.3"
+proxmox-metrics = "0.3.1"
+proxmox-notify = "0.4"
 proxmox-openid = "0.10.0"
-proxmox-rest-server = { version = "0.5.1", features = [ "templates" ] }
+proxmox-rest-server = { version = "0.8", features = [ "templates" ] }
 # some use "cli", some use "cli" and "server", pbs-config uses nothing
-proxmox-router = { version = "2.0.0", default_features = false }
-proxmox-rrd = { version = "0.1" }
+proxmox-router = { version = "3.0.0", default-features = false }
+proxmox-rrd = "0.4"
+proxmox-rrd-api-types = "1.0.2"
 # everything but pbs-config and pbs-client use "api-macro"
 proxmox-schema = "3"
 proxmox-section-config = "2"
 proxmox-serde = "0.1.1"
+proxmox-shared-cache = "0.1"
 proxmox-shared-memory = "0.3.0"
 proxmox-sortable-macro = "0.1.2"
 proxmox-subscription = { version = "0.4.2", features = [ "api-types" ] }
-proxmox-sys = "0.5.3"
-proxmox-tfa = { version = "4.0.4", features = [ "api", "api-types" ] }
-proxmox-time = "1.1.6"
+proxmox-sys = "0.6"
+proxmox-systemd = "0.1"
+proxmox-tfa = { version = "5", features = [ "api", "api-types" ] }
+proxmox-time = "2"
 proxmox-uuid = "1"
+proxmox-worker-task = "0.1"
 
 # other proxmox crates
 pathpatterns = "0.3"
-proxmox-acme = "0.5"
-pxar = "0.10.2"
+proxmox-acme = "0.5.3"
+pxar = "0.12"
 
 # PBS workspace
 pbs-api-types = { path = "pbs-api-types" }
@@ -104,7 +114,7 @@ anyhow = "1.0"
 async-trait = "0.1.56"
 #apt-pkg-native = "0.3.2"
 base64 = "0.13"
-bitflags = "1.2.1"
+bitflags = "2.4"
 bytes = "1.0"
 cidr = "0.2.1"
 crc32fast = "1"
@@ -115,12 +125,11 @@ env_logger = "0.10"
 flate2 = "1.0"
 foreign-types = "0.3"
 futures = "0.3"
-h2 = { version = "0.3", features = [ "stream" ] }
-handlebars = "3.0"
+h2 = { version = "0.4", features = [ "stream" ] }
 hex = "0.4.3"
 http = "0.2"
 hyper = { version = "0.14", features = [ "full" ] }
 lazy_static = "1.4"
 libc = "0.2"
 log = "0.4.17"
 nix = "0.26.1"
@@ -144,33 +153,30 @@ tokio = "1.6"
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
 tokio-util = { version = "0.7", features = [ "io" ] }
+tracing = "0.1"
 tower-service = "0.3.0"
 udev = "0.4"
 url = "2.1"
 walkdir = "2"
 xdg = "2.2"
 zstd = { version = "0.12", features = [ "bindgen" ] }
+zstd-safe = "6.0"
 
 [dependencies]
 anyhow.workspace = true
 async-trait.workspace = true
 #apt-pkg-native.workspace = true
 base64.workspace = true
 bitflags.workspace = true
 bytes.workspace = true
 cidr.workspace = true
 const_format.workspace = true
 crc32fast.workspace = true
 crossbeam-channel.workspace = true
 endian_trait.workspace = true
 flate2.workspace = true
 futures.workspace = true
 h2.workspace = true
-handlebars.workspace = true
 hex.workspace = true
 http.workspace = true
 hyper.workspace = true
 lazy_static.workspace = true
 libc.workspace = true
 log.workspace = true
 nix.workspace = true
@@ -183,7 +189,6 @@ regex.workspace = true
 rustyline.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 siphasher.workspace = true
-syslog.workspace = true
 termcolor.workspace = true
 thiserror.workspace = true
@@ -191,42 +196,48 @@ tokio = { workspace = true, features = [ "fs", "io-util", "io-std", "macros", "n
 tokio-openssl.workspace = true
 tokio-stream.workspace = true
 tokio-util = { workspace = true, features = [ "codec" ] }
 tower-service.workspace = true
+tracing.workspace = true
 udev.workspace = true
 url.workspace = true
 walkdir.workspace = true
 xdg.workspace = true
 zstd.workspace = true
 
 #valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
 
 # proxmox workspace
 proxmox-apt.workspace = true
+proxmox-apt-api-types.workspace = true
 proxmox-async.workspace = true
 proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] }
 proxmox-compression.workspace = true
+proxmox-config-digest.workspace = true
+proxmox-daemon.workspace = true
 proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these
 proxmox-human-byte.workspace = true
 proxmox-io.workspace = true
 proxmox-lang.workspace = true
+proxmox-log.workspace = true
 proxmox-ldap.workspace = true
 proxmox-metrics.workspace = true
+proxmox-notify = { workspace = true, features = [ "pbs-context" ] }
 proxmox-openid.workspace = true
 proxmox-rest-server = { workspace = true, features = [ "rate-limited-stream" ] }
 proxmox-router = { workspace = true, features = [ "cli", "server"] }
 proxmox-schema = { workspace = true, features = [ "api-macro" ] }
 proxmox-section-config.workspace = true
 proxmox-serde = { workspace = true, features = [ "serde_json" ] }
+proxmox-shared-cache.workspace = true
 proxmox-shared-memory.workspace = true
 proxmox-sortable-macro.workspace = true
 proxmox-subscription.workspace = true
 proxmox-sys = { workspace = true, features = [ "timer" ] }
+proxmox-systemd.workspace = true
 proxmox-tfa.workspace = true
 proxmox-time.workspace = true
 proxmox-uuid.workspace = true
+proxmox-worker-task.workspace = true
 
 # in their respective repo
 pathpatterns.workspace = true
 proxmox-acme.workspace = true
 pxar.workspace = true
 
@@ -240,27 +251,34 @@ pbs-key-config.workspace = true
 pbs-tape.workspace = true
 pbs-tools.workspace = true
 proxmox-rrd.workspace = true
+proxmox-rrd-api-types.workspace = true
 
 # Local path overrides
 # NOTE: You must run `cargo update` after changing this for it to take effect!
 [patch.crates-io]
 
 #proxmox-apt = { path = "../proxmox/proxmox-apt" }
+#proxmox-apt-api-types = { path = "../proxmox/proxmox-apt-api-types" }
 #proxmox-async = { path = "../proxmox/proxmox-async" }
 #proxmox-auth-api = { path = "../proxmox/proxmox-auth-api" }
 #proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
 #proxmox-compression = { path = "../proxmox/proxmox-compression" }
+#proxmox-config-digest = { path = "../proxmox/proxmox-config-digest" }
+#proxmox-daemon = { path = "../proxmox/proxmox-daemon" }
 #proxmox-fuse = { path = "../proxmox-fuse" }
 #proxmox-http = { path = "../proxmox/proxmox-http" }
 #proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" }
 #proxmox-io = { path = "../proxmox/proxmox-io" }
 #proxmox-lang = { path = "../proxmox/proxmox-lang" }
+#proxmox-log = { path = "../proxmox/proxmox-log" }
 #proxmox-ldap = { path = "../proxmox/proxmox-ldap" }
 #proxmox-metrics = { path = "../proxmox/proxmox-metrics" }
+#proxmox-notify = { path = "../proxmox/proxmox-notify" }
 #proxmox-openid = { path = "../proxmox/proxmox-openid" }
 #proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" }
 #proxmox-router = { path = "../proxmox/proxmox-router" }
 #proxmox-rrd = { path = "../proxmox/proxmox-rrd" }
+#proxmox-rrd-api-types = { path = "../proxmox/proxmox-rrd-api-types" }
 #proxmox-schema = { path = "../proxmox/proxmox-schema" }
 #proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
 #proxmox-serde = { path = "../proxmox/proxmox-serde" }
@@ -268,9 +286,11 @@ proxmox-rrd.workspace = true
 #proxmox-sortable-macro = { path = "../proxmox/proxmox-sortable-macro" }
 #proxmox-subscription = { path = "../proxmox/proxmox-subscription" }
 #proxmox-sys = { path = "../proxmox/proxmox-sys" }
+#proxmox-systemd = { path = "../proxmox/proxmox-systemd" }
 #proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
 #proxmox-time = { path = "../proxmox/proxmox-time" }
 #proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
+#proxmox-worker-task = { path = "../proxmox/proxmox-worker-task" }
 
 #proxmox-acme = { path = "../proxmox/proxmox-acme" }
 #pathpatterns = {path = "../pathpatterns" }
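Most of the churn above follows a single Cargo pattern: versions are pinned once in the root `[workspace.package]` and `[workspace.dependencies]` tables, and member crates pull them in with `workspace = true`. A minimal sketch of that inheritance (the member crate name is illustrative):

    # Cargo.toml (workspace root)
    [workspace.dependencies]
    proxmox-sys = "0.6"

    # pbs-tools/Cargo.toml (a member crate): inherits the pinned version,
    # optionally enabling extra features on top of it
    [dependencies]
    proxmox-sys = { workspace = true, features = [ "timer" ] }

Bumping the version once in the root is then enough for every member crate, which is why the version changes above touch only the workspace tables.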
Makefile (26 lines changed)
@@ -4,7 +4,7 @@ include defines.mk
 PACKAGE := proxmox-backup
 ARCH := $(DEB_BUILD_ARCH)
 
-SUBDIRS := etc www docs
+SUBDIRS := etc www docs templates
 
 # Binaries usable by users
 USR_BIN := \
@@ -33,14 +33,15 @@ RESTORE_BIN := \
 
 SUBCRATES != cargo metadata --no-deps --format-version=1 \
 	| jq -r .workspace_members'[]' \
-	| awk '!/^proxmox-backup[[:space:]]/ { printf "%s ", $$1 }'
+	| grep "$$PWD/" \
+	| sed -e "s!.*$$PWD/!!g" -e 's/\#.*$$//g' -e 's/)$$//g'
 
-#ifeq ($(BUILD_MODE), release)
+ifeq ($(BUILD_MODE), release)
 CARGO_BUILD_ARGS += --release --offline
-COMPILEDIR := target/release
-#else
-#COMPILEDIR := target/debug
-#endif
+COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/release
+else
+COMPILEDIR := target/$(DEB_HOST_RUST_TYPE)/debug
+endif
 
 ifeq ($(valgrind), yes)
 CARGO_BUILD_ARGS += --features valgrind
@@ -93,7 +94,7 @@ build:
 	cp -a debian \
 	  Cargo.toml src \
 	  $(SUBCRATES) \
-	  docs etc examples tests www zsh-completions \
+	  docs etc examples tests www zsh-completions templates \
 	  defines.mk Makefile \
 	  ./build/
 	rm -f build/Cargo.lock
@@ -108,12 +109,15 @@ proxmox-backup-docs: build
 	cd build; dpkg-buildpackage -b -us -uc --no-pre-clean
 	lintian $(DOC_DEB)
 
-# copy the local target/ dir as a build-cache
-.PHONY: deb dsc deb-nodoc
+.PHONY: deb dsc deb-nodoc deb-nostrip
 deb-nodoc: build
 	cd build; dpkg-buildpackage -b -us -uc --no-pre-clean --build-profiles=nodoc
 	lintian $(DEBS)
 
+deb-nostrip: build
+	cd build; DEB_BUILD_OPTIONS=nostrip dpkg-buildpackage -b -us -uc
+	lintian $(DEBS) $(DOC_DEB)
+
 $(DEBS): deb
 deb: build
 	cd build; dpkg-buildpackage -b -us -uc
@@ -176,6 +180,7 @@ $(COMPILED_BINS) $(COMPILEDIR)/dump-catalog-shell-cli $(COMPILEDIR)/docgen: .do-
 	    --bin proxmox-restore-daemon \
 	--package proxmox-backup \
 	    --bin docgen \
+	    --bin pbs2to3 \
 	    --bin proxmox-backup-api \
 	    --bin proxmox-backup-manager \
 	    --bin proxmox-backup-proxy \
@@ -211,6 +216,7 @@ install: $(COMPILED_BINS)
 	install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
 	$(MAKE) -C www install
 	$(MAKE) -C docs install
+	$(MAKE) -C templates install
 
 .PHONY: upload
 upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
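The reworked SUBCRATES pipeline above switches from parsing the old `name version (url)` triples with awk to extracting relative crate paths from `cargo metadata`'s URL-style workspace-member IDs. An illustrative run, assuming a checkout at /build/proxmox-backup (paths and versions are made up):

    $ cargo metadata --no-deps --format-version=1 | jq -r '.workspace_members[]'
    path+file:///build/proxmox-backup/pbs-api-types#0.1.0
    path+file:///build/proxmox-backup/pbs-client#0.1.0
    path+file:///build/proxmox-backup#proxmox-backup@3.2.8

    $ # after `grep "$PWD/"` (which also drops the root crate) and the
    $ # sed expressions stripping the path prefix and the `#version` suffix:
    pbs-api-types
    pbs-client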
README.rst

@@ -30,7 +30,7 @@ pre-release version number (e.g., "0.1.1-dev.1" instead of "0.1.0").
 Local cargo config
 ==================
 
-This repository ships with a ``.cargo/config`` that replaces the crates.io
+This repository ships with a ``.cargo/config.toml`` that replaces the crates.io
 registry with packaged crates located in ``/usr/share/cargo/registry``.
 
 A similar config is also applied building with dh_cargo. Cargo.lock needs to be
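For reference, the source-replacement config the README describes typically follows the standard Debian packaged-registry pattern; a sketch (not a verbatim copy of the shipped file):

    # .cargo/config.toml
    [source.crates-io]
    replace-with = "debian-packages"

    [source.debian-packages]
    directory = "/usr/share/cargo/registry"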
debian/changelog (291 lines changed)
@@ -1,3 +1,294 @@
+rust-proxmox-backup (3.2.8-1) bookworm; urgency=medium
+
+  * switch various log statements in worker tasks to the newer, more flexible
+    proxmox log crate. With this change, errors from task logs are now also
+    logged to the system log, increasing their visibility.
+
+  * datastore api: list snapshots: avoid calculating the protected attribute
+    twice per snapshot, this reduces the amount of file metadata requests.
+
+  * avoid re-calculating the backup snapshot path's date time component when
+    getting the full path, reducing calls to the relatively slow strftime
+    function from libc.
+
+  * fix #3699: client: prefer the XDG cache directory for temporary files with
+    a fallback to using /tmp, as before.
+
+  * sync job: improve log message for when syncing the root namespace.
+
+  * client: increase read buffer from 8 KiB to 4 MiB for raw image based
+    backups. This reduces the time spent polling between the reader, chunker
+    and uploader async tasks and thus can improve backup speed significantly,
+    especially on setups with fast network and storage.
+
+  * client benchmark: avoid unnecessary allocation in the AES benchmark,
+    causing artificial overhead. The benchmark AES results should now be more
+    in line with the hardware capability and what the PBS client could already
+    do. On our test system we saw an increase by a factor of 2.3 on this
+    specific benchmark.
+
+  * docs: add external metrics server page
+
+  * tfa: webauthn: serialize OriginUrl following RFC 6454
+
+  * factor out apt and apt-repository handling into a new library crate for
+    re-use in other projects. There should be no functional change.
+
+  * fix various typos all over the place found using the rust based `typos`
+    tool.
+
+  * datastore: data blob compression: increase compression throughput by
+    switching away from a higher level zstd method to a lower level one, which
+    allows us to control the target buffer size directly and thus avoid some
+    allocation and syscall overhead. We saw the compression bandwidth increase
+    by a factor of 1.19 in our tests, where both the source data and the
+    target datastore were located in memory backed tmpfs.
+
+  * daily-update: ensure notification system context is initialized.
+
+  * backup reader: derive if debug messages should be printed from the global
+    log level. This avoids printing some debug messages by default, e.g., the
+    "protocol upgrade done" message from sync jobs.
+
+  * ui: user view: disable 'Unlock TFA' button by default to improve UX if no
+    user is selected.
+
+  * manager cli: ensure the worker task finishes when triggering a reload of
+    the system network.
+
+  * fix #5622: backup client: properly handle rate and burst parameters.
+    Previously, passing any non-integer value, like `1mb`, was ignored.
+
+  * tape: read element status: ignore responses where the library specifies
+    that it will return a volume tag but then does not include that field in
+    the actual response. As both the primary and the alternative volume tag
+    are not required by PBS, this specific error can simply be downgraded to a
+    warning.
+
+  * pxar: dump archive: print entries to stdout instead of stderr
+
+  * sync jobs: various clean-ups and refactoring that should not result in any
+    semantic change.
+
+  * metric collection: put metrics in a cache with a 30 minute lifetime.
+
+  * api: add /status/metrics API to allow pull-based metric servers to gather
+    data directly.
+
+  * partial fix #5560: client: periodically show backup progress
+
+  * docs: add proxmox-backup.node.cfg man page
+
+  * docs: sync: explicitly mention `remove-vanished` flag
+
+ -- Proxmox Support Team <support@proxmox.com>  Fri, 18 Oct 2024 19:05:41 +0200
+
+rust-proxmox-backup (3.2.7-1) bookworm; urgency=medium
+
+  * docs: drop blanket statement recommending against remote storage
+
+  * ui: gc job edit: fix i18n gettext usage
+
+  * pxar: improve error handling, e.g., avoiding duplicate information
+
+  * close #4763: client: add command to forget (delete) whole backup group
+    with all its snapshots
+
+  * close #5571: client: fix regression for `map` command
+
+  * client: mount: wait for child to return before exiting to provide better
+    UX for some edge paths
+
+  * fix #5304: client: set process uid/gid for `.pxarexclude-cli` to avoid
+    issues when trying to backup and restore the backup as non-root user.
+
+  * http client: keep renewal future running on failed re-auth to make it more
+    resilient against some transient errors, like the request just failing due
+    to network instability.
+
+  * datastore: fix problem with operations counting for the case where the
+    `.chunks/` directory is not available (deleted/moved)
+
+  * manager: use confirmation helper in wipe-disk command
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 03 Jul 2024 13:33:51 +0200
+
+rust-proxmox-backup (3.2.6-1) bookworm; urgency=medium
+
+  * tape: disable Programmable Early Warning Zone (PEWZ)
+
+  * tape: handle PEWZ like regular early warning
+
+  * docs: add note for not using remote storages
+
+  * client: pxar: fix fuse mount performance for split archives
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 17 Jun 2024 10:18:13 +0200
+
+rust-proxmox-backup (3.2.5-1) bookworm; urgency=medium
+
+  * pxar: add support for split archives
+
+  * fix #3174: pxar: enable caching and meta comparison
+
+  * docs: file formats: describe split pxar archive file layout
+
+  * docs: add section describing change detection mode
+
+  * api: datastore: add optional archive-name to file-restore
+
+  * client: backup: conditionally write catalog for file level backups
+
+  * docs: add table listing possible change detection modes
+
+ -- Proxmox Support Team <support@proxmox.com>  Mon, 10 Jun 2024 13:39:54 +0200
+
+rust-proxmox-backup (3.2.4-1) bookworm; urgency=medium
+
+  * fix: network api: permission using wrong pathname
+
+  * fix #5503: d/control: bump dependency for proxmox-widget-toolkit
+
+  * auth: add locking to `PbsAuthenticator` to avoid race conditions
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 05 Jun 2024 16:23:38 +0200
+
+rust-proxmox-backup (3.2.3-1) bookworm; urgency=medium
+
+  * api-types: remove influxdb bucket name restrictions
+
+  * api: datastore status: delay lookup after permission check to improve
+    consistency of tracked read operations
+
+  * tape: improve throughput by not unnecessarily syncing/committing after
+    every archive written beyond the first 128 GiB
+
+  * tape: save 'bytes used' in the tape inventory and show them on the web UI
+    to allow users to more easily see the usage of a tape
+
+  * tape drive status: return drive activity (like cleaning, loading,
+    unloading, writing, ...) in the API and show them in the UI
+
+  * ui: tape drive status: avoid checking some specific status if the current
+    drive activity would block doing so anyway
+
+  * tape: write out basic MAM host-type attributes to media to make them more
+    easily identifiable as Proxmox Backup Server tape by common LTO tooling.
+
+  * api: syslog: fix the documented type of the return value
+
+  * fix #5465: restore daemon: mount NTFS with UTF-8 charset
+
+  * restore daemon: log some more errors on directory traversal
+
+  * fix #5422: ui: garbage-collection: make columns in global view sortable
+
+  * auth: move to hmac keys for csrf tokens as future-proofing
+
+  * auth: upgrade hashes on user log in if a user's password is not hashed
+    with the latest password hashing function, for hardening purposes
+
+  * auth: use ed25519 keys when generating new auth api keys
+
+  * notifications: fix legacy sync notifications
+
+  * docs: document notification-mode and merge old notification section
+
+  * docs: notifications: rewrite overview for more clarity
+
+  * ui: datastore options: link to 'notification-mode' section
+
+  * acme: explicitly print a query when prompting for the custom directory URI
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 22 May 2024 19:31:35 +0200
+
+rust-proxmox-backup (3.2.2-1) bookworm; urgency=medium
+
+  * ui: notifications: fix empty text format for the default mail author
+
+  * ui: tape backup: do not try to delete the namespace property if it's empty
+
+  * ui: sync job: fix error if local namespace is selected first
+
+ -- Proxmox Support Team <support@proxmox.com>  Thu, 25 Apr 2024 12:06:04 +0200
+
+rust-proxmox-backup (3.2.1-1) bookworm; urgency=medium
+
+  * implement Active Directory support:
+    - api: access: add routes for managing AD realms
+    - config: domains: add new "ad" section type for AD realms
+    - realm sync: add sync job for AD realms
+    - manager cli: add sub-command for managing AD realms
+    - docs: user-management: add section about AD realm support
+
+  * auth: fix requesting the TFA write lock exclusively
+
+  * installation: add section about unattended/automatic installation
+
+  * api: tape config: forbid reusing IDs between tape changer and tape drive
+
+  * api: add support for creating and updating VLAN interfaces
+
+  * ui: enable the VLAN widget that got moved over from PVE to the generic
+    widget-toolkit
+
+ -- Proxmox Support Team <support@proxmox.com>  Wed, 24 Apr 2024 22:05:36 +0200
+
+rust-proxmox-backup (3.2.0-1) bookworm; urgency=medium
+
+  * fix #5248: client: allow self-signed/untrusted certificate chains
+
+  * api: make prune-group a real worker task to avoid timeouts after 30s
+
+  * ui: sync view: rename column 'Max. Recursion' -> 'Max. Depth'
+
+  * api: assert that maintenance mode transitions are valid, e.g., do
+    not allow clearing the special "delete" maintenance mode
+
+  * fix #3217: ui: add global prune and GC job view for an overview over
+    all datastores
+
+  * fix #4723: manager: add new "garbage-collection list" CLI command to
+    list all GC jobs
+
+  * ui: garbage collection: show removed and pending data of last run in
+    bytes
+
+  * fix #5251: login: set autocomplete on password and user
+
+  * allow sending notifications via advanced proxmox-notify crate
+
+  * api: add endpoints for querying known notification values/fields
+
+  * api: add endpoints for gotify, smtp, and sendmail targets
+
+  * api: add endpoints for managing notification matchers
+
+  * api: add endpoints for querying/testing notification targets
+
+  * server: notifications:
+    - send tape notifications via notification system
+    - send ACME notifications via notification system
+    - send update notifications via notification system
+    - send sync notifications via notification system
+    - send verify notifications via notification system
+    - send prune notifications via notification system
+    - send GC notifications via notification system
+
+  * docs: add documentation for notification system
+
+  * ui: notifications: pull in UX improvements for match rules creation
+
+  * api: notification: also list datastores if user has only Backup
+    privs
+
+  * manager: add CLI commands for SMTP, sendmail, and gotify
+    endpoints
+
+  * manager: add CLI for administrating notification matchers and targets
+
+ -- Proxmox Support Team <support@proxmox.com>  Tue, 23 Apr 2024 23:45:29 +0200
+
 rust-proxmox-backup (3.1.5-1) bookworm; urgency=medium
 
   * fix #5190: api: OIDC: accept generic URIs for the ACR value
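Regarding the fix #5622 entry above: with the fix, human-readable byte values are accepted for the client's traffic-control parameters instead of being silently ignored. An illustrative invocation (repository and datastore names are placeholders):

    # proxmox-backup-client backup root.pxar:/ \
        --repository backup-server:datastore \
        --rate 50MiB --burst 100MiB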
debian/control (74 lines changed)
@@ -15,29 +15,26 @@ Build-Depends: bash-completion,
     libacl1-dev,
     libfuse3-dev,
     librust-anyhow-1+default-dev,
-    librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
     librust-async-trait-0.1+default-dev (>= 0.1.56-~~),
     librust-base64-0.13+default-dev,
-    librust-bitflags-1+default-dev (>= 1.2.1-~~),
+    librust-bitflags-2+default-dev (>= 2.4-~~),
     librust-bytes-1+default-dev,
     librust-cidr-0.2+default-dev (>= 0.2.1-~~),
     librust-const-format-0.2+default-dev,
     librust-crc32fast-1+default-dev,
     librust-crossbeam-channel-0.5+default-dev,
     librust-endian-trait-0.6+arrays-dev,
     librust-endian-trait-0.6+default-dev,
     librust-env-logger-0.10+default-dev,
     librust-flate2-1+default-dev,
     librust-foreign-types-0.3+default-dev,
     librust-futures-0.3+default-dev,
-    librust-h2-0.3+default-dev,
-    librust-h2-0.3+stream-dev,
-    librust-handlebars-3+default-dev,
+    librust-h2-0.4+default-dev,
+    librust-h2-0.4+stream-dev,
     librust-hex-0.4+default-dev (>= 0.4.3-~~),
     librust-hex-0.4+serde-dev (>= 0.4.3-~~),
     librust-http-0.2+default-dev,
     librust-hyper-0.14+default-dev,
     librust-hyper-0.14+full-dev,
     librust-lazy-static-1+default-dev (>= 1.4-~~),
     librust-libc-0.2+default-dev,
     librust-log-0.4+default-dev (>= 0.4.17-~~),
     librust-nix-0.26+default-dev (>= 0.26.1-~~),
@@ -48,15 +45,19 @@ Build-Depends: bash-completion,
     librust-pathpatterns-0.3+default-dev,
     librust-percent-encoding-2+default-dev (>= 2.1-~~),
     librust-pin-project-lite-0.2+default-dev,
-    librust-proxmox-acme-0.5+default-dev,
-    librust-proxmox-apt-0.10+default-dev (>= 0.10.5-~~),
+    librust-proxmox-acme-0.5+default-dev (>= 0.5.3-~~),
+    librust-proxmox-apt-0.11+cache-dev,
+    librust-proxmox-apt-0.11+default-dev,
+    librust-proxmox-apt-api-types-1+default-dev (>= 1.0.1-~~),
     librust-proxmox-async-0.4+default-dev,
-    librust-proxmox-auth-api-0.3+api-dev,
-    librust-proxmox-auth-api-0.3+api-types-dev,
-    librust-proxmox-auth-api-0.3+default-dev,
-    librust-proxmox-auth-api-0.3+pam-authenticator-dev,
+    librust-proxmox-auth-api-0.4+api-dev,
+    librust-proxmox-auth-api-0.4+api-types-dev,
+    librust-proxmox-auth-api-0.4+default-dev,
+    librust-proxmox-auth-api-0.4+pam-authenticator-dev,
     librust-proxmox-borrow-1+default-dev,
     librust-proxmox-compression-0.2+default-dev,
+    librust-proxmox-config-digest-0.1+default-dev,
+    librust-proxmox-daemon-0.1+default-dev,
     librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
     librust-proxmox-http-0.9+client-dev,
     librust-proxmox-http-0.9+client-trait-dev,
@@ -71,43 +72,48 @@ Build-Depends: bash-completion,
     librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
     librust-proxmox-lang-1+default-dev (>= 1.1-~~),
     librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~),
-    librust-proxmox-metrics-0.3+default-dev,
+    librust-proxmox-log-0.2+default-dev (>= 0.2.4-~~),
+    librust-proxmox-metrics-0.3+default-dev (>= 0.3.1-~~),
+    librust-proxmox-notify-0.4+default-dev,
+    librust-proxmox-notify-0.4+pbs-context-dev,
     librust-proxmox-openid-0.10+default-dev,
-    librust-proxmox-rest-server-0.5+default-dev (>= 0.5.1-~~),
-    librust-proxmox-rest-server-0.5+rate-limited-stream-dev (>= 0.5.1-~~),
-    librust-proxmox-rest-server-0.5+templates-dev (>= 0.5.1-~~),
-    librust-proxmox-router-2+cli-dev,
-    librust-proxmox-router-2+default-dev,
-    librust-proxmox-router-2+server-dev,
-    librust-proxmox-rrd-0.1+default-dev,
+    librust-proxmox-rest-server-0.8+default-dev,
+    librust-proxmox-rest-server-0.8+rate-limited-stream-dev,
+    librust-proxmox-rest-server-0.8+templates-dev,
+    librust-proxmox-router-3+cli-dev,
+    librust-proxmox-router-3+server-dev,
+    librust-proxmox-rrd-0.4+default-dev,
+    librust-proxmox-rrd-api-types-1+default-dev (>= 1.0.2-~~),
     librust-proxmox-schema-3+api-macro-dev,
     librust-proxmox-schema-3+default-dev,
     librust-proxmox-section-config-2+default-dev,
     librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
     librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
+    librust-proxmox-shared-cache-0.1+default-dev,
     librust-proxmox-shared-memory-0.3+default-dev,
     librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
     librust-proxmox-subscription-0.4+api-types-dev (>= 0.4.2-~~),
     librust-proxmox-subscription-0.4+default-dev (>= 0.4.2-~~),
-    librust-proxmox-sys-0.5+acl-dev (>= 0.5.3-~~),
-    librust-proxmox-sys-0.5+crypt-dev (>= 0.5.3-~~),
-    librust-proxmox-sys-0.5+default-dev (>= 0.5.3-~~),
-    librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.3-~~),
-    librust-proxmox-sys-0.5+timer-dev (>= 0.5.3-~~),
-    librust-proxmox-tfa-4+api-dev (>= 4.0.4-~~),
-    librust-proxmox-tfa-4+api-types-dev (>= 4.0.4-~~),
-    librust-proxmox-tfa-4+default-dev (>= 4.0.4-~~),
-    librust-proxmox-time-1+default-dev (>= 1.1.6-~~),
+    librust-proxmox-sys-0.6+acl-dev,
+    librust-proxmox-sys-0.6+crypt-dev,
+    librust-proxmox-sys-0.6+default-dev,
+    librust-proxmox-sys-0.6+logrotate-dev,
+    librust-proxmox-sys-0.6+timer-dev,
+    librust-proxmox-systemd-0.1+default-dev,
+    librust-proxmox-tfa-5+api-dev,
+    librust-proxmox-tfa-5+api-types-dev,
+    librust-proxmox-tfa-5+default-dev,
+    librust-proxmox-time-2+default-dev,
     librust-proxmox-uuid-1+default-dev,
     librust-proxmox-uuid-1+serde-dev,
-    librust-pxar-0.10+default-dev (>= 0.10.2-~~),
+    librust-proxmox-worker-task-0.1+default-dev,
+    librust-pxar-0.12+default-dev,
     librust-regex-1+default-dev (>= 1.5.5-~~),
     librust-rustyline-9+default-dev,
     librust-serde-1+default-dev,
     librust-serde-1+derive-dev,
     librust-serde-json-1+default-dev,
     librust-serde-plain-1+default-dev,
     librust-siphasher-0.3+default-dev,
-    librust-syslog-6+default-dev,
     librust-tar-0.4+default-dev,
     librust-termcolor-1+default-dev (>= 1.1.2-~~),
@@ -131,12 +137,14 @@ Build-Depends: bash-completion,
     librust-tokio-util-0.7+default-dev,
     librust-tokio-util-0.7+io-dev,
     librust-tower-service-0.3+default-dev,
+    librust-tracing-0.1+default-dev,
     librust-udev-0.4+default-dev,
     librust-url-2+default-dev (>= 2.1-~~),
     librust-walkdir-2+default-dev,
     librust-xdg-2+default-dev (>= 2.2-~~),
     librust-zstd-0.12+bindgen-dev,
     librust-zstd-0.12+default-dev,
+    librust-zstd-safe-6+default-dev,
     libsgutils2-dev,
     libstd-rust-dev,
     libsystemd-dev (>= 246-~~),
@@ -175,7 +183,7 @@ Depends: fonts-font-awesome,
     postfix | mail-transport-agent,
     proxmox-backup-docs,
     proxmox-mini-journalreader,
-    proxmox-widget-toolkit (>= 3.5.2),
+    proxmox-widget-toolkit (>= 4.1.4),
     pve-xtermjs (>= 4.7.0-1),
     sg3-utils,
     smartmontools,
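The Build-Depends churn above tracks the Cargo.toml changes one-to-one: Debian's dh-cargo packaging exposes each crate as a librust-<name>-<major version>-dev package, and each enabled cargo feature as an additional +<feature>-dev provide. An illustrative mapping (a sketch of the convention, not generated output):

    # Cargo.toml entry                         resulting Build-Depends entries
    proxmox-tfa = { version = "5",             librust-proxmox-tfa-5+default-dev,
        features = [ "api", "api-types" ] }    librust-proxmox-tfa-5+api-dev,
                                               librust-proxmox-tfa-5+api-types-dev,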
debian/copyright (2 lines changed)
@@ -1,4 +1,4 @@
-Copyright (C) 2019 - 2023 Proxmox Server Solutions GmbH
+Copyright (C) 2019 - 2024 Proxmox Server Solutions GmbH
 
 This software is written by Proxmox Server Solutions GmbH <support@proxmox.com>
 
debian/proxmox-backup-file-restore.postinst (2 lines changed)
@@ -9,7 +9,7 @@ update_initramfs() {
     CACHE_PATH_DBG="/var/cache/proxmox-backup/file-restore-initramfs-debug.img"
 
     # cleanup first, in case proxmox-file-restore was uninstalled since we do
-    # not want an unuseable image lying around
+    # not want an unusable image lying around
     rm -f "$CACHE_PATH"
 
     if [ ! -f "$INST_PATH/initramfs.img" ]; then
debian/proxmox-backup-server.install (32 lines changed)
@@ -30,12 +30,44 @@ usr/share/man/man5/acl.cfg.5
 usr/share/man/man5/datastore.cfg.5
 usr/share/man/man5/domains.cfg.5
 usr/share/man/man5/media-pool.cfg.5
+usr/share/man/man5/notifications-priv.cfg.5
+usr/share/man/man5/notifications.cfg.5
+usr/share/man/man5/proxmox-backup.node.cfg.5
 usr/share/man/man5/remote.cfg.5
 usr/share/man/man5/sync.cfg.5
 usr/share/man/man5/tape-job.cfg.5
 usr/share/man/man5/tape.cfg.5
 usr/share/man/man5/user.cfg.5
 usr/share/man/man5/verification.cfg.5
+usr/share/proxmox-backup/templates/default/acme-err-body.txt.hbs
+usr/share/proxmox-backup/templates/default/acme-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/gc-err-body.txt.hbs
+usr/share/proxmox-backup/templates/default/gc-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/gc-ok-body.txt.hbs
+usr/share/proxmox-backup/templates/default/gc-ok-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/package-updates-body.txt.hbs
+usr/share/proxmox-backup/templates/default/package-updates-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/prune-err-body.txt.hbs
+usr/share/proxmox-backup/templates/default/prune-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/prune-ok-body.txt.hbs
+usr/share/proxmox-backup/templates/default/prune-ok-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/sync-err-body.txt.hbs
+usr/share/proxmox-backup/templates/default/sync-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/sync-ok-body.txt.hbs
+usr/share/proxmox-backup/templates/default/sync-ok-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/tape-backup-err-body.txt.hbs
+usr/share/proxmox-backup/templates/default/tape-backup-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/tape-backup-ok-body.txt.hbs
+usr/share/proxmox-backup/templates/default/tape-backup-ok-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/tape-load-body.txt.hbs
+usr/share/proxmox-backup/templates/default/tape-load-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/test-body.html.hbs
+usr/share/proxmox-backup/templates/default/test-body.txt.hbs
+usr/share/proxmox-backup/templates/default/test-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/verify-err-body.txt.hbs
+usr/share/proxmox-backup/templates/default/verify-err-subject.txt.hbs
+usr/share/proxmox-backup/templates/default/verify-ok-body.txt.hbs
+usr/share/proxmox-backup/templates/default/verify-ok-subject.txt.hbs
 usr/share/zsh/vendor-completions/_pmt
 usr/share/zsh/vendor-completions/_pmtx
 usr/share/zsh/vendor-completions/_proxmox-backup-debug
debian/rules (7 lines changed)
@@ -8,7 +8,7 @@ include /usr/share/rustc/architecture.mk
 
 export BUILD_MODE=release
 
-CARGO=/usr/share/cargo/bin/cargo
+export CARGO=/usr/share/cargo/bin/cargo
 
 export CFLAGS CXXFLAGS CPPFLAGS LDFLAGS
 export DEB_HOST_RUST_TYPE DEB_HOST_GNU_TYPE
@@ -28,6 +28,11 @@ override_dh_auto_configure:
 	@perl -ne 'if (/^version\s*=\s*"(\d+(?:\.\d+)+)"/) { my $$v_cargo = $$1; my $$v_deb = "$(DEB_VERSION_UPSTREAM)"; \
 	    die "ERROR: d/changelog <-> Cargo.toml version mismatch: $$v_cargo != $$v_deb\n" if $$v_cargo ne $$v_deb; exit(0); }' Cargo.toml
 	$(CARGO) prepare-debian $(CURDIR)/debian/cargo_registry --link-from-system
+	# `cargo build` and `cargo install` have different config precedence, symlink
+	# the wrapper config into a place where `build` picks it up as well..
+	# https://doc.rust-lang.org/cargo/commands/cargo-install.html#configuration-discovery
+	mkdir -p .cargo
+	ln -s $(CARGO_HOME)/config.toml $(CURDIR)/.cargo/config.toml
 	dh_auto_configure
 
 override_dh_auto_build:
docs/Makefile

@@ -1,55 +1,63 @@
 include ../defines.mk
 
 GENERATED_SYNOPSIS := \
-	proxmox-tape/synopsis.rst \
-	proxmox-backup-client/synopsis.rst \
-	proxmox-backup-client/catalog-shell-synopsis.rst \
-	proxmox-backup-manager/synopsis.rst \
-	proxmox-backup-debug/synopsis.rst \
-	proxmox-file-restore/synopsis.rst \
-	pxar/synopsis.rst \
-	pmtx/synopsis.rst \
-	pmt/synopsis.rst \
-	config/media-pool/config.rst \
-	config/tape/config.rst \
-	config/tape-job/config.rst \
-	config/user/config.rst \
-	config/remote/config.rst \
-	config/sync/config.rst \
-	config/verification/config.rst \
 	config/acl/roles.rst \
 	config/datastore/config.rst \
-	config/domains/config.rst
+	config/domains/config.rst \
+	config/media-pool/config.rst \
+	config/notifications-priv/config.rst \
+	config/notifications/config.rst \
+	config/remote/config.rst \
+	config/sync/config.rst \
+	config/tape-job/config.rst \
+	config/tape/config.rst \
+	config/user/config.rst \
+	config/verification/config.rst \
+	pmt/synopsis.rst \
+	pmtx/synopsis.rst \
+	proxmox-backup-client/catalog-shell-synopsis.rst \
+	proxmox-backup-client/synopsis.rst \
+	proxmox-backup-debug/synopsis.rst \
+	proxmox-backup-manager/synopsis.rst \
+	proxmox-file-restore/synopsis.rst \
+	proxmox-tape/synopsis.rst \
+	pxar/synopsis.rst \
 
 MAN1_PAGES := \
-	pxar.1 \
-	pmtx.1 \
-	pmt.1 \
-	proxmox-tape.1 \
-	proxmox-backup-proxy.1 \
-	proxmox-backup-client.1 \
-	proxmox-backup-manager.1 \
-	proxmox-file-restore.1 \
-	proxmox-backup-debug.1 \
 	pbs2to3.1 \
+	pmt.1 \
+	pmtx.1 \
+	proxmox-backup-client.1 \
+	proxmox-backup-debug.1 \
+	proxmox-backup-manager.1 \
+	proxmox-backup-proxy.1 \
+	proxmox-file-restore.1 \
+	proxmox-tape.1 \
+	pxar.1 \
 
+# FIXME: prefix all man pages that are not directly relating to an existing executable with
+# `proxmox-backup.`, like the newer added proxmox-backup.node.cfg but add backwards compatible
+# symlinks, e.g. with a "5pbs" man page "suffix section".
 MAN5_PAGES := \
-	media-pool.cfg.5 \
-	tape.cfg.5 \
-	tape-job.cfg.5 \
 	acl.cfg.5 \
-	user.cfg.5 \
-	datastore.cfg.5 \
-	domains.cfg.5 \
+	media-pool.cfg.5 \
+	proxmox-backup.node.cfg.5 \
+	notifications-priv.cfg.5 \
+	notifications.cfg.5 \
 	remote.cfg.5 \
 	sync.cfg.5 \
+	tape-job.cfg.5 \
+	tape.cfg.5 \
+	user.cfg.5 \
 	verification.cfg.5 \
+	datastore.cfg.5 \
+	domains.cfg.5
 
 PRUNE_SIMULATOR_FILES := \
 	prune-simulator/index.html \
-	prune-simulator/documentation.html \
 	prune-simulator/clear-trigger.png \
-	prune-simulator/prune-simulator.js
+	prune-simulator/documentation.html \
+	prune-simulator/prune-simulator.js \
 
 PRUNE_SIMULATOR_JS_SOURCE := \
 	/usr/share/javascript/proxmox-widget-toolkit-dev/Toolkit.js \
@@ -85,13 +93,13 @@ SPHINXOPTS =
 SPHINXBUILD = sphinx-build
 BUILDDIR = output
 
-#ifeq ($(BUILD_MODE), release)
-COMPILEDIR := ../target/release
+ifeq ($(BUILD_MODE), release)
+COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/release
 SPHINXOPTS += -t release
-#else
-#COMPILEDIR := ../target/debug
-#SPHINXOPTS += -t devbuild
-#endif
+else
+COMPILEDIR := ../target/$(DEB_HOST_RUST_TYPE)/debug
+SPHINXOPTS += -t devbuild
+endif
 
 # Sphinx internal variables.
 ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) .
@@ -138,9 +146,9 @@ lto-barcode/lto-barcode-generator.js: ${LTO_BARCODE_JS_SOURCE}
 	mv $@.tmp $@
 
 .PHONY: html
-html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
+html: ${GENERATED_SYNOPSIS} images/proxmox-logo.svg _static/custom.css conf.py ${PRUNE_SIMULATOR_FILES} ${LTO_BARCODE_FILES} ${API_VIEWER_SOURCES}
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	install -m 0644 custom.js custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
+	install -m 0644 _static/custom.js _static/custom.css images/proxmox-logo.svg $(BUILDDIR)/html/_static/
 	install -dm 0755 $(BUILDDIR)/html/prune-simulator
 	install -m 0644 ${PRUNE_SIMULATOR_FILES} $(BUILDDIR)/html/prune-simulator
 	install -dm 0755 $(BUILDDIR)/html/lto-barcode
docs/custom.js → docs/_static/custom.js (renamed, no content changes)
docs/backup-client.rst

@@ -280,6 +280,65 @@ Multiple paths can be excluded like this:
 
     # proxmox-backup-client backup.pxar:./linux --exclude=/usr --exclude=/rust
 
+.. _client_change_detection_mode:
+
+Change Detection Mode
+~~~~~~~~~~~~~~~~~~~~~
+
+File-based backups containing a lot of data can take a long time, as the default
+behavior for the Proxmox backup client is to read all data and encode it into a
+pxar archive.
+The encoded stream is split into variable sized chunks. For each chunk, a digest
+is calculated and used to decide whether the chunk needs to be uploaded or can
+be indexed without upload, as it is already available on the server (and
+therefore deduplicated). If the backed-up files are largely unchanged,
+re-reading them only to detect that the corresponding chunks do not need to be
+uploaded after all is time-consuming and undesired.
+
+The backup client's `change-detection-mode` can be switched from default to
+`metadata` based detection to reduce the limitations described above,
+instructing the client to avoid re-reading files with unchanged metadata
+whenever possible.
+When using this mode, instead of the regular pxar archive, the backup snapshot
+is stored into two separate files: the `mpxar` containing the archive's metadata
+and the `ppxar` containing a concatenation of the file contents. This splitting
+allows for efficient metadata lookups.
+
+Setting the `change-detection-mode` to `data` allows creating the same split
+archive as with the `metadata` mode, but without using a previous reference and
+therefore re-encoding all file payloads.
+When creating the backup archives, the current file metadata is compared to the
+one looked up in the previous `mpxar` archive.
+The metadata comparison includes file size, file type, ownership and permission
+information, as well as acls and attributes and, most importantly, the file's
+mtime; for details see the
+:ref:`pxar metadata archive format <pxar-meta-format>`.
+
+If unchanged, the entry is cached for possible re-use of content chunks without
+re-reading, by indexing the already present chunks containing the contents from
+the previous backup snapshot. Since the file might only partially re-use chunks
+(thereby introducing wasted space in the form of padding), the decision whether
+to re-use or re-encode the currently cached entries is postponed to when enough
+information is available, comparing the possible padding to a threshold value.
+
+.. _client_change_detection_mode_table:
+
+============ ===================================================================
+Mode         Description
+============ ===================================================================
+``legacy``   (current default): Encode all files into a self contained pxar
+             archive.
+``data``     Encode all files into a split data and metadata pxar archive.
+``metadata`` Encode changed files, reuse unchanged from previous snapshot,
+             creating a split archive.
+============ ===================================================================
+
+The following shows an example for the client invocation with the `metadata`
+mode:
+
+.. code-block:: console
+
+    # proxmox-backup-client backup.pxar:./linux --change-detection-mode=metadata
+
 .. _client_encryption:
 
 Encryption
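With the `metadata` mode, a file-level snapshot on the server then holds a pair of indexed archives instead of a single one. An illustrative listing for a snapshot created from the invocation above (the index file names are assumptions derived from the archive name `backup`):

    # proxmox-backup-client snapshot files host/myhost/2024-10-18T10:00:00Z
    backup.mpxar.didx   (metadata archive index)
    backup.ppxar.didx   (payload archive index)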
docs/conf.py

@@ -108,12 +108,15 @@ man_pages = [
     ('config/datastore/man5', 'datastore.cfg', 'Datastore Configuration', [author], 5),
     ('config/domains/man5', 'domains.cfg', 'Realm Configuration', [author], 5),
     ('config/media-pool/man5', 'media-pool.cfg', 'Media Pool Configuration', [author], 5),
+    ('config/node/man5', 'proxmox-backup.node.cfg', 'Proxmox Backup Server - Node Configuration', [author], 5),
     ('config/remote/man5', 'remote.cfg', 'Remote Server Configuration', [author], 5),
     ('config/sync/man5', 'sync.cfg', 'Synchronization Job Configuration', [author], 5),
     ('config/tape-job/man5', 'tape-job.cfg', 'Tape Job Configuration', [author], 5),
     ('config/tape/man5', 'tape.cfg', 'Tape Drive and Changer Configuration', [author], 5),
     ('config/user/man5', 'user.cfg', 'User Configuration', [author], 5),
     ('config/verification/man5', 'verification.cfg', 'Verification Job Configuration', [author], 5),
+    ('config/notifications/man5', 'notifications.cfg', 'Notification target/matcher configuration', [author], 5),
+    ('config/notifications-priv/man5', 'notifications-priv.cfg', 'Notification target secrets', [author], 5),
 ]
 
 
@@ -263,6 +266,9 @@ html_static_path = ['_static']
 html_js_files = [
     'custom.js',
 ]
+html_css_files = [
+    'custom.css',
+]
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
docs/user-management.rst

@@ -23,5 +23,5 @@ For LDAP realms, the LDAP bind password is stored in ``ldap_passwords.json``.
   user-classes inetorgperson,posixaccount,person,user
 
 
-You can use the ``proxmox-backup-manager openid`` and ``proxmox-backup-manager ldap`` commands to manipulate
-this file.
+You can use the ``proxmox-backup-manager openid``, ``proxmox-backup-manager
+ldap`` and ``proxmox-backup-manager ad`` commands to manipulate this file.
docs/config/node/format.rst (new file, 49 lines)
@@ -0,0 +1,49 @@
+The file contains these options:
+
+:acme: The ACME account to use on this node.
+
+:acmedomain0: ACME domain.
+
+:acmedomain1: ACME domain.
+
+:acmedomain2: ACME domain.
+
+:acmedomain3: ACME domain.
+
+:acmedomain4: ACME domain.
+
+:http-proxy: Set proxy for apt and subscription checks.
+
+:email-from: Fallback email from which notifications will be sent.
+
+:ciphers-tls-1.3: List of TLS ciphers for TLS 1.3 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (The proxy has to be restarted for changes to take effect.)
+
+:ciphers-tls-1.2: List of TLS ciphers for TLS <= 1.2 that will be used by the proxy. Colon-separated and in descending priority (https://docs.openssl.org/master/man1/openssl-ciphers/). (The proxy has to be restarted for changes to take effect.)
+
+:default-lang: Default language used in the GUI.
+
+:description: Node description.
+
+:task-log-max-days: Maximum days to keep task logs.
+
+For example:
+
+::
+
+  acme: local
+  acmedomain0: first.domain.com
+  acmedomain1: second.domain.com
+  acmedomain2: third.domain.com
+  acmedomain3: fourth.domain.com
+  acmedomain4: fifth.domain.com
+  http-proxy: internal.proxy.com
+  email-from: proxmox@mail.com
+  ciphers-tls-1.3: TLS_AES_128_GCM_SHA256:TLS_AES_128_CCM_8_SHA256:TLS_CHACHA20_POLY1305_SHA256
+  ciphers-tls-1.2: RSA_WITH_AES_128_CCM:DHE_RSA_WITH_AES_128_CCM
+  default-lang: en
+  description: Primary PBS instance
+  task-log-max-days: 30
+
+
+You can use the ``proxmox-backup-manager node`` command to manipulate
+this file.
docs/config/node/man5.rst (new file, 18 lines)
@@ -0,0 +1,18 @@
+:orphan:
+
+========
+node.cfg
+========
+
+Description
+===========
+
+The file /etc/proxmox-backup/node.cfg is a configuration file for Proxmox
+Backup Server. It contains the general configuration regarding this node.
+
+Options
+=======
+
+.. include:: format.rst
+
+.. include:: ../../pbs-copyright.rst
docs/config/notifications-priv/format.rst (new file, 1 line)
@@ -0,0 +1 @@
+This file contains protected credentials for notification targets.
docs/config/notifications-priv/man5.rst (new file, 24 lines)
@@ -0,0 +1,24 @@
+:orphan:
+
+======================
+notifications-priv.cfg
+======================
+
+Description
+===========
+
+The file /etc/proxmox-backup/notifications-priv.cfg is a configuration file
+for Proxmox Backup Server. It contains the protected credentials part of the
+notification system configuration.
+
+File Format
+===========
+
+.. include:: format.rst
+
+Options
+=======
+
+.. include:: config.rst
+
+.. include:: ../../pbs-copyright.rst
docs/config/notifications/format.rst (new file, 2 lines)
@@ -0,0 +1,2 @@
+This file contains configuration for notification targets and notification
+matchers.
docs/config/notifications/man5.rst (new file, 24 lines)
@@ -0,0 +1,24 @@
+:orphan:
+
+==================
+notifications.cfg
+==================
+
+Description
+===========
+
+The file /etc/proxmox-backup/notifications.cfg is a configuration file
+for Proxmox Backup Server. It contains the configuration for the
+notification system.
+
+File Format
+===========
+
+.. include:: format.rst
+
+Options
+=======
+
+.. include:: config.rst
+
+.. include:: ../../pbs-copyright.rst
docs/configuration-files.rst

@@ -67,6 +67,46 @@ Options
 
 .. include:: config/media-pool/config.rst
 
+``node.cfg``
+~~~~~~~~~~~~~~~~~~
+
+Options
+^^^^^^^
+
+.. include:: config/node/format.rst
+
+.. _notifications.cfg:
+
+``notifications.cfg``
+~~~~~~~~~~~~~~~~~~~~~
+
+File Format
+^^^^^^^^^^^
+
+.. include:: config/notifications/format.rst
+
+
+Options
+^^^^^^^
+
+.. include:: config/notifications/config.rst
+
+.. _notifications_priv.cfg:
+
+``notifications-priv.cfg``
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+File Format
+^^^^^^^^^^^
+
+.. include:: config/notifications-priv/format.rst
+
+
+Options
+^^^^^^^
+
+.. include:: config/notifications-priv/config.rst
+
+
 ``tape.cfg``
 ~~~~~~~~~~~~
 
docs/external-metric-server.rst (new file, 55 lines)
@@ -0,0 +1,55 @@
+External Metric Server
+----------------------
+
+Proxmox Backup Server periodically sends various metrics about your host's memory,
+network and disk activity to configured external metric servers.
+
+Currently supported are:
+
+ * InfluxDB (HTTP) (see https://docs.influxdata.com/influxdb/v2/ )
+ * InfluxDB (UDP) (see https://docs.influxdata.com/influxdb/v1/ )
+
+The external metric server definitions are saved in
+'/etc/proxmox-backup/metricserver.cfg', and can be edited through the web
+interface.
+
+.. note::
+
+  Using HTTP is recommended as UDP support has been dropped in InfluxDB v2.
+
+InfluxDB (HTTP) plugin configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The plugin can be configured to use the HTTP(s) API of InfluxDB 2.x.
+InfluxDB 1.8.x contains a forward-compatible API endpoint for this v2 API.
+
+Since InfluxDB's v2 API is only available with authentication, you have
+to generate a token that can write into the correct bucket and set it.
+
+In the v2 compatible API of 1.8.x, you can use 'user:password' as token
+(if required), and can omit the 'organization' since that has no meaning in InfluxDB 1.x.
+
+You can also set the maximum batch size (default 25000000 bytes) with the
+'max-body-size' setting (this corresponds to the InfluxDB setting with the
+same name).
+
+InfluxDB (UDP) plugin configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Proxmox Backup Server can also send data via UDP. This requires the InfluxDB
+server to be configured correctly. The MTU can also be configured here if
+necessary.
+
+Here is an example configuration for InfluxDB (on your InfluxDB server):
+
+.. code-block:: console
+
+  [[udp]]
+    enabled = true
+    bind-address = "0.0.0.0:8089"
+    database = "proxmox"
+    batch-size = 1000
+    batch-timeout = "1s"
+
+With this configuration, the InfluxDB server listens on all IP addresses on
+port 8089, and writes the data in the *proxmox* database.
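For the HTTP variant, a write token on the InfluxDB 2.x side can be created with the stock `influx` CLI; a sketch (organization name and bucket ID are placeholders):

    $ influx auth create --org my-org --write-bucket <bucket-id> \
        --description "pbs metrics"

The token printed by that command is what gets entered in the metric server definition on the Proxmox Backup Server side.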
@ -8,7 +8,53 @@ Proxmox File Archive Format (``.pxar``)

.. graphviz:: pxar-format-overview.dot

.. _pxar-meta-format:

Proxmox File Archive Format - Meta (``.mpxar``)
-----------------------------------------------

Pxar metadata archive with the same structure as a regular pxar archive, with
the exception of regular file payloads not being contained within the archive
itself, but rather being stored as payload references to the corresponding pxar
payload (``.ppxar``) file.

Can be used to look up all the archive entries and metadata without the size
overhead introduced by the file payloads.

.. graphviz:: meta-format-overview.dot

.. _ppxar-format:

Proxmox File Archive Format - Payload (``.ppxar``)
--------------------------------------------------

Pxar payload file storing regular file payloads to be referenced and accessed by
the corresponding pxar metadata (``.mpxar``) archive. Contains a concatenation
of regular file payloads, each prefixed by a `PAYLOAD` header. Further, the
actual referenced payload entries might be separated by padding (full/partial
payloads that are not referenced), introduced when reusing chunks of a previous
backup run, when chunk boundaries did not align to payload entry offsets.

All headers are stored as little-endian.

.. list-table::
   :widths: auto

   * - ``PAYLOAD_START_MARKER``
     - header of ``[u8; 16]`` consisting of type hash and size;
       marks start
   * - ``PAYLOAD``
     - header of ``[u8; 16]`` consisting of type hash and size;
       referenced by metadata archive
   * - Payload
     - raw regular file payload
   * - Padding
     - partial/full unreferenced payloads, caused by unaligned chunk boundary
   * - ...
     - further concatenation of payload header, payload and padding
   * - ``PAYLOAD_TAIL_MARKER``
     - header of ``[u8; 16]`` consisting of type hash and size;
       marks end

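As a small illustration of the header layout described above, the following
sketch decodes one 16-byte little-endian header. The field order (type hash
first, then size) is an assumption based on the table, not a normative
definition:

.. code-block:: rust

   /// Decode a ppxar header: 8-byte type hash followed by an 8-byte size,
   /// both little-endian. Field order is assumed from the table above.
   fn parse_ppxar_header(buf: &[u8; 16]) -> (u64, u64) {
       let htype = u64::from_le_bytes(buf[0..8].try_into().unwrap());
       let size = u64::from_le_bytes(buf[8..16].try_into().unwrap());
       (htype, size)
   }
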
.. _data-blob-format:

Data Blob Format (``.blob``)

@ -31,6 +31,7 @@ in the section entitled "GNU Free Documentation License".

   maintenance.rst
   sysadmin.rst
   network-management.rst
   notifications.rst
   technical-overview.rst
   faq.rst

@ -50,6 +50,22 @@ It includes the following:

.. note:: During the installation process, the complete server
   is used by default and all existing data is removed.

Install `Proxmox Backup`_ Server Unattended
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is possible to install `Proxmox Backup`_ Server automatically in an
unattended manner. This enables you to fully automate the setup process on
bare-metal. Once the installation is complete and the host has booted up,
automation tools like Ansible can be used to further configure the installation.

The necessary options for the installer must be provided in an answer file.
This file allows the use of filter rules to determine which disks and network
cards should be used.

To use the automated installation, it is first necessary to prepare an
installation ISO. For more details and information on the unattended
installation, see `our wiki
<https://pve.proxmox.com/wiki/Automated_Installation>`_.

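The following is a minimal sketch of what such an answer file might look
like; the section and key names shown here are assumptions and should be
checked against the authoritative TOML schema documented in the wiki linked
above:

.. code-block:: console

   [global]
   keyboard = "en-us"
   country = "at"
   fqdn = "pbs.example.com"
   mailto = "admin@example.com"
   timezone = "Europe/Vienna"
   root_password = "change-me"

   [network]
   source = "from-dhcp"

   [disk-setup]
   filesystem = "ext4"
   disk_list = ["sda"]
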
Install `Proxmox Backup`_ Server on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

@ -108,7 +108,7 @@ Ext.define('PageCalibration', {
        xtype: 'numberfield',
        value: 'a4',
        name: 's_x',
        fieldLabel: 'Meassured Start Offset Sx (mm)',
        fieldLabel: 'Measured Start Offset Sx (mm)',
        allowBlank: false,
        labelWidth: 200,
    },
@ -116,7 +116,7 @@ Ext.define('PageCalibration', {
        xtype: 'numberfield',
        value: 'a4',
        name: 'd_x',
        fieldLabel: 'Meassured Length Dx (mm)',
        fieldLabel: 'Measured Length Dx (mm)',
        allowBlank: false,
        labelWidth: 200,
    },
@ -124,7 +124,7 @@ Ext.define('PageCalibration', {
        xtype: 'numberfield',
        value: 'a4',
        name: 's_y',
        fieldLabel: 'Meassured Start Offset Sy (mm)',
        fieldLabel: 'Measured Start Offset Sy (mm)',
        allowBlank: false,
        labelWidth: 200,
    },
@ -132,7 +132,7 @@ Ext.define('PageCalibration', {
        xtype: 'numberfield',
        value: 'a4',
        name: 'd_y',
        fieldLabel: 'Meassured Length Dy (mm)',
        fieldLabel: 'Measured Length Dy (mm)',
        allowBlank: false,
        labelWidth: 200,
    },

@ -277,26 +277,10 @@ the **Actions** column in the table.
Notifications
-------------

Proxmox Backup Server can send you notification emails about automatically
Proxmox Backup Server can send you notifications about automatically
scheduled verification, garbage-collection and synchronization tasks results.

By default, notifications are sent to the email address configured for the
`root@pam` user. You can instead set this user for each datastore.

.. image:: images/screenshots/pbs-gui-datastore-options.png
   :target: _images/pbs-gui-datastore-options.png
   :align: right
   :alt: Datastore Options

You can also change the level of notification received per task type, the
following options are available:

* Always: send a notification for any scheduled task, independent of the
  outcome

* Errors: send a notification for any scheduled task that results in an error

* Never: do not send any notification at all
Refer to the :ref:`notifications` chapter for more details.

.. _maintenance_mode:

@ -69,6 +69,9 @@ sync-job`` command. The configuration information for sync jobs is stored at
in the GUI, or use the ``create`` subcommand. After creating a sync job, you can
either start it manually from the GUI or provide it with a schedule (see
:ref:`calendar-event-scheduling`) to run regularly.
Backup snapshots, groups and namespaces which are no longer available on the
**Remote** datastore can be removed from the local datastore as well by setting
the ``remove-vanished`` option for the sync job.

.. code-block:: console

50
docs/meta-format-overview.dot
Normal file
@ -0,0 +1,50 @@
digraph g {
graph [
rankdir = "LR"
fontname="Helvetica"
];
node [
fontsize = "16"
shape = "record"
];
edge [
];

"archive" [
label = "archive.mpxar"
shape = "record"
];

"rootdir" [
label = "<fv>FORMAT_VERSION\l|PRELUDE\l|<f0>ENTRY\l|\{XATTR\}\* extended attribute list\l|\{ACL_USER\}\* USER ACL entries\l|\{ACL_GROUP\}\* GROUP ACL entries\l|\[ACL_GROUP_OBJ\] the ACL_GROUP_OBJ \l|\[ACL_DEFAULT\] the various default ACL fields\l|\{ACL_DEFAULT_USER\}\* USER ACL entries\l|\{ACL_DEFAULT_GROUP\}\* GROUP ACL entries\l|\[FCAPS\] file capability in Linux disk format\l|\[QUOTA_PROJECT_ID\] the ext4/xfs quota project ID\l|{<pl> PAYLOAD_REF|SYMLINK|DEVICE|{<de> \{DirectoryEntries\}\*|GOODBYE}}"
shape = "record"
];


"entry" [
label = "<f0> size: u64 = 64\l|type: u64 = ENTRY\l|feature_flags: u64\l|mode: u64\l|flags: u64\l|uid: u64\l|gid: u64\l|mtime: u64\l"
labeljust = "l"
shape = "record"
];



"direntry" [
label = "<f0> FILENAME\l|{ENTRY\l|HARDLINK\l}"
shape = "record"
];

"payloadrefentry" [
label = "<f0> offset: u64\l|size: u64\l"
shape = "record"
];

"archive" -> "rootdir":fv

"rootdir":f0 -> "entry":f0

"rootdir":de -> "direntry":f0

"rootdir":pl -> "payloadrefentry":f0

}

257
docs/notifications.rst
Normal file
@ -0,0 +1,257 @@
.. _notifications:

Notifications
=============

Overview
--------

* Proxmox Backup Server emits :ref:`notification_events` in case of noteworthy
  events in the system. These events are handled by the notification system.
  A notification event has metadata, for example a timestamp, a severity level,
  a type and other metadata fields.
* :ref:`notification_matchers` route a notification event to one or more notification
  targets. A matcher can have match rules to selectively route based on the metadata
  of a notification event.
* :ref:`notification_targets` are a destination to which a notification event
  is routed by a matcher. There are multiple types of target: mail-based
  (Sendmail and SMTP) and Gotify.

Datastores and tape backup jobs have a configurable :ref:`notification_mode`.
It allows you to choose between the notification system and a legacy mode
for sending notification emails. The legacy mode is equivalent to the
way notifications were handled before Proxmox Backup Server 3.2.

The notification system can be configured in the GUI under
*Configuration → Notifications*. The configuration is stored in
:ref:`notifications.cfg` and :ref:`notifications_priv.cfg` -
the latter contains sensitive configuration options such as
passwords or authentication tokens for notification targets and
can only be read by ``root``.

.. _notification_targets:

Notification Targets
--------------------

Proxmox Backup Server offers multiple types of notification targets.

.. _notification_targets_sendmail:

Sendmail
^^^^^^^^
The sendmail binary is a program commonly found on Unix-like operating systems
that handles the sending of email messages.
It is a command-line utility that allows users and applications to send emails
directly from the command line or from within scripts.

The sendmail notification target uses the ``sendmail`` binary to send emails to a
list of configured users or email addresses. If a user is selected as a recipient,
the email address configured in the user's settings will be used.
For the ``root@pam`` user, this is the email address entered during installation.
A user's email address can be configured in ``Configuration -> Access Control -> User Management``.
If a user has no associated email address, no email will be sent.

.. NOTE:: In standard Proxmox Backup Server installations, the ``sendmail`` binary is provided by
   Postfix. It may be necessary to configure Postfix so that it can deliver
   mails correctly - for example by setting an external mail relay (smart host).
   In case of failed delivery, check the system logs for messages logged by
   the Postfix daemon.

See :ref:`notifications.cfg` for all configuration options.

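For illustration, a sendmail target entry in :ref:`notifications.cfg` could
look roughly like the following; the exact section-config syntax is a sketch,
not authoritative:

.. code-block:: console

   sendmail: sendmail-to-root
       mailto-user root@pam
       comment Send to root's configured address
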
.. _notification_targets_smtp:

SMTP
^^^^
SMTP notification targets can send emails directly to an SMTP mail relay.
This target does not use the system's MTA to deliver emails.
Similar to sendmail targets, if a user is selected as a recipient, the user's configured
email address will be used.

.. NOTE:: Unlike sendmail targets, SMTP targets do not have any queuing/retry mechanism
   in case of a failed mail delivery.

See :ref:`notifications.cfg` for all configuration options.

.. _notification_targets_gotify:

Gotify
^^^^^^
`Gotify <http://gotify.net>`_ is an open-source, self-hosted notification server that
allows you to send push notifications to various devices and
applications. It provides a simple API and web interface, making it easy to
integrate with different platforms and services.

See :ref:`notifications.cfg` for all configuration options.

.. _notification_matchers:

Notification Matchers
---------------------

Notification matchers route notifications to notification targets based
on their matching rules. These rules can match certain properties of a
notification, such as the timestamp (``match-calendar``), the severity of
the notification (``match-severity``) or metadata fields (``match-field``).
If a notification is matched by a matcher, all targets configured for the
matcher will receive the notification.

An arbitrary number of matchers can be created, each with their own
matching rules and targets to notify.
Every target is notified at most once for every notification, even if
the target is used in multiple matchers.

A matcher without rules matches any notification; the configured targets
will always be notified.

See :ref:`notifications.cfg` for all configuration options.

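As an example, a matcher that routes garbage collection errors to the
sendmail target sketched earlier might look as follows (again, the exact
section-config syntax is an assumption; the match rule syntax follows the
rules described below):

.. code-block:: console

   matcher: gc-errors-to-root
       match-field exact:type=gc
       match-severity error
       target sendmail-to-root
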
Calendar Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^
A calendar matcher matches a notification's timestamp.

Examples:

* ``match-calendar 8-12``
* ``match-calendar 8:00-15:30``
* ``match-calendar mon-fri 9:00-17:00``
* ``match-calendar sun,tue-wed,fri 9-17``

Field Matching Rules
^^^^^^^^^^^^^^^^^^^^
Notifications have a selection of metadata fields that can be matched.
When using ``exact`` as a matching mode, a ``,`` can be used as a separator.
The matching rule then matches if the metadata field has **any** of the specified
values.

Examples:

* ``match-field exact:type=gc`` Only match notifications for garbage collection jobs
* ``match-field exact:type=prune,verify`` Match prune job and verification job notifications.
* ``match-field regex:datastore=^backup-.*$`` Match any datastore starting with ``backup``.

If a notification does not have the matched field, the rule will **not** match.
For instance, a ``match-field regex:datastore=.*`` directive will match any notification that has
a ``datastore`` metadata field, but will not match if the field does not exist.

Severity Matching Rules
^^^^^^^^^^^^^^^^^^^^^^^
A notification has an associated severity that can be matched.

Examples:

* ``match-severity error``: Only match errors
* ``match-severity warning,error``: Match warnings and errors

The following severities are in use:
``info``, ``notice``, ``warning``, ``error``, ``unknown``.

.. _notification_events:

Notification Events
-------------------

The following table contains a list of all notification events in Proxmox Backup Server, their
type, severity and additional metadata fields. ``type`` as well as any other metadata field
may be used in ``match-field`` match rules.

================================ ==================== ========== ==============================================================
Event                            ``type``             Severity   Metadata fields (in addition to ``type``)
================================ ==================== ========== ==============================================================
ACME certificate renewal failed  ``acme``             ``error``  ``hostname``
Garbage collection failure       ``gc``               ``error``  ``datastore``, ``hostname``
Garbage collection success       ``gc``               ``info``   ``datastore``, ``hostname``
Package updates available        ``package-updates``  ``info``   ``hostname``
Prune job failure                ``prune``            ``error``  ``datastore``, ``hostname``, ``job-id``
Prune job success                ``prune``            ``info``   ``datastore``, ``hostname``, ``job-id``
Remote sync failure              ``sync``             ``error``  ``datastore``, ``hostname``, ``job-id``
Remote sync success              ``sync``             ``info``   ``datastore``, ``hostname``, ``job-id``
Tape backup job failure          ``tape-backup``      ``error``  ``datastore``, ``hostname``, ``media-pool``, ``job-id``
Tape backup job success          ``tape-backup``      ``info``   ``datastore``, ``hostname``, ``media-pool``, ``job-id``
Tape loading request             ``tape-load``        ``notice`` ``hostname``
Verification job failure         ``verification``     ``error``  ``datastore``, ``hostname``, ``job-id``
Verification job success         ``verification``     ``info``   ``datastore``, ``hostname``, ``job-id``
================================ ==================== ========== ==============================================================

The following table contains a description of all used metadata fields. All of these
can be used in ``match-field`` match rules.

==================== ===================================
Metadata field       Description
==================== ===================================
``datastore``        The name of the datastore
``hostname``         The hostname of the backup server
``job-id``           Job ID
``media-pool``       The name of the tape media pool
``type``             Notification event type
==================== ===================================

.. NOTE:: The daily task checking for any available system updates only sends
   notifications if the node has an active subscription.

System Mail Forwarding
----------------------
Certain local system daemons, such as ``smartd``, send notification emails
to the local ``root`` user. Proxmox Backup Server will feed these mails
into the notification system as a notification of type ``system-mail``
and with severity ``unknown``.

When the email is forwarded to a sendmail target, the mail's content and headers
are forwarded as-is. For all other targets,
the system tries to extract both a subject line and the main text body
from the email content. In instances where emails solely consist of HTML
content, they will be transformed into plain text format during this process.

Permissions
-----------
In order to modify/view the configuration for notification targets,
the ``Sys.Modify``/``Sys.Audit`` permissions are required for the
``/system/notifications`` ACL node.

.. _notification_mode:

Notification Mode
-----------------
Datastores and tape backup/restore job configurations have a ``notification-mode``
option which can have one of two values:

* ``legacy-sendmail``: Send notification emails via the system's ``sendmail`` command.
  The notification system will be bypassed and any configured targets/matchers will be ignored.
  This mode is equivalent to the notification behavior for versions before
  Proxmox Backup Server 3.2.

* ``notification-system``: Use the new, flexible notification system.

If the ``notification-mode`` option is not set, Proxmox Backup Server will default
to ``legacy-sendmail``.

Starting with Proxmox Backup Server 3.2, a datastore created in the UI will
automatically opt in to the new notification system. If the datastore is created
via the API or the ``proxmox-backup-manager`` CLI, the ``notification-mode``
option has to be set explicitly to ``notification-system`` if the
notification system shall be used.

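For example, opting an existing datastore into the new system could look
like the following, assuming the ``notification-mode`` option added to the
datastore API type is exposed as a CLI flag of the same name:

.. code-block:: console

   # proxmox-backup-manager datastore update store1 --notification-mode notification-system
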
The ``legacy-sendmail`` mode might be removed in a later release of
Proxmox Backup Server.

Settings for ``legacy-sendmail`` notification mode
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If ``notification-mode`` is set to ``legacy-sendmail``, Proxmox Backup Server
will send notification emails via the system's ``sendmail`` command to the email
address configured for the user set in the ``notify-user`` option
(falling back to ``root@pam`` if not set).

For datastores, you can also change the level of notifications received per task
type via the ``notify`` option.

* Always: send a notification for any scheduled task, independent of the
  outcome

* Errors: send a notification for any scheduled task that results in an error

* Never: do not send any notification at all

The ``notify-user`` and ``notify`` options are ignored if ``notification-mode``
is set to ``notification-system``.
@ -314,7 +314,7 @@ Options

There are a few per-datastore options:

* :ref:`Notifications <maintenance_notification>`
* :ref:`Notification mode and legacy notification settings <notification_mode>`
* :ref:`Maintenance Mode <maintenance_mode>`
* Verification of incoming backups

@ -419,7 +419,7 @@ remote-source to avoid that an attacker that took over the source can cause
deletions of backups on the target hosts.
If the source-host became victim of a ransomware attack, there is a good chance
that sync jobs will fail, triggering an :ref:`error notification
<maintenance_notification>`.
<Notification Events>`.

It is also possible to create :ref:`tape backups <tape_backup>` as a second
storage medium. This way, you get an additional copy of your data on a

@ -30,6 +30,8 @@ please refer to the standard Debian documentation.

.. include:: certificate-management.rst

.. include:: external-metric-server.rst

.. include:: services.rst

.. include:: command-line-tools.rst

@ -38,7 +38,8 @@ Recommended Server System Requirements

* Backup storage:

  * Use only SSDs, for best results
  * Prefer fast storage that delivers high IOPS for random IO workloads; use
    only enterprise SSDs for best results.
  * If HDDs are used: Using a metadata cache is highly recommended, for example,
    add a ZFS :ref:`special device mirror <local_zfs_special_device>`.

@ -28,6 +28,9 @@ which are not chunked, e.g. the client log), or one or more indexes

When uploading an index, the client first has to read the source data, chunk it
and send the data as chunks with their identifying checksum to the server.
When using the :ref:`change detection mode <change_detection_mode>`, payload
chunks for unchanged files are reused from the previous snapshot, thereby not
reading the source data again.

If there is a previous Snapshot in the backup group, the client can first
download the chunk list of the previous Snapshot. If it detects a chunk that

@ -53,8 +56,9 @@ The chunks of a datastore are found in

  <datastore-root>/.chunks/

This chunk directory is further subdivided by the first four bytes of the
chunk's checksum, so a chunk with the checksum
This chunk directory is further subdivided into directories grouping chunks by
their checksum's 2-byte prefix (given as 4 hexadecimal digits), so a chunk with
the checksum

  a342e8151cbf439ce65f3df696b54c67a114982cc0aa751f2852c2f7acc19a8b

@ -27,6 +27,9 @@ choose the realm when you add a new user. Possible realms are:

:ldap: LDAP server. Users can authenticate against external LDAP servers.

:ad: Active Directory server. Users can authenticate against external Active
  Directory servers.

After installation, there is a single user, ``root@pam``, which corresponds to
the Unix superuser. User configuration information is stored in the file
``/etc/proxmox-backup/user.cfg``. You can use the ``proxmox-backup-manager``

@ -646,15 +649,47 @@ A full list of all configuration parameters can be found at :ref:`domains.cfg`.
server, you must also add them as a user of that realm in Proxmox Backup
Server. This can be carried out automatically with syncing.

User Synchronization in LDAP realms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _user_realms_ad:

It is possible to automatically sync users for LDAP-based realms, rather than
having to add them to Proxmox VE manually. Synchronization options can be set
in the LDAP realm configuration dialog window in the GUI and via the
``proxmox-backup-manager ldap create/update`` command.
User synchronization can started in the GUI at
Configuration > Access Control > Realms by selecting a realm and pressing the
`Sync` button. In the sync dialog, some of the default options set in the realm
configuration can be overridden. Alternatively, user synchronization can also
be started via the ``proxmox-backup-manager ldap sync`` command.
Active Directory
~~~~~~~~~~~~~~~~

Proxmox Backup Server can also utilize external Microsoft Active Directory
servers for user authentication.
To achieve this, a realm of the type ``ad`` has to be configured.

For an Active Directory realm, the authentication domain name and the server
address must be specified. Most options from :ref:`user_realms_ldap` apply to
Active Directory as well, most importantly the bind credentials ``bind-dn``
and ``password``. This is typically required by default for Microsoft Active
Directory. The ``bind-dn`` can be specified either in AD-specific
``user@company.net`` syntax or the common LDAP-DN syntax.

The authentication domain name must only be specified if anonymous bind is
requested. If bind credentials are given, the domain name is automatically
inferred from the bind user's base domain, as reported by the Active Directory
server.

A full list of all configuration parameters can be found at :ref:`domains.cfg`.

.. note:: In order to allow a particular user to authenticate using the Active
   Directory server, you must also add them as a user of that realm in Proxmox
   Backup Server. This can be carried out automatically with syncing.

.. note:: Currently, case-insensitive usernames are not supported.

User Synchronization in LDAP/AD realms
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

It is possible to automatically sync users for LDAP and AD-based realms, rather
than having to add them to Proxmox Backup Server manually. Synchronization
options can be set in the LDAP realm configuration dialog window in the GUI and
via the ``proxmox-backup-manager ldap`` and ``proxmox-backup-manager ad``
commands, respectively.

User synchronization can be started in the GUI under **Configuration > Access
Control > Realms** by selecting a realm and pressing the `Sync` button. In the
sync dialog, some of the default options set in the realm configuration can be
overridden. Alternatively, user synchronization can also be started via the
``proxmox-backup-manager ldap sync`` and ``proxmox-backup-manager ad sync``
commands, respectively.

91
examples/tape-write-benchmark.rs
Normal file
@ -0,0 +1,91 @@
use std::{
    fs::File,
    io::Read,
    time::{Duration, SystemTime},
};

use anyhow::{format_err, Error};
use pbs_tape::TapeWrite;
use proxmox_backup::tape::drive::{LtoTapeHandle, TapeDriver};

const URANDOM_PATH: &str = "/dev/urandom";
const CHUNK_SIZE: usize = 4 * 1024 * 1024; // 4 MiB
const LOG_LIMIT: usize = 4 * 1024 * 1024 * 1024; // 4 GiB

fn write_chunks<'a>(
    mut writer: Box<dyn 'a + TapeWrite>,
    blob_size: usize,
    max_size: usize,
    max_time: Duration,
) -> Result<(), Error> {
    // prepare chunks in memory

    let mut blob: Vec<u8> = vec![0u8; blob_size];

    let mut file = File::open(URANDOM_PATH)?;
    file.read_exact(&mut blob[..])?;

    let start_time = SystemTime::now();
    loop {
        let iteration_time = SystemTime::now();
        let mut count = 0;
        let mut bytes_written = 0;
        let mut idx = 0;
        let mut incr_count = 0;
        loop {
            if writer.write_all(&blob)? {
                eprintln!("LEOM reached");
                break;
            }

            // modifying chunks a bit to mitigate compression/deduplication
            blob[idx] = blob[idx].wrapping_add(1);
            incr_count += 1;
            if incr_count >= 256 {
                incr_count = 0;
                idx += 1;
            }
            count += 1;
            bytes_written += blob_size;

            if bytes_written > max_size {
                break;
            }
        }

        let elapsed = iteration_time.elapsed()?.as_secs_f64();
        let elapsed_total = start_time.elapsed()?;
        eprintln!(
            "{:.2}s: wrote {} chunks ({:.2} MB at {:.2} MB/s, average: {:.2} MB/s)",
            elapsed_total.as_secs_f64(),
            count,
            bytes_written as f64 / 1_000_000.0,
            (bytes_written as f64) / (1_000_000.0 * elapsed),
            (writer.bytes_written() as f64) / (1_000_000.0 * elapsed_total.as_secs_f64()),
        );

        if elapsed_total > max_time {
            break;
        }
    }

    Ok(())
}
fn main() -> Result<(), Error> {
    let mut args = std::env::args_os();
    args.next(); // binary name
    let path = args.next().expect("no path to tape device given");
    let file = File::open(path).map_err(|err| format_err!("could not open tape device: {err}"))?;
    let mut drive = LtoTapeHandle::new(file)
        .map_err(|err| format_err!("error creating drive handle: {err}"))?;
    write_chunks(
        drive
            .write_file()
            .map_err(|err| format_err!("error starting file write: {err}"))?,
        CHUNK_SIZE,
        LOG_LIMIT,
        Duration::new(60 * 20, 0),
    )
    .map_err(|err| format_err!("error writing data to tape: {err}"))?;
    Ok(())
}
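The new example above reads the tape device path as its first argument and
writes slightly-varied 4 MiB blocks of ``/dev/urandom`` data until the size
or time limit is reached, or LEOM is hit. A hypothetical invocation (the
device node is illustrative):

  # cargo run --release --example tape-write-benchmark -- /dev/nst0
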
@ -5,10 +5,10 @@ extern crate proxmox_backup;
use anyhow::Error;
use std::io::{Read, Write};

use pbs_datastore::Chunker;
use pbs_datastore::{Chunker, ChunkerImpl};

struct ChunkWriter {
    chunker: Chunker,
    chunker: ChunkerImpl,
    last_chunk: usize,
    chunk_offset: usize,

@ -23,7 +23,7 @@ struct ChunkWriter {
impl ChunkWriter {
    fn new(chunk_size: usize) -> Self {
        ChunkWriter {
            chunker: Chunker::new(chunk_size),
            chunker: ChunkerImpl::new(chunk_size),
            last_chunk: 0,
            chunk_offset: 0,
            chunk_count: 0,

@ -69,7 +69,8 @@ impl Write for ChunkWriter {
    fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> {
        let chunker = &mut self.chunker;

        let pos = chunker.scan(data);
        let ctx = pbs_datastore::chunker::Context::default();
        let pos = chunker.scan(data, &ctx);

        if pos > 0 {
            self.chunk_offset += pos;

@ -1,6 +1,6 @@
extern crate proxmox_backup;

use pbs_datastore::Chunker;
use pbs_datastore::{Chunker, ChunkerImpl};

fn main() {
    let mut buffer = Vec::new();

@ -12,7 +12,7 @@ fn main() {
            buffer.push(byte);
        }
    }
    let mut chunker = Chunker::new(64 * 1024);
    let mut chunker = ChunkerImpl::new(64 * 1024);

    let count = 5;

@ -23,8 +23,9 @@ fn main() {
    for _i in 0..count {
        let mut pos = 0;
        let mut _last = 0;
        let ctx = pbs_datastore::chunker::Context::default();
        while pos < buffer.len() {
            let k = chunker.scan(&buffer[pos..]);
            let k = chunker.scan(&buffer[pos..], &ctx);
            if k == 0 {
                //println!("LAST {}", pos);
                break;
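Taken together, these hunks show the new two-argument scan API: the concrete
``ChunkerImpl`` plus an explicit ``Context``. A minimal sketch of the usage
pattern, assuming the imports shown in the diff above (``Chunker`` being the
trait that provides ``scan``):

use pbs_datastore::{Chunker, ChunkerImpl};

// Collect content-defined chunk boundaries in `data`, mirroring the
// scan loop from the example above (a sketch, not the full example code).
fn boundaries(data: &[u8]) -> Vec<usize> {
    let mut chunker = ChunkerImpl::new(64 * 1024); // target chunk size
    let ctx = pbs_datastore::chunker::Context::default();
    let mut pos = 0;
    let mut cuts = Vec::new();
    while pos < data.len() {
        // scan returns the boundary offset within the given slice,
        // or 0 if no further boundary was found
        let k = chunker.scan(&data[pos..], &ctx);
        if k == 0 {
            break;
        }
        pos += k;
        cuts.push(pos);
    }
    cuts
}
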
@ -1,9 +1,10 @@
use std::str::FromStr;

use anyhow::Error;
use futures::*;

extern crate proxmox_backup;

use pbs_client::ChunkStream;
use proxmox_human_byte::HumanByte;

// Test Chunker with real data read from a file.
//

@ -21,12 +22,22 @@ fn main() {
async fn run() -> Result<(), Error> {
    let file = tokio::fs::File::open("random-test.dat").await?;

    let stream = tokio_util::codec::FramedRead::new(file, tokio_util::codec::BytesCodec::new())
        .map_ok(|bytes| bytes.to_vec())
    let mut args = std::env::args();
    args.next();

    let buffer_size = args.next().unwrap_or("8k".to_string());
    let buffer_size = HumanByte::from_str(&buffer_size)?;
    println!("Using buffer size {buffer_size}");

    let stream = tokio_util::codec::FramedRead::with_capacity(
        file,
        tokio_util::codec::BytesCodec::new(),
        buffer_size.as_u64() as usize,
    )
    .map_err(Error::from);

    //let chunk_stream = FixedChunkStream::new(stream, 4*1024*1024);
    let mut chunk_stream = ChunkStream::new(stream, None);
    let mut chunk_stream = ChunkStream::new(stream, None, None, None);

    let start_time = std::time::Instant::now();

@ -40,7 +51,7 @@ async fn run() -> Result<(), Error> {
        repeat += 1;
        stream_len += chunk.len();

        println!("Got chunk {}", chunk.len());
        //println!("Got chunk {}", chunk.len());
    }

    let speed =
@ -18,7 +18,7 @@ async fn upload_speed() -> Result<f64, Error> {
    let backup_time = proxmox_time::epoch_i64();

    let client = BackupWriter::start(
        client,
        &client,
        None,
        datastore,
        &BackupNamespace::root(),

@ -9,13 +9,13 @@ description = "general API type helpers for PBS"
anyhow.workspace = true
const_format.workspace = true
hex.workspace = true
lazy_static.workspace = true
percent-encoding.workspace = true
regex.workspace = true
serde.workspace = true
serde_plain.workspace = true

proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
proxmox-apt-api-types.workspace = true
proxmox-human-byte.workspace = true
proxmox-lang.workspace=true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }

@ -223,7 +223,7 @@ pub enum Role {
    RemoteAudit = ROLE_REMOTE_AUDIT,
    /// Remote Administrator
    RemoteAdmin = ROLE_REMOTE_ADMIN,
    /// Syncronisation Opertator
    /// Synchronization Operator
    RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR,
    /// Tape Auditor
    TapeAudit = ROLE_TAPE_AUDIT,

98
pbs-api-types/src/ad.rs
Normal file
@ -0,0 +1,98 @@
use serde::{Deserialize, Serialize};

use proxmox_schema::{api, Updater};

use super::{
    LdapMode, LDAP_DOMAIN_SCHEMA, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
    SYNC_ATTRIBUTES_SCHEMA, SYNC_DEFAULTS_STRING_SCHEMA, USER_CLASSES_SCHEMA,
};

#[api(
    properties: {
        "realm": {
            schema: REALM_ID_SCHEMA,
        },
        "comment": {
            optional: true,
            schema: SINGLE_LINE_COMMENT_SCHEMA,
        },
        "verify": {
            optional: true,
            default: false,
        },
        "sync-defaults-options": {
            schema: SYNC_DEFAULTS_STRING_SCHEMA,
            optional: true,
        },
        "sync-attributes": {
            schema: SYNC_ATTRIBUTES_SCHEMA,
            optional: true,
        },
        "user-classes" : {
            optional: true,
            schema: USER_CLASSES_SCHEMA,
        },
        "base-dn" : {
            schema: LDAP_DOMAIN_SCHEMA,
            optional: true,
        },
        "bind-dn" : {
            schema: LDAP_DOMAIN_SCHEMA,
            optional: true,
        }
    },
)]
#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// AD realm configuration properties.
pub struct AdRealmConfig {
    #[updater(skip)]
    pub realm: String,
    /// AD server address
    pub server1: String,
    /// Fallback AD server address
    #[serde(skip_serializing_if = "Option::is_none")]
    pub server2: Option<String>,
    /// AD server Port
    #[serde(skip_serializing_if = "Option::is_none")]
    pub port: Option<u16>,
    /// Base domain name. Users are searched under this domain using a `subtree search`.
    /// Expected to be set only internally to `defaultNamingContext` of the AD server, but can be
    /// overridden if the need arises.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub base_dn: Option<String>,
    /// Comment
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
    /// Connection security
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mode: Option<LdapMode>,
    /// Verify server certificate
    #[serde(skip_serializing_if = "Option::is_none")]
    pub verify: Option<bool>,
    /// CA certificate to use for the server. The path can point to
    /// either a file, or a directory. If it points to a file,
    /// the PEM-formatted X.509 certificate stored at the path
    /// will be added as a trusted certificate.
    /// If the path points to a directory,
    /// the directory replaces the system's default certificate
    /// store at `/etc/ssl/certs` - Every file in the directory
    /// will be loaded as a trusted certificate.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub capath: Option<String>,
    /// Bind domain to use for looking up users
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bind_dn: Option<String>,
    /// Custom LDAP search filter for user sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub filter: Option<String>,
    /// Default options for AD sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_defaults_options: Option<String>,
    /// List of LDAP attributes to sync from AD to user config
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_attributes: Option<String>,
    /// User ``objectClass`` classes to sync
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user_classes: Option<String>,
}
@ -11,8 +11,8 @@ use proxmox_schema::{
};

use crate::{
    Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid, BACKUP_ID_RE,
    BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA,
    Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, MaintenanceType, Userid,
    BACKUP_ID_RE, BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA,
    GC_SCHEDULE_SCHEMA, GROUP_OR_SNAPSHOT_PATH_REGEX_STR, PROXMOX_SAFE_ID_FORMAT,
    PROXMOX_SAFE_ID_REGEX_STR, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA,
    SNAPSHOT_PATH_REGEX_STR, UPID,

@ -309,6 +309,10 @@ pub struct DataStoreConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify: Option<String>,

    /// Opt in to the new notification system
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notification_mode: Option<NotificationMode>,

    /// Datastore tuning options
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tuning: Option<String>,

@ -318,6 +322,23 @@ pub struct DataStoreConfig {
    pub maintenance_mode: Option<String>,
}

#[api]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq, Default)]
#[serde(rename_all = "kebab-case")]
/// Configure how notifications for this datastore should be sent.
/// `legacy-sendmail` sends email notifications to the user configured
/// in `notify-user` via the system's `sendmail` executable.
/// `notification-system` emits matchable notification events to the
/// notification system.
pub enum NotificationMode {
    /// Send notifications via the system's sendmail command to the user
    /// configured in `notify-user`
    #[default]
    LegacySendmail,
    /// Emit notification events to the notification system
    NotificationSystem,
}

impl DataStoreConfig {
    pub fn new(name: String, path: String) -> Self {
        Self {

@ -330,16 +351,51 @@ impl DataStoreConfig {
            verify_new: None,
            notify_user: None,
            notify: None,
            notification_mode: None,
            tuning: None,
            maintenance_mode: None,
        }
    }

    pub fn get_maintenance_mode(&self) -> Option<MaintenanceMode> {
        self.maintenance_mode
            .as_ref()
            .and_then(|str| MaintenanceMode::API_SCHEMA.parse_property_string(str).ok())
            .and_then(|value| MaintenanceMode::deserialize(value).ok())
        self.maintenance_mode.as_ref().and_then(|str| {
            MaintenanceMode::deserialize(proxmox_schema::de::SchemaDeserializer::new(
                str,
                &MaintenanceMode::API_SCHEMA,
            ))
            .ok()
        })
    }

    pub fn set_maintenance_mode(&mut self, new_mode: Option<MaintenanceMode>) -> Result<(), Error> {
        let current_type = self.get_maintenance_mode().map(|mode| mode.ty);
        let new_type = new_mode.as_ref().map(|mode| mode.ty);

        match current_type {
            Some(MaintenanceType::ReadOnly) => { /* always OK */ }
            Some(MaintenanceType::Offline) => { /* always OK */ }
            Some(MaintenanceType::Delete) => {
                match new_type {
                    Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ }
                    _ => {
                        bail!("datastore is being deleted")
                    }
                }
            }
            None => { /* always OK */ }
        }

        let new_mode = match new_mode {
            Some(new_mode) => Some(
                proxmox_schema::property_string::PropertyString::new(new_mode)
                    .to_property_string()?,
            ),
            None => None,
        };

        self.maintenance_mode = new_mode;

        Ok(())
    }
}

|
||||
},
|
||||
},
|
||||
)]
|
||||
#[derive(Clone, Default, Serialize, Deserialize, PartialEq)]
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Garbage collection status.
|
||||
pub struct GarbageCollectionStatus {
|
||||
@ -1273,6 +1329,38 @@ pub struct GarbageCollectionStatus {
|
||||
pub still_bad: usize,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"status": {
|
||||
type: GarbageCollectionStatus,
|
||||
},
|
||||
}
|
||||
)]
|
||||
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
/// Garbage Collection general info
|
||||
pub struct GarbageCollectionJobStatus {
|
||||
/// Datastore
|
||||
pub store: String,
|
||||
#[serde(flatten)]
|
||||
pub status: GarbageCollectionStatus,
|
||||
/// Schedule of the gc job
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub schedule: Option<String>,
|
||||
/// Time of the next gc run
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub next_run: Option<i64>,
|
||||
/// Endtime of the last gc run
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub last_run_endtime: Option<i64>,
|
||||
/// State of the last gc run
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub last_run_state: Option<String>,
|
||||
/// Duration of last gc run
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub duration: Option<i64>,
|
||||
}
|
||||
|
||||
#[api(
|
||||
properties: {
|
||||
"gc-status": {
|
||||
|
@ -8,9 +8,9 @@ use serde::{Deserialize, Serialize};
use proxmox_schema::*;

use crate::{
    Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA,
    BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA,
    MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
    Authid, BackupNamespace, BackupType, NotificationMode, RateLimitConfig, Userid,
    BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA,
    DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
    PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
};

@ -325,6 +325,8 @@ pub struct TapeBackupJobSetup {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notify_user: Option<Userid>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub notification_mode: Option<NotificationMode>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub group_filter: Option<Vec<GroupFilter>>,
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub ns: Option<BackupNamespace>,

@ -149,7 +149,7 @@ pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults
    .schema();

const REMOVE_VANISHED_DESCRIPTION: &str =
    "A semicolon-seperated list of things to remove when they or the user \
    "A semicolon-separated list of things to remove when they or the user \
    vanishes during user synchronization. The following values are possible: ``entry`` removes the \
    user when not returned from the sync; ``properties`` removes any \
    properties on existing user that do not appear in the source. \
@ -52,6 +52,13 @@ pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA};

use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR};

// re-export APT API types
pub use proxmox_apt_api_types::{
    APTChangeRepositoryOptions, APTGetChangelogOptions, APTRepositoriesResult, APTRepositoryFile,
    APTRepositoryFileError, APTRepositoryHandle, APTRepositoryInfo, APTStandardRepository,
    APTUpdateInfo, APTUpdateOptions,
};

#[rustfmt::skip]
pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*";

@ -130,6 +137,9 @@ pub use openid::*;
mod ldap;
pub use ldap::*;

mod ad;
pub use ad::*;

mod remote;
pub use remote::*;

@ -246,34 +256,6 @@ pub const PASSWORD_HINT_SCHEMA: Schema = StringSchema::new("Password hint.")
    .max_length(64)
    .schema();

#[api()]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "PascalCase")]
/// Describes a package for which an update is available.
pub struct APTUpdateInfo {
    /// Package name
    pub package: String,
    /// Package title
    pub title: String,
    /// Package architecture
    pub arch: String,
    /// Human readable package description
    pub description: String,
    /// New version to be updated to
    pub version: String,
    /// Old version currently installed
    pub old_version: String,
    /// Package origin
    pub origin: String,
    /// Package priority in human-readable form
    pub priority: String,
    /// Package section
    pub section: String,
    /// Custom extra field for additional package information
    #[serde(skip_serializing_if = "Option::is_none")]
    pub extra_info: Option<String>,
}

#[api()]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]

@ -335,36 +317,6 @@ pub const NODE_TASKS_LIST_TASKS_RETURN_TYPE: ReturnType = ReturnType {
    schema: &ArraySchema::new("A list of tasks.", &TaskListItem::API_SCHEMA).schema(),
};

#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "UPPERCASE")]
/// RRD consolidation mode
pub enum RRDMode {
    /// Maximum
    Max,
    /// Average
    Average,
}

#[api()]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
/// RRD time frame
pub enum RRDTimeFrame {
    /// Hour
    Hour,
    /// Day
    Day,
    /// Week
    Week,
    /// Month
    Month,
    /// Year
    Year,
    /// Decade (10 years)
    Decade,
}

#[api]
#[derive(Deserialize, Serialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]

@ -378,8 +330,13 @@ pub enum RealmType {
    OpenId,
    /// An LDAP realm
    Ldap,
    /// An Active Directory (AD) realm
    Ad,
}

serde_plain::derive_display_from_serialize!(RealmType);
serde_plain::derive_fromstr_from_deserialize!(RealmType);

#[api(
    properties: {
        realm: {

@ -33,7 +33,7 @@ pub enum Operation {
}

#[api]
#[derive(Deserialize, Serialize, PartialEq, Eq)]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// Maintenance type.
pub enum MaintenanceType {

@ -69,11 +69,11 @@ serde_plain::derive_fromstr_from_deserialize!(MaintenanceType);
pub struct MaintenanceMode {
    /// Type of maintenance ("read-only" or "offline").
    #[serde(rename = "type")]
    ty: MaintenanceType,
    pub ty: MaintenanceType,

    /// Reason for maintenance.
    #[serde(skip_serializing_if = "Option::is_none")]
    message: Option<String>,
    pub message: Option<String>,
}

impl MaintenanceMode {
@ -12,14 +12,12 @@ pub const METRIC_SERVER_ID_SCHEMA: Schema = StringSchema::new("Metrics Server ID
    .schema();

pub const INFLUXDB_BUCKET_SCHEMA: Schema = StringSchema::new("InfluxDB Bucket.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .default("proxmox")
    .schema();

pub const INFLUXDB_ORGANIZATION_SCHEMA: Schema = StringSchema::new("InfluxDB Organization.")
    .format(&PROXMOX_SAFE_ID_FORMAT)
    .min_length(3)
    .max_length(32)
    .default("proxmox")

@ -129,13 +127,14 @@ pub struct InfluxDbHttp {
    pub enable: bool,
    /// The base url of the influxdb server
    pub url: String,
    /// The Optional Token
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) API token
    pub token: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Named location where time series data is stored
    pub bucket: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Workspace for a group of users
    pub organization: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// The (optional) maximum body size

@ -188,3 +187,69 @@ pub struct MetricServerInfo {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub comment: Option<String>,
}

#[derive(Clone, Debug, Deserialize, Serialize)]
#[api(
    properties: {
        data: {
            type: Array,
            items: {
                type: MetricDataPoint,
            }
        }
    }
)]
/// Return type for the metric API endpoint
pub struct Metrics {
    /// List of metric data points, sorted by timestamp
    pub data: Vec<MetricDataPoint>,
}

#[api(
    properties: {
        id: {
            type: String,
        },
        metric: {
            type: String,
        },
        timestamp: {
            type: Integer,
        },
    },
)]
/// Metric data point
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct MetricDataPoint {
    /// Unique identifier for this metric object, for instance 'node/<nodename>'
    /// or 'qemu/<vmid>'.
    pub id: String,

    /// Name of the metric.
    pub metric: String,

    /// Time at which this metric was observed
    pub timestamp: i64,

    #[serde(rename = "type")]
    pub ty: MetricDataType,

    /// Metric value.
    pub value: f64,
}

#[api]
/// Type of the metric.
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
pub enum MetricDataType {
    /// gauge.
    Gauge,
    /// counter.
    Counter,
    /// derive.
    Derive,
}

serde_plain::derive_display_from_serialize!(MetricDataType);
serde_plain::derive_fromstr_from_deserialize!(MetricDataType);
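Given the serde attributes above (``type`` rename, lowercase enum), a single
serialized data point would presumably look like the following JSON; the
``metric`` name and values are illustrative, not taken from the API:

{
  "id": "node/localhost",
  "metric": "cpu_avg",
  "timestamp": 1712345678,
  "type": "gauge",
  "value": 0.42
}
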
@ -224,6 +224,15 @@ pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
        schema: NETWORK_INTERFACE_ARRAY_SCHEMA,
        optional: true,
    },
    "vlan-id": {
        description: "VLAN ID.",
        type: u16,
        optional: true,
    },
    "vlan-raw-device": {
        schema: NETWORK_INTERFACE_NAME_SCHEMA,
        optional: true,
    },
    bond_mode: {
        type: LinuxBondMode,
        optional: true,

@ -287,6 +296,12 @@ pub struct Interface {
    /// Enable bridge vlan support.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub bridge_vlan_aware: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-id")]
    pub vlan_id: Option<u16>,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(rename = "vlan-raw-device")]
    pub vlan_raw_device: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    pub slaves: Option<Vec<String>>,

@ -319,6 +334,8 @@ impl Interface {
            mtu: None,
            bridge_ports: None,
            bridge_vlan_aware: None,
            vlan_id: None,
            vlan_raw_device: None,
            slaves: None,
            bond_mode: None,
            bond_primary: None,

@ -93,6 +93,9 @@ pub struct DriveListEntry {
    /// the state of the drive if locked
    #[serde(skip_serializing_if = "Option::is_none")]
    pub state: Option<String>,
    /// Current device activity
    #[serde(skip_serializing_if = "Option::is_none")]
    pub activity: Option<DeviceActivity>,
}

#[api()]

@ -216,6 +219,9 @@ pub struct LtoDriveAndMediaStatus {
    /// Estimated tape wearout factor (assuming max. 16000 end-to-end passes)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub medium_wearout: Option<f64>,
    /// Current device activity
    #[serde(skip_serializing_if = "Option::is_none")]
    pub drive_activity: Option<DeviceActivity>,
}

#[api()]

@ -276,3 +282,68 @@ pub struct Lp17VolumeStatistics {
    /// Volume serial number
    pub serial: String,
}

/// The DT Device Activity from DT Device Status LP page
#[api]
#[derive(Copy, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum DeviceActivity {
    /// No activity
    NoActivity,
    /// Cleaning
    Cleaning,
    /// Loading
    Loading,
    /// Unloading
    Unloading,
    /// Other unspecified activity
    Other,
    /// Reading
    Reading,
    /// Writing
    Writing,
    /// Locating
    Locating,
    /// Rewinding
    Rewinding,
    /// Erasing
    Erasing,
    /// Formatting
    Formatting,
    /// Calibrating
    Calibrating,
    /// Other (DT)
    OtherDT,
    /// Updating microcode
    MicrocodeUpdate,
    /// Reading encrypted data
    ReadingEncrypted,
    /// Writing encrypted data
    WritingEncrypted,
}

impl TryFrom<u8> for DeviceActivity {
    type Error = Error;

    fn try_from(value: u8) -> Result<Self, Self::Error> {
        Ok(match value {
            0x00 => DeviceActivity::NoActivity,
            0x01 => DeviceActivity::Cleaning,
            0x02 => DeviceActivity::Loading,
            0x03 => DeviceActivity::Unloading,
            0x04 => DeviceActivity::Other,
            0x05 => DeviceActivity::Reading,
            0x06 => DeviceActivity::Writing,
            0x07 => DeviceActivity::Locating,
            0x08 => DeviceActivity::Rewinding,
            0x09 => DeviceActivity::Erasing,
            0x0A => DeviceActivity::Formatting,
            0x0B => DeviceActivity::Calibrating,
            0x0C => DeviceActivity::OtherDT,
            0x0D => DeviceActivity::MicrocodeUpdate,
            0x0E => DeviceActivity::ReadingEncrypted,
            0x0F => DeviceActivity::WritingEncrypted,
            other => bail!("invalid DT device activity value: {:x}", other),
        })
    }
}

@ -81,6 +81,9 @@ pub struct MediaListEntry {
    /// Media Pool
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pool: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    /// Bytes currently used
    pub bytes_used: Option<u64>,
}

#[api(

@ -1,7 +1,7 @@
use serde::{Deserialize, Serialize};

use proxmox_human_byte::HumanByte;
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};
use proxmox_schema::{api, ApiType, Schema, StringSchema, Updater};

use crate::{
    CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
@ -18,16 +18,6 @@ pub const TRAFFIC_CONTROL_ID_SCHEMA: Schema = StringSchema::new("Rule ID.")
    .max_length(32)
    .schema();

pub const TRAFFIC_CONTROL_RATE_SCHEMA: Schema =
    IntegerSchema::new("Rate limit (for Token bucket filter) in bytes/second.")
        .minimum(100_000)
        .schema();

pub const TRAFFIC_CONTROL_BURST_SCHEMA: Schema =
    IntegerSchema::new("Size of the token bucket (for Token bucket filter) in bytes.")
        .minimum(1000)
        .schema();

#[api(
    properties: {
        "rate-in": {
@ -71,6 +61,45 @@ impl RateLimitConfig {
            burst_out: burst,
        }
    }

    /// Create a [RateLimitConfig] from a [ClientRateLimitConfig]
    pub fn from_client_config(limit: ClientRateLimitConfig) -> Self {
        Self::with_same_inout(limit.rate, limit.burst)
    }
}

const CLIENT_RATE_LIMIT_SCHEMA: Schema = StringSchema {
    description: "Rate limit (for Token bucket filter) in bytes/s with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).",
    ..*HumanByte::API_SCHEMA.unwrap_string_schema()
}
.schema();

const CLIENT_BURST_SCHEMA: Schema = StringSchema {
    description: "Size of the token bucket (for Token bucket filter) in bytes with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, Gib, ...).",
    ..*HumanByte::API_SCHEMA.unwrap_string_schema()
}
.schema();

#[api(
    properties: {
        rate: {
            schema: CLIENT_RATE_LIMIT_SCHEMA,
            optional: true,
        },
        burst: {
            schema: CLIENT_BURST_SCHEMA,
            optional: true,
        },
    },
)]
#[derive(Serialize, Deserialize, Default, Clone)]
#[serde(rename_all = "kebab-case")]
/// Client Rate Limit Configuration
pub struct ClientRateLimitConfig {
    #[serde(skip_serializing_if = "Option::is_none")]
    rate: Option<HumanByte>,
    #[serde(skip_serializing_if = "Option::is_none")]
    burst: Option<HumanByte>,
}

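A short usage sketch for the new client-side limits (illustrative values; the serde_json round-trip stands in for API parameter deserialization):

```rust
// Sketch: parse human-readable client limits and widen them into the
// shared RateLimitConfig, applying the same value in both directions.
let client_limit: ClientRateLimitConfig =
    serde_json::from_value(serde_json::json!({ "rate": "100MiB", "burst": "1GiB" }))?;
let limit = RateLimitConfig::from_client_config(client_limit);
```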
#[api(

@ -1,5 +1,11 @@
//! Exports configuration data from the build system

pub const PROXMOX_BACKUP_CRATE_VERSION: &str = env!("CARGO_PKG_VERSION");

// TODO: clean-up, drop the RELEASE one, should not be required on its own and if it would be just
// the X.Y part, also add the Debian package revision (extracted through build.rs) in an existing
// or new constant.

pub const PROXMOX_PKG_VERSION: &str = concat!(
    env!("CARGO_PKG_VERSION_MAJOR"),
    ".",
@ -90,6 +96,8 @@ pub const PROXMOX_BACKUP_KERNEL_FN: &str =

pub const PROXMOX_BACKUP_SUBSCRIPTION_FN: &str = configdir!("/subscription");

pub const APT_PKG_STATE_FN: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/pkg-state.json");

/// Prepend configuration directory to a file name
///
/// This is a simple way to get the full path for configuration files.

@ -14,7 +14,6 @@ h2.workspace = true
hex.workspace = true
http.workspace = true
hyper.workspace = true
lazy_static.workspace = true
libc.workspace = true
log.workspace = true
nix.workspace = true
@ -39,7 +38,6 @@ proxmox-compression.workspace = true
proxmox-http = { workspace = true, features = [ "rate-limiter" ] }
proxmox-human-byte.workspace = true
proxmox-io = { workspace = true, features = [ "tokio" ] }
proxmox-lang.workspace = true
proxmox-router = { workspace = true, features = [ "cli", "server" ] }
proxmox-schema.workspace = true
proxmox-sys.workspace = true
@ -48,6 +46,5 @@ proxmox-time.workspace = true
pxar.workspace = true

pbs-api-types.workspace = true
pbs-buildcfg.workspace = true
pbs-datastore.workspace = true
pbs-tools.workspace = true

@ -1,7 +1,6 @@
use anyhow::{format_err, Error};
use std::fs::File;
use std::io::{Seek, SeekFrom, Write};
use std::os::unix::fs::OpenOptionsExt;
use std::sync::Arc;

use futures::future::AbortHandle;
@ -141,18 +140,14 @@ impl BackupReader {

    /// Download a .blob file
    ///
    /// This creates a temporary file in /tmp (using O_TMPFILE). The data is verified using
    /// the provided manifest.
    /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
    /// details). The data is verified using the provided manifest.
    pub async fn download_blob(
        &self,
        manifest: &BackupManifest,
        name: &str,
    ) -> Result<DataBlobReader<'_, File>, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        self.download(name, &mut tmpfile).await?;

@ -167,18 +162,14 @@ impl BackupReader {

    /// Download dynamic index file
    ///
    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
    /// the provided manifest.
    /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
    /// details). The index is verified using the provided manifest.
    pub async fn download_dynamic_index(
        &self,
        manifest: &BackupManifest,
        name: &str,
    ) -> Result<DynamicIndexReader, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        self.download(name, &mut tmpfile).await?;

@ -194,18 +185,14 @@ impl BackupReader {

    /// Download fixed index file
    ///
    /// This creates a temporary file in /tmp (using O_TMPFILE). The index is verified using
    /// the provided manifest.
    /// This creates a temporary file (See [`crate::tools::create_tmp_file`] for
    /// details). The index is verified using the provided manifest.
    pub async fn download_fixed_index(
        &self,
        manifest: &BackupManifest,
        name: &str,
    ) -> Result<FixedIndexReader, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        self.download(name, &mut tmpfile).await?;

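All three download helpers now delegate the O_TMPFILE boilerplate to a shared helper that this hunk only references. A plausible sketch of such a helper, assuming it keeps the previous O_TMPFILE approach (the real implementation lives in the client's tools module and may add a fallback for filesystems without O_TMPFILE support):

```rust
use std::fs::File;
use std::os::unix::fs::OpenOptionsExt;

// Sketch of create_tmp_file(): an unnamed temporary file, readable and
// writable, that vanishes when the handle is dropped.
pub fn create_tmp_file() -> std::io::Result<File> {
    std::fs::OpenOptions::new()
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE)
        .open("/tmp")
}
```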
@ -1,4 +1,5 @@
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};

use proxmox_schema::*;

@ -45,3 +46,28 @@ pub fn parse_backup_specification(value: &str) -> Result<BackupSpecification, Er

    bail!("unable to parse backup source specification '{}'", value);
}

#[api]
#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "lowercase")]
/// Mode to detect file changes since last backup run
pub enum BackupDetectionMode {
    /// Encode backup as self contained pxar archive
    #[default]
    Legacy,
    /// Split backup mode, re-encode payload data
    Data,
    /// Compare metadata, reuse payload chunks if metadata unchanged
    Metadata,
}

impl BackupDetectionMode {
    /// Selected mode is data based file change detection with split meta/payload streams
    pub fn is_data(&self) -> bool {
        matches!(self, Self::Data)
    }
    /// Selected mode is metadata based file change detection
    pub fn is_metadata(&self) -> bool {
        matches!(self, Self::Metadata)
    }
}

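A small usage sketch for the new detection mode (illustrative only; the lowercase string mirrors the serde rename):

```rust
// Sketch: both split modes need a separate payload stream, only the
// default Legacy mode encodes a single self-contained archive.
let mode: BackupDetectionMode = serde_json::from_value(serde_json::json!("metadata"))?;
let separate_payload_stream = mode.is_data() || mode.is_metadata();
assert!(separate_payload_stream);
```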
@ -1,6 +1,5 @@
use std::collections::HashSet;
use std::future::Future;
use std::os::unix::fs::OpenOptionsExt;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};

@ -22,7 +21,9 @@ use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_tools::crypt_config::CryptConfig;

use proxmox_human_byte::HumanByte;
use proxmox_time::TimeSpan;

use super::inject_reused_chunks::{InjectChunks, InjectReusedChunks, InjectedChunksInfo};
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};

use super::{H2Client, HttpClient};
@ -56,14 +57,21 @@ pub struct UploadOptions {
struct UploadStats {
    chunk_count: usize,
    chunk_reused: usize,
    chunk_injected: usize,
    size: usize,
    size_reused: usize,
    size_injected: usize,
    size_compressed: usize,
    duration: std::time::Duration,
    csum: [u8; 32],
}

type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
struct ChunkUploadResponse {
    future: h2::client::ResponseFuture,
    size: usize,
}

type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<ChunkUploadResponse>)>;
type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;

impl BackupWriter {
@ -78,7 +86,7 @@ impl BackupWriter {
    // FIXME: extract into (flattened) parameter struct?
    #[allow(clippy::too_many_arguments)]
    pub async fn start(
        client: HttpClient,
        client: &HttpClient,
        crypt_config: Option<Arc<CryptConfig>>,
        datastore: &str,
        ns: &BackupNamespace,
@ -265,6 +273,7 @@ impl BackupWriter {
        archive_name: &str,
        stream: impl Stream<Item = Result<bytes::BytesMut, Error>>,
        options: UploadOptions,
        injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
    ) -> Result<BackupStats, Error> {
        let known_chunks = Arc::new(Mutex::new(HashSet::new()));

@ -329,6 +338,12 @@ impl BackupWriter {
            .as_u64()
            .unwrap();

        let archive = if log::log_enabled!(log::Level::Debug) {
            archive_name
        } else {
            pbs_tools::format::strip_server_file_extension(archive_name)
        };

        let upload_stats = Self::upload_chunk_info_stream(
            self.h2.clone(),
            wid,
@ -341,16 +356,21 @@ impl BackupWriter {
                None
            },
            options.compress,
            injections,
            archive,
        )
        .await?;

        let size_dirty = upload_stats.size - upload_stats.size_reused;
        let size: HumanByte = upload_stats.size.into();
        let archive = if log::log_enabled!(log::Level::Debug) {
            archive_name
        } else {
            pbs_tools::format::strip_server_file_extension(archive_name)
        };

        if upload_stats.chunk_injected > 0 {
            log::info!(
                "{archive}: reused {} from previous snapshot for unchanged files ({} chunks)",
                HumanByte::from(upload_stats.size_injected),
                upload_stats.chunk_injected,
            );
        }

        if archive_name != CATALOG_NAME {
            let speed: HumanByte =
@ -358,14 +378,9 @@ impl BackupWriter {
            let size_dirty: HumanByte = size_dirty.into();
            let size_compressed: HumanByte = upload_stats.size_compressed.into();
            log::info!(
                "{}: had to backup {} of {} (compressed {}) in {:.2}s",
                archive,
                size_dirty,
                size,
                size_compressed,
                "{archive}: had to backup {size_dirty} of {size} (compressed {size_compressed}) in {:.2} s (average {speed}/s)",
                upload_stats.duration.as_secs_f64()
            );
            log::info!("{}: average backup speed: {}/s", archive, speed);
        } else {
            log::info!("Uploaded backup catalog ({})", size);
        }
@ -455,6 +470,7 @@ impl BackupWriter {
        h2: H2Client,
        wid: u64,
        path: String,
        uploaded: Arc<AtomicUsize>,
    ) -> (UploadQueueSender, UploadResultReceiver) {
        let (verify_queue_tx, verify_queue_rx) = mpsc::channel(64);
        let (verify_result_tx, verify_result_rx) = oneshot::channel();
@ -463,15 +479,21 @@ impl BackupWriter {
        tokio::spawn(
            ReceiverStream::new(verify_queue_rx)
                .map(Ok::<_, Error>)
                .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<h2::client::ResponseFuture>)| {
                .and_then(move |(merged_chunk_info, response): (MergedChunkInfo, Option<ChunkUploadResponse>)| {
                    match (response, merged_chunk_info) {
                        (Some(response), MergedChunkInfo::Known(list)) => {
                            Either::Left(
                                response
                                    .future
                                    .map_err(Error::from)
                                    .and_then(H2Client::h2api_response)
                                    .and_then(move |_result| {
                                    .and_then({
                                        let uploaded = uploaded.clone();
                                        move |_result| {
                                            // account for uploaded bytes for progress output
                                            uploaded.fetch_add(response.size, Ordering::SeqCst);
                                            future::ok(MergedChunkInfo::Known(list))
                                        }
                                    })
                            )
                        }
@ -523,11 +545,7 @@ impl BackupWriter {
        manifest: &BackupManifest,
        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
    ) -> Result<FixedIndexReader, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        let param = json!({ "archive-name": archive_name });
        self.h2
@ -562,20 +580,15 @@ impl BackupWriter {
        manifest: &BackupManifest,
        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
    ) -> Result<DynamicIndexReader, Error> {
        let mut tmpfile = std::fs::OpenOptions::new()
            .write(true)
            .read(true)
            .custom_flags(libc::O_TMPFILE)
            .open("/tmp")?;
        let mut tmpfile = crate::tools::create_tmp_file()?;

        let param = json!({ "archive-name": archive_name });
        self.h2
            .download("previous", Some(param), &mut tmpfile)
            .await?;

        let index = DynamicIndexReader::new(tmpfile).map_err(|err| {
            format_err!("unable to read dynmamic index '{}' - {}", archive_name, err)
        })?;
        let index = DynamicIndexReader::new(tmpfile)
            .map_err(|err| format_err!("unable to read dynamic index '{archive_name}' - {err}"))?;
        // Note: do not use values stored in index (not trusted) - instead, computed them again
        let (csum, size) = index.compute_csum();
        manifest.verify_file(archive_name, &csum, size)?;
@ -637,33 +650,85 @@ impl BackupWriter {
        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
        crypt_config: Option<Arc<CryptConfig>>,
        compress: bool,
        injections: Option<std::sync::mpsc::Receiver<InjectChunks>>,
        archive: &str,
    ) -> impl Future<Output = Result<UploadStats, Error>> {
        let total_chunks = Arc::new(AtomicUsize::new(0));
        let total_chunks2 = total_chunks.clone();
        let known_chunk_count = Arc::new(AtomicUsize::new(0));
        let known_chunk_count2 = known_chunk_count.clone();
        let injected_chunk_count = Arc::new(AtomicUsize::new(0));
        let injected_chunk_count2 = injected_chunk_count.clone();

        let stream_len = Arc::new(AtomicUsize::new(0));
        let stream_len2 = stream_len.clone();
        let stream_len3 = stream_len.clone();
        let compressed_stream_len = Arc::new(AtomicU64::new(0));
        let compressed_stream_len2 = compressed_stream_len.clone();
        let reused_len = Arc::new(AtomicUsize::new(0));
        let reused_len2 = reused_len.clone();
        let injected_len = Arc::new(AtomicUsize::new(0));
        let injected_len2 = injected_len.clone();
        let uploaded_len = Arc::new(AtomicUsize::new(0));

        let append_chunk_path = format!("{}_index", prefix);
        let upload_chunk_path = format!("{}_chunk", prefix);
        let is_fixed_chunk_size = prefix == "fixed";

        let (upload_queue, upload_result) =
            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path);
            Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, uploaded_len.clone());

        let start_time = std::time::Instant::now();

        let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new())));
        let index_csum_2 = index_csum.clone();

        let progress_handle = if archive.ends_with(".img")
            || archive.ends_with(".pxar")
            || archive.ends_with(".ppxar")
        {
            Some(tokio::spawn(async move {
                loop {
                    tokio::time::sleep(tokio::time::Duration::from_secs(60)).await;

                    let size = HumanByte::from(stream_len3.load(Ordering::SeqCst));
                    let size_uploaded = HumanByte::from(uploaded_len.load(Ordering::SeqCst));
                    let elapsed = TimeSpan::from(start_time.elapsed());

                    log::info!("processed {size} in {elapsed}, uploaded {size_uploaded}");
                }
            }))
        } else {
            None
        };

        stream
            .and_then(move |data| {
            .inject_reused_chunks(injections, stream_len.clone())
            .and_then(move |chunk_info| match chunk_info {
                InjectedChunksInfo::Known(chunks) => {
                    // account for injected chunks
                    let count = chunks.len();
                    total_chunks.fetch_add(count, Ordering::SeqCst);
                    injected_chunk_count.fetch_add(count, Ordering::SeqCst);

                    let mut known = Vec::new();
                    let mut guard = index_csum.lock().unwrap();
                    let csum = guard.as_mut().unwrap();
                    for chunk in chunks {
                        let offset =
                            stream_len.fetch_add(chunk.size() as usize, Ordering::SeqCst) as u64;
                        reused_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
                        injected_len.fetch_add(chunk.size() as usize, Ordering::SeqCst);
                        let digest = chunk.digest();
                        known.push((offset, digest));
                        let end_offset = offset + chunk.size();
                        csum.update(&end_offset.to_le_bytes());
                        csum.update(&digest);
                    }
                    future::ok(MergedChunkInfo::Known(known))
                }
                InjectedChunksInfo::Raw(data) => {
                    // account for not injected chunks (new and known)
                    let chunk_len = data.len();

                    total_chunks.fetch_add(1, Ordering::SeqCst);
@ -706,6 +771,7 @@ impl BackupWriter {
                    })
                }))
            }
        }
            })
            .merge_known_chunks()
            .try_for_each(move |merged_chunk_info| {
@ -747,7 +813,13 @@ impl BackupWriter {
                    Either::Left(h2.send_request(request, upload_data).and_then(
                        move |response| async move {
                            upload_queue
                                .send((new_info, Some(response)))
                                .send((
                                    new_info,
                                    Some(ChunkUploadResponse {
                                        future: response,
                                        size: chunk_info.chunk_len as usize,
                                    }),
                                ))
                                .await
                                .map_err(|err| {
                                    format_err!("failed to send to upload queue: {}", err)
@ -768,18 +840,26 @@ impl BackupWriter {
        let duration = start_time.elapsed();
        let chunk_count = total_chunks2.load(Ordering::SeqCst);
        let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
        let chunk_injected = injected_chunk_count2.load(Ordering::SeqCst);
        let size = stream_len2.load(Ordering::SeqCst);
        let size_reused = reused_len2.load(Ordering::SeqCst);
        let size_injected = injected_len2.load(Ordering::SeqCst);
        let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize;

        let mut guard = index_csum_2.lock().unwrap();
        let csum = guard.take().unwrap().finish();

        if let Some(handle) = progress_handle {
            handle.abort();
        }

        futures::future::ok(UploadStats {
            chunk_count,
            chunk_reused,
            chunk_injected,
            size,
            size_reused,
            size_injected,
            size_compressed,
            duration,
            csum,
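The accounting above leans on one recurring pattern: an `Arc<AtomicUsize>` per statistic, cloned into every closure that updates it. Reduced to its essentials (a standalone sketch, not code from this diff):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

fn main() {
    // one shared counter; the clone moves into the worker closure
    let stream_len = Arc::new(AtomicUsize::new(0));
    let stream_len2 = stream_len.clone();

    let worker = move |chunk: &[u8]| {
        // each processed chunk adds its length to the shared total
        stream_len2.fetch_add(chunk.len(), Ordering::SeqCst);
    };

    worker(&[0u8; 1024]);
    assert_eq!(stream_len.load(Ordering::SeqCst), 1024);
}
```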
@ -1,4 +1,5 @@
use std::pin::Pin;
use std::sync::mpsc;
use std::task::{Context, Poll};

use anyhow::Error;
@ -6,23 +7,59 @@ use bytes::BytesMut;
use futures::ready;
use futures::stream::{Stream, TryStream};

use pbs_datastore::Chunker;
use pbs_datastore::{Chunker, ChunkerImpl, PayloadChunker};

use crate::inject_reused_chunks::InjectChunks;

/// Holds the queues for optional injection of reused dynamic index entries
pub struct InjectionData {
    boundaries: mpsc::Receiver<InjectChunks>,
    next_boundary: Option<InjectChunks>,
    injections: mpsc::Sender<InjectChunks>,
}

impl InjectionData {
    pub fn new(
        boundaries: mpsc::Receiver<InjectChunks>,
        injections: mpsc::Sender<InjectChunks>,
    ) -> Self {
        Self {
            boundaries,
            next_boundary: None,
            injections,
        }
    }
}

/// Split input stream into dynamic sized chunks
pub struct ChunkStream<S: Unpin> {
    input: S,
    chunker: Chunker,
    chunker: Box<dyn Chunker + Send>,
    buffer: BytesMut,
    scan_pos: usize,
    consumed: u64,
    injection_data: Option<InjectionData>,
}

impl<S: Unpin> ChunkStream<S> {
    pub fn new(input: S, chunk_size: Option<usize>) -> Self {
    pub fn new(
        input: S,
        chunk_size: Option<usize>,
        injection_data: Option<InjectionData>,
        suggested_boundaries: Option<mpsc::Receiver<u64>>,
    ) -> Self {
        let chunk_size = chunk_size.unwrap_or(4 * 1024 * 1024);
        Self {
            input,
            chunker: Chunker::new(chunk_size.unwrap_or(4 * 1024 * 1024)),
            chunker: if let Some(suggested) = suggested_boundaries {
                Box::new(PayloadChunker::new(chunk_size, suggested))
            } else {
                Box::new(ChunkerImpl::new(chunk_size))
            },
            buffer: BytesMut::new(),
            scan_pos: 0,
            consumed: 0,
            injection_data,
        }
    }
}
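Construction of the two chunker variants, side by side (a sketch: `input_a`/`input_b` are placeholders for real byte streams):

```rust
use std::sync::mpsc;

// Metadata or unified stream: pure content-defined chunking.
let plain = ChunkStream::new(input_a, Some(4 * 1024 * 1024), None, None);

// Payload stream: forced boundaries arrive via the InjectionData
// receiver, the encoder's suggested cut points via `suggested_rx`.
let (injections_tx, _injections_rx) = mpsc::channel();
let (_boundaries_tx, boundaries_rx) = mpsc::channel();
let (_suggested_tx, suggested_rx) = mpsc::channel();
let split = ChunkStream::new(
    input_b,
    Some(4 * 1024 * 1024),
    Some(InjectionData::new(boundaries_rx, injections_tx)),
    Some(suggested_rx),
);
```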
@ -39,19 +76,87 @@ where

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();

        loop {
            let ctx = pbs_datastore::chunker::Context {
                base: this.consumed,
                total: this.buffer.len() as u64,
            };

            if let Some(InjectionData {
                boundaries,
                next_boundary,
                injections,
            }) = this.injection_data.as_mut()
            {
                if next_boundary.is_none() {
                    if let Ok(boundary) = boundaries.try_recv() {
                        *next_boundary = Some(boundary);
                    }
                }

                if let Some(inject) = next_boundary.take() {
                    // require forced boundary, lookup next regular boundary
                    let pos = if this.scan_pos < this.buffer.len() {
                        this.chunker.scan(&this.buffer[this.scan_pos..], &ctx)
                    } else {
                        0
                    };

                    let chunk_boundary = if pos == 0 {
                        this.consumed + this.buffer.len() as u64
                    } else {
                        this.consumed + (this.scan_pos + pos) as u64
                    };

                    if inject.boundary <= chunk_boundary {
                        // forced boundary is before next boundary, force within current buffer
                        let chunk_size = (inject.boundary - this.consumed) as usize;
                        let raw_chunk = this.buffer.split_to(chunk_size);
                        this.chunker.reset();
                        this.scan_pos = 0;

                        this.consumed += chunk_size as u64;

                        // add the size of the injected chunks to consumed, so chunk stream offsets
                        // are in sync with the rest of the archive.
                        this.consumed += inject.size as u64;

                        injections.send(inject).unwrap();

                        // the chunk can be empty, return nevertheless to allow the caller to
                        // make progress by consuming from the injection queue
                        return Poll::Ready(Some(Ok(raw_chunk)));
                    } else if pos != 0 {
                        *next_boundary = Some(inject);
                        // forced boundary is after next boundary, split off chunk from buffer
                        let chunk_size = this.scan_pos + pos;
                        let raw_chunk = this.buffer.split_to(chunk_size);
                        this.consumed += chunk_size as u64;
                        this.scan_pos = 0;

                        return Poll::Ready(Some(Ok(raw_chunk)));
                    } else {
                        // forced boundary is after current buffer length, continue reading
                        *next_boundary = Some(inject);
                        this.scan_pos = this.buffer.len();
                    }
                }
            }

            if this.scan_pos < this.buffer.len() {
                let boundary = this.chunker.scan(&this.buffer[this.scan_pos..]);
                let boundary = this.chunker.scan(&this.buffer[this.scan_pos..], &ctx);

                let chunk_size = this.scan_pos + boundary;

                if boundary == 0 {
                    this.scan_pos = this.buffer.len();
                    // continue poll
                } else if chunk_size <= this.buffer.len() {
                    let result = this.buffer.split_to(chunk_size);
                    // found new chunk boundary inside buffer, split off chunk from buffer
                    let raw_chunk = this.buffer.split_to(chunk_size);
                    this.consumed += chunk_size as u64;
                    this.scan_pos = 0;
                    return Poll::Ready(Some(Ok(result)));
                    return Poll::Ready(Some(Ok(raw_chunk)));
                } else {
                    panic!("got unexpected chunk boundary from chunker");
                }
@ -132,3 +237,120 @@ where
        }
    }
}

#[cfg(test)]
mod test {
    use futures::stream::StreamExt;

    use super::*;

    struct DummyInput {
        data: Vec<u8>,
    }

    impl DummyInput {
        fn new(data: Vec<u8>) -> Self {
            Self { data }
        }
    }

    impl Stream for DummyInput {
        type Item = Result<Vec<u8>, Error>;

        fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
            let this = self.get_mut();
            match this.data.len() {
                0 => Poll::Ready(None),
                size if size > 10 => Poll::Ready(Some(Ok(this.data.split_off(10)))),
                _ => Poll::Ready(Some(Ok(std::mem::take(&mut this.data)))),
            }
        }
    }

    #[test]
    fn test_chunk_stream_forced_boundaries() {
        let mut data = Vec::new();
        for i in 0..(256 * 1024) {
            for j in 0..4 {
                let byte = ((i >> (j << 3)) & 0xff) as u8;
                data.push(byte);
            }
        }

        let mut input = DummyInput::new(data);
        let input = Pin::new(&mut input);

        let (injections_tx, injections_rx) = mpsc::channel();
        let (boundaries_tx, boundaries_rx) = mpsc::channel();
        let (suggested_tx, suggested_rx) = mpsc::channel();
        let injection_data = InjectionData::new(boundaries_rx, injections_tx);

        let mut chunk_stream = ChunkStream::new(
            input,
            Some(64 * 1024),
            Some(injection_data),
            Some(suggested_rx),
        );
        let chunks = std::sync::Arc::new(std::sync::Mutex::new(Vec::new()));
        let chunks_clone = chunks.clone();

        // Suggested boundary matching forced boundary
        suggested_tx.send(32 * 1024).unwrap();
        // Suggested boundary not matching forced boundary
        suggested_tx.send(64 * 1024).unwrap();
        // Force chunk boundary at suggested boundary
        boundaries_tx
            .send(InjectChunks {
                boundary: 32 * 1024,
                chunks: Vec::new(),
                size: 1024,
            })
            .unwrap();
        // Force chunk boundary within regular chunk
        boundaries_tx
            .send(InjectChunks {
                boundary: 128 * 1024,
                chunks: Vec::new(),
                size: 2048,
            })
            .unwrap();
        // Force chunk boundary aligned with regular boundary
        boundaries_tx
            .send(InjectChunks {
                boundary: 657408,
                chunks: Vec::new(),
                size: 512,
            })
            .unwrap();
        // Force chunk boundary within regular chunk, without injecting data
        boundaries_tx
            .send(InjectChunks {
                boundary: 657408 + 1024,
                chunks: Vec::new(),
                size: 0,
            })
            .unwrap();

        let rt = tokio::runtime::Runtime::new().unwrap();
        rt.block_on(async move {
            while let Some(chunk) = chunk_stream.next().await {
                let chunk = chunk.unwrap();
                let mut chunks = chunks.lock().unwrap();
                chunks.push(chunk);
            }
        });

        let mut total = 0;
        let chunks = chunks_clone.lock().unwrap();
        let expected = [32768, 31744, 65536, 262144, 262144, 512, 262144, 131584];
        for (chunk, expected) in chunks.as_slice().iter().zip(expected.iter()) {
            assert_eq!(chunk.len(), *expected);
            total += chunk.len();
        }
        while let Ok(injection) = injections_rx.recv() {
            total += injection.size;
        }

        assert_eq!(total, 4 * 256 * 1024 + 1024 + 2048 + 512);
    }
}

@ -332,6 +332,7 @@ impl HttpClient {
        let interactive = options.interactive;
        let fingerprint_cache = options.fingerprint_cache;
        let prefix = options.prefix.clone();
        let trust_openssl_valid = Arc::new(Mutex::new(true));
        ssl_connector_builder.set_verify_callback(
            openssl::ssl::SslVerifyMode::PEER,
            move |valid, ctx| match Self::verify_callback(
@ -339,6 +340,7 @@ impl HttpClient {
                ctx,
                expected_fingerprint.as_ref(),
                interactive,
                Arc::clone(&trust_openssl_valid),
            ) {
                Ok(None) => true,
                Ok(Some(fingerprint)) => {
@ -467,7 +469,6 @@ impl HttpClient {
                    }
                    Err(err) => {
                        log::error!("re-authentication failed: {}", err);
                        return;
                    }
                }
            }
@ -561,8 +562,12 @@ impl HttpClient {
        ctx: &mut X509StoreContextRef,
        expected_fingerprint: Option<&String>,
        interactive: bool,
        trust_openssl: Arc<Mutex<bool>>,
    ) -> Result<Option<String>, Error> {
        if openssl_valid {
        let mut trust_openssl_valid = trust_openssl.lock().unwrap();

        // we can only rely on openssl's prevalidation if we haven't forced it earlier
        if openssl_valid && *trust_openssl_valid {
            return Ok(None);
        }

@ -571,11 +576,13 @@ impl HttpClient {
            None => bail!("context lacks current certificate."),
        };

        let depth = ctx.error_depth();
        if depth != 0 {
            bail!("context depth != 0")
        // force trust in case of a chain, but set flag to no longer trust prevalidation by openssl
        if ctx.error_depth() > 0 {
            *trust_openssl_valid = false;
            return Ok(None);
        }

        // leaf certificate - if we end up here, we have to verify the fingerprint!
        let fp = match cert.digest(openssl::hash::MessageDigest::sha256()) {
            Ok(fp) => fp,
            Err(err) => bail!("failed to calculate certificate FP - {}", err), // should not happen

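For reference, the value this callback ends up comparing: a hedged sketch of rendering the SHA-256 digest as the colon-separated fingerprint usually shown to users (formatting assumed, not part of this diff):

```rust
// Assumed helper: "AB:CD:EF:..." form for comparing against a pinned
// fingerprint string.
fn format_fingerprint(digest: &[u8]) -> String {
    digest
        .iter()
        .map(|b| format!("{:02X}", b))
        .collect::<Vec<_>>()
        .join(":")
}
```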
127
pbs-client/src/inject_reused_chunks.rs
Normal file
@ -0,0 +1,127 @@
use std::cmp;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{mpsc, Arc};
use std::task::{Context, Poll};

use anyhow::{anyhow, Error};
use futures::{ready, Stream};
use pin_project_lite::pin_project;

use crate::pxar::create::ReusableDynamicEntry;

pin_project! {
    pub struct InjectReusedChunksQueue<S> {
        #[pin]
        input: S,
        next_injection: Option<InjectChunks>,
        injections: Option<mpsc::Receiver<InjectChunks>>,
        stream_len: Arc<AtomicUsize>,
    }
}

type StreamOffset = u64;
#[derive(Debug)]
/// Holds a list of chunks to inject at the given boundary by forcing a chunk boundary.
pub struct InjectChunks {
    /// Offset at which to force the boundary
    pub boundary: StreamOffset,
    /// List of chunks to inject
    pub chunks: Vec<ReusableDynamicEntry>,
    /// Cumulative size of the chunks in the list
    pub size: usize,
}

/// Variants for stream consumer to distinguish between raw data chunks and injected ones.
pub enum InjectedChunksInfo {
    Known(Vec<ReusableDynamicEntry>),
    Raw(bytes::BytesMut),
}

pub trait InjectReusedChunks: Sized {
    fn inject_reused_chunks(
        self,
        injections: Option<mpsc::Receiver<InjectChunks>>,
        stream_len: Arc<AtomicUsize>,
    ) -> InjectReusedChunksQueue<Self>;
}

impl<S> InjectReusedChunks for S
where
    S: Stream<Item = Result<bytes::BytesMut, Error>>,
{
    fn inject_reused_chunks(
        self,
        injections: Option<mpsc::Receiver<InjectChunks>>,
        stream_len: Arc<AtomicUsize>,
    ) -> InjectReusedChunksQueue<Self> {
        InjectReusedChunksQueue {
            input: self,
            next_injection: None,
            injections,
            stream_len,
        }
    }
}

impl<S> Stream for InjectReusedChunksQueue<S>
where
    S: Stream<Item = Result<bytes::BytesMut, Error>>,
{
    type Item = Result<InjectedChunksInfo, Error>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
        let mut this = self.project();

        // loop to skip over possible empty chunks
        loop {
            if this.next_injection.is_none() {
                if let Some(injections) = this.injections.as_mut() {
                    if let Ok(injection) = injections.try_recv() {
                        *this.next_injection = Some(injection);
                    }
                }
            }

            if let Some(inject) = this.next_injection.take() {
                // got reusable dynamic entries to inject
                let offset = this.stream_len.load(Ordering::SeqCst) as u64;

                match inject.boundary.cmp(&offset) {
                    // inject now
                    cmp::Ordering::Equal => {
                        let chunk_info = InjectedChunksInfo::Known(inject.chunks);
                        return Poll::Ready(Some(Ok(chunk_info)));
                    }
                    // inject later
                    cmp::Ordering::Greater => *this.next_injection = Some(inject),
                    // incoming new chunks and injections didn't line up?
                    cmp::Ordering::Less => {
                        return Poll::Ready(Some(Err(anyhow!("invalid injection boundary"))))
                    }
                }
            }

            // nothing to inject now, await further input
            match ready!(this.input.as_mut().poll_next(cx)) {
                None => {
                    if let Some(injections) = this.injections.as_mut() {
                        if this.next_injection.is_some() || injections.try_recv().is_ok() {
                            // stream finished, but remaining dynamic entries to inject
                            return Poll::Ready(Some(Err(anyhow!(
                                "injection queue not fully consumed"
                            ))));
                        }
                    }
                    // stream finished and all dynamic entries already injected
                    return Poll::Ready(None);
                }
                Some(Err(err)) => return Poll::Ready(Some(Err(err))),
                // ignore empty chunks, injected chunks from queue at forced boundary, but boundary
                // did not require splitting of the raw stream buffer to force the boundary
                Some(Ok(raw)) if raw.is_empty() => continue,
                Some(Ok(raw)) => return Poll::Ready(Some(Ok(InjectedChunksInfo::Raw(raw)))),
            }
        }
    }
}
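How the adapter slots into the upload pipeline, reduced to the combinator chain (a sketch; `stream` stands for the raw chunk stream, `injections_rx` for the receiver half handed to `InjectionData`):

```rust
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use futures::TryStreamExt;

// Sketch: splice reused chunks back into the chunk stream; the consumer
// then distinguishes raw data from already-known dynamic entries.
let stream_len = Arc::new(AtomicUsize::new(0));
let annotated = stream
    .inject_reused_chunks(Some(injections_rx), stream_len.clone())
    .and_then(|info| async move {
        match info {
            InjectedChunksInfo::Known(_chunks) => Ok(()), // append known chunks to the index
            InjectedChunksInfo::Raw(_data) => Ok(()),     // compress and upload new data
        }
    });
```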
@ -7,6 +7,7 @@ pub mod catalog_shell;
pub mod pxar;
pub mod tools;

mod inject_reused_chunks;
mod merge_known_chunks;
pub mod pipe_to_stream;

@ -38,6 +39,6 @@ mod backup_specification;
pub use backup_specification::*;

mod chunk_stream;
pub use chunk_stream::{ChunkStream, FixedChunkStream};
pub use chunk_stream::{ChunkStream, FixedChunkStream, InjectionData};

pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;

File diff suppressed because it is too large
@ -2,7 +2,8 @@

use std::collections::HashMap;
use std::ffi::{CStr, CString, OsStr, OsString};
use std::io;
use std::fs::OpenOptions;
use std::io::{self, Write};
use std::os::unix::ffi::OsStrExt;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::{Path, PathBuf};
@ -29,6 +30,7 @@ use proxmox_compression::zip::{ZipEncoder, ZipEntry};
use crate::pxar::dir_stack::PxarDirStack;
use crate::pxar::metadata;
use crate::pxar::Flags;
use crate::tools::handle_root_with_optional_format_version_prelude;

pub struct PxarExtractOptions<'a> {
    pub match_list: &'a [MatchEntry],
@ -36,10 +38,11 @@ pub struct PxarExtractOptions<'a> {
    pub allow_existing_dirs: bool,
    pub overwrite_flags: OverwriteFlags,
    pub on_error: Option<ErrorHandler>,
    pub prelude_path: Option<PathBuf>,
}

bitflags! {
    #[derive(Default)]
    #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
    pub struct OverwriteFlags: u8 {
        /// Overwrite existing entries file content
        const FILE = 0x1;
@ -124,11 +127,26 @@ where
    // we use this to keep track of our directory-traversal
    decoder.enable_goodbye_entries(true);

    let root = decoder
        .next()
        .context("found empty pxar archive")?
    let (root, prelude) = handle_root_with_optional_format_version_prelude(&mut decoder)
        .context("error reading pxar archive")?;

    if let Some(ref path) = options.prelude_path {
        if let Some(entry) = prelude {
            let mut prelude_file = OpenOptions::new()
                .create(true)
                .write(true)
                .open(path)
                .with_context(|| format!("error creating prelude file '{path:?}'"))?;
            if let pxar::EntryKind::Prelude(ref prelude) = entry.kind() {
                prelude_file.write_all(prelude.as_ref())?;
            } else {
                log::info!("unexpected entry kind for prelude");
            }
        } else {
            log::info!("No prelude entry found, skip prelude restore.");
        }
    }

    if !root.is_dir() {
        bail!("pxar archive does not start with a directory entry!");
    }
@ -267,6 +285,8 @@ where
        };

        let extract_res = match (did_match, entry.kind()) {
            (_, EntryKind::Version(_version)) => Ok(()),
            (_, EntryKind::Prelude(_prelude)) => Ok(()),
            (_, EntryKind::Directory) => {
                self.callback(entry.path());

@ -353,11 +373,8 @@ where
                    Ok(())
                }
            }
            (true, EntryKind::File { size, .. }) => {
                let contents = self.decoder.contents();

                if let Some(mut contents) = contents {
                    self.extractor.extract_file(
            (true, EntryKind::File { size, .. }) => match self.decoder.contents() {
                Ok(Some(mut contents)) => self.extractor.extract_file(
                    &file_name,
                    metadata,
                    *size,
@ -365,14 +382,13 @@ where
                    self.extractor
                        .overwrite_flags
                        .contains(OverwriteFlags::FILE),
                    )
                } else {
                    Err(format_err!(
                ),
                Ok(None) => Err(format_err!(
                    "found regular file entry without contents in archive"
                ))
                }
                .context(PxarExtractContext::ExtractFile)
                )),
                Err(err) => Err(err.into()),
            }
            .context(PxarExtractContext::ExtractFile),
            (false, _) => Ok(()), // skip this
        };

@ -852,7 +868,8 @@ where
        match entry.kind() {
            EntryKind::File { .. } => {
                let size = decoder.content_size().unwrap_or(0);
                tar_add_file(&mut tarencoder, decoder.contents(), size, metadata, path).await?
                let contents = decoder.contents().await?;
                tar_add_file(&mut tarencoder, contents, size, metadata, path).await?
            }
            EntryKind::Hardlink(link) => {
                if !link.data.is_empty() {
@ -874,13 +891,8 @@ where
                        path
                    } else {
                        let size = decoder.content_size().unwrap_or(0);
                        tar_add_file(
                            &mut tarencoder,
                            decoder.contents(),
                            size,
                            metadata,
                            path,
                        )
                        let contents = decoder.contents().await?;
                        tar_add_file(&mut tarencoder, contents, size, metadata, path)
                            .await?;
                        hardlinks.insert(realpath.to_owned(), path.to_owned());
                        continue;
@ -1018,7 +1030,8 @@ where
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, decoder.contents())
                let contents = decoder.contents().await?;
                zip.add_entry(entry, contents)
                    .await
                    .context("could not send file entry")?;
            }
@ -1036,7 +1049,8 @@ where
                    metadata.stat.mode as u16,
                    true,
                );
                zip.add_entry(entry, decoder.contents())
                let contents = decoder.contents().await?;
                zip.add_entry(entry, contents)
                    .await
                    .context("could not send file entry")?;
            }
@ -1259,14 +1273,16 @@ where
                .with_context(|| format!("error at entry {file_name_os:?}"))?;
        }
        EntryKind::File { size, .. } => {
            let mut contents = decoder
                .contents()
                .await?
                .context("found regular file entry without contents in archive")?;
            extractor
                .async_extract_file(
                    &file_name,
                    metadata,
                    *size,
                    &mut decoder
                        .contents()
                        .context("found regular file entry without contents in archive")?,
                    &mut contents,
                    extractor.overwrite_flags.contains(OverwriteFlags::FILE),
                )
                .await?

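The prelude handling is driven by a single new option; illustratively (mutating existing options, since the remaining fields are elided in this hunk):

```rust
// Illustrative only: ask the extractor to restore the archive prelude
// (e.g. the original create command line) into a separate file.
options.prelude_path = Some(std::path::PathBuf::from("/tmp/archive.prelude"));
```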
@ -8,6 +8,7 @@ use libc::c_long;
use bitflags::bitflags;

bitflags! {
    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
    pub struct Flags: u64 {
        /// FAT-style 2s time granularity
        const WITH_2SEC_TIME = 0x40;
162
pbs-client/src/pxar/look_ahead_cache.rs
Normal file
@ -0,0 +1,162 @@
use std::collections::HashSet;
use std::ffi::CString;
use std::ops::Range;
use std::os::unix::io::OwnedFd;
use std::path::PathBuf;

use nix::sys::stat::FileStat;

use pxar::encoder::PayloadOffset;
use pxar::Metadata;

use super::create::*;

const DEFAULT_CACHE_SIZE: usize = 512;

pub(crate) struct CacheEntryData {
    pub(crate) fd: OwnedFd,
    pub(crate) c_file_name: CString,
    pub(crate) stat: FileStat,
    pub(crate) metadata: Metadata,
    pub(crate) payload_offset: PayloadOffset,
}

pub(crate) enum CacheEntry {
    RegEntry(CacheEntryData),
    DirEntry(CacheEntryData),
    DirEnd,
}

pub(crate) struct PxarLookaheadCache {
    // Current state of the cache
    enabled: bool,
    // Cached entries
    entries: Vec<CacheEntry>,
    // Entries encountered having more than one link given by stat
    hardlinks: HashSet<HardLinkInfo>,
    // Payload range covered by the currently cached entries
    range: Range<u64>,
    // Possible held back last chunk from last flush, used for possible chunk continuation
    last_chunk: Option<ReusableDynamicEntry>,
    // Path when started caching
    start_path: PathBuf,
    // Number of entries with file descriptors
    fd_entries: usize,
    // Max number of entries with file descriptors
    cache_size: usize,
}

impl PxarLookaheadCache {
    pub(crate) fn new(size: Option<usize>) -> Self {
        Self {
            enabled: false,
            entries: Vec::new(),
            hardlinks: HashSet::new(),
            range: 0..0,
            last_chunk: None,
            start_path: PathBuf::new(),
            fd_entries: 0,
            cache_size: size.unwrap_or(DEFAULT_CACHE_SIZE),
        }
    }

    pub(crate) fn is_full(&self) -> bool {
        self.fd_entries >= self.cache_size
    }

    pub(crate) fn caching_enabled(&self) -> bool {
        self.enabled
    }

    pub(crate) fn insert(
        &mut self,
        fd: OwnedFd,
        c_file_name: CString,
        stat: FileStat,
        metadata: Metadata,
        payload_offset: PayloadOffset,
        path: PathBuf,
    ) {
        if !self.enabled {
            self.start_path = path;
            if !metadata.is_dir() {
                self.start_path.pop();
            }
        }
        self.enabled = true;
        self.fd_entries += 1;
        if metadata.is_dir() {
            self.entries.push(CacheEntry::DirEntry(CacheEntryData {
                fd,
                c_file_name,
                stat,
                metadata,
                payload_offset,
            }))
        } else {
            self.entries.push(CacheEntry::RegEntry(CacheEntryData {
                fd,
                c_file_name,
                stat,
                metadata,
                payload_offset,
            }))
        }
    }

    pub(crate) fn insert_dir_end(&mut self) {
        self.entries.push(CacheEntry::DirEnd);
    }

    pub(crate) fn take_and_reset(&mut self) -> (Vec<CacheEntry>, PathBuf) {
        self.fd_entries = 0;
        self.enabled = false;
        // keep end for possible continuation if cache has been cleared because
        // it was full, but further caching would be fine
        self.range = self.range.end..self.range.end;
        (
            std::mem::take(&mut self.entries),
            std::mem::take(&mut self.start_path),
        )
    }

    pub(crate) fn contains_hardlink(&self, info: &HardLinkInfo) -> bool {
        self.hardlinks.contains(info)
    }

    pub(crate) fn insert_hardlink(&mut self, info: HardLinkInfo) -> bool {
        self.hardlinks.insert(info)
    }

    pub(crate) fn range(&self) -> &Range<u64> {
        &self.range
    }

    pub(crate) fn update_range(&mut self, range: Range<u64>) {
        self.range = range;
    }

    pub(crate) fn try_extend_range(&mut self, range: Range<u64>) -> bool {
        if self.range.end == 0 {
            // initialize first range to start and end with start of new range
            self.range.start = range.start;
            self.range.end = range.start;
        }

        // range continued, update end
        if self.range.end == range.start {
            self.range.end = range.end;
            return true;
        }

        false
    }

    pub(crate) fn take_last_chunk(&mut self) -> Option<ReusableDynamicEntry> {
        self.last_chunk.take()
    }

    pub(crate) fn update_last_chunk(&mut self, chunk: Option<ReusableDynamicEntry>) {
        self.last_chunk = chunk;
    }
}
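The range bookkeeping is easiest to see in isolation: a contiguous payload range extends the cached window, a gap forces a flush. The same logic as `try_extend_range`, as a standalone sketch on plain `u64` ranges:

```rust
use std::ops::Range;

fn try_extend(window: &mut Range<u64>, new: Range<u64>) -> bool {
    if window.end == 0 {
        // first range: initialize window to start of the new range
        window.start = new.start;
        window.end = new.start;
    }
    if window.end == new.start {
        // contiguous: extend the window
        window.end = new.end;
        return true;
    }
    false // gap: caller has to flush the cache first
}

fn main() {
    let mut window = 0..0;
    assert!(try_extend(&mut window, 100..200));
    assert!(try_extend(&mut window, 200..350));
    assert!(!try_extend(&mut window, 500..600));
}
```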
@ -188,7 +188,7 @@ fn add_fcaps(
    c_result!(unsafe {
        libc::setxattr(
            c_proc_path,
            xattr::xattr_name_fcaps().as_ptr(),
            xattr::XATTR_NAME_FCAPS.as_ptr(),
            fcaps.data.as_ptr() as *const libc::c_void,
            fcaps.data.len(),
            0,

@ -50,13 +50,16 @@
pub(crate) mod create;
pub(crate) mod dir_stack;
pub(crate) mod extract;
pub(crate) mod look_ahead_cache;
pub(crate) mod metadata;
pub(crate) mod tools;

mod flags;
pub use flags::Flags;

pub use create::{create_archive, PxarCreateOptions};
pub use create::{
    create_archive, MetadataArchiveReader, PxarCreateOptions, PxarPrevRef, PxarWriters,
};
pub use extract::{
    create_tar, create_zip, extract_archive, extract_sub_dir, extract_sub_dir_seq, ErrorHandler,
    OverwriteFlags, PxarExtractContext, PxarExtractOptions,

@ -128,16 +128,32 @@ pub fn format_single_line_entry(entry: &Entry) -> String {

    let meta = entry.metadata();

    let (size, link) = match entry.kind() {
        EntryKind::File { size, .. } => (format!("{}", *size), String::new()),
        EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
        EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str())),
        EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new()),
        _ => ("0".to_string(), String::new()),
    let (size, link, payload_offset) = match entry.kind() {
        EntryKind::File {
            size,
            payload_offset,
            ..
        } => (format!("{}", *size), String::new(), *payload_offset),
        EntryKind::Symlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str()), None),
        EntryKind::Hardlink(link) => ("0".to_string(), format!(" -> {:?}", link.as_os_str()), None),
        EntryKind::Device(dev) => (format!("{},{}", dev.major, dev.minor), String::new(), None),
        _ => ("0".to_string(), String::new(), None),
    };

    let owner_string = format!("{}/{}", meta.stat.uid, meta.stat.gid);

    if let Some(offset) = payload_offset {
        format!(
            "{} {:<13} {} {:>8} {:?}{} {}",
            mode_string,
            owner_string,
            format_mtime(&meta.stat.mtime),
            size,
            entry.path(),
            link,
            offset,
        )
    } else {
        format!(
            "{} {:<13} {} {:>8} {:?}{}",
            mode_string,
@ -148,23 +164,37 @@ pub fn format_single_line_entry(entry: &Entry) -> String {
            link,
        )
    }
}

pub fn format_multi_line_entry(entry: &Entry) -> String {
    let mode_string = mode_string(entry);

    let meta = entry.metadata();

    let (size, link, type_name) = match entry.kind() {
        EntryKind::File { size, .. } => (format!("{}", *size), String::new(), "file"),
    let (size, link, type_name, payload_offset) = match entry.kind() {
        EntryKind::Version(version) => (format!("{version:?}"), String::new(), "version", None),
        EntryKind::Prelude(prelude) => (
            "0".to_string(),
            format!("raw data: {:?} bytes", prelude.data.len()),
            "prelude",
            None,
        ),
        EntryKind::File {
            size,
            payload_offset,
            ..
        } => (format!("{}", *size), String::new(), "file", *payload_offset),
        EntryKind::Symlink(link) => (
            "0".to_string(),
            format!(" -> {:?}", link.as_os_str()),
            "symlink",
            None,
        ),
        EntryKind::Hardlink(link) => (
            "0".to_string(),
            format!(" -> {:?}", link.as_os_str()),
            "symlink",
            None,
        ),
        EntryKind::Device(dev) => (
            format!("{},{}", dev.major, dev.minor),
@ -176,11 +206,12 @@ pub fn format_multi_line_entry(entry: &Entry) -> String {
            } else {
                "device"
            },
            None,
        ),
        EntryKind::Socket => ("0".to_string(), String::new(), "socket"),
        EntryKind::Fifo => ("0".to_string(), String::new(), "fifo"),
        EntryKind::Directory => ("0".to_string(), String::new(), "directory"),
        EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry"),
        EntryKind::Socket => ("0".to_string(), String::new(), "socket", None),
        EntryKind::Fifo => ("0".to_string(), String::new(), "fifo", None),
        EntryKind::Directory => ("0".to_string(), String::new(), "directory", None),
        EntryKind::GoodbyeTable => ("0".to_string(), String::new(), "bad entry", None),
    };

    let file_name = match std::str::from_utf8(entry.path().as_os_str().as_bytes()) {
@ -188,6 +219,25 @@ pub fn format_multi_line_entry(entry: &Entry) -> String {
        Err(_) => std::borrow::Cow::Owned(format!("{:?}", entry.path())),
    };

    if let Some(offset) = payload_offset {
        format!(
            " File: {}{}\n \
              Size: {:<13} Type: {}\n\
            Access: ({:o}/{}) Uid: {:<5} Gid: {:<5}\n\
            Modify: {}\n
            PayloadOffset: {}\n",
            file_name,
            link,
            size,
            type_name,
            meta.file_mode(),
            mode_string,
            meta.stat.uid,
            meta.stat.gid,
            format_mtime(&meta.stat.mtime),
            offset,
        )
    } else {
        format!(
            " File: {}{}\n \
              Size: {:<13} Type: {}\n\
@ -204,3 +254,4 @@ pub fn format_multi_line_entry(entry: &Entry) -> String {
            format_mtime(&meta.stat.mtime),
        )
    }
}

@ -2,10 +2,10 @@ use std::io::Write;
|
||||
//use std::os::unix::io::FromRawFd;
|
||||
use std::path::Path;
|
||||
use std::pin::Pin;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::{mpsc, Arc, Mutex};
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use anyhow::{format_err, Error};
|
||||
use anyhow::Error;
|
||||
use futures::future::{AbortHandle, Abortable};
|
||||
use futures::stream::Stream;
|
||||
use nix::dir::Dir;
|
||||
@ -15,7 +15,10 @@ use nix::sys::stat::Mode;
|
||||
use proxmox_async::blocking::TokioWriterAdapter;
|
||||
use proxmox_io::StdChannelWriter;
|
||||
|
||||
use pbs_datastore::catalog::CatalogWriter;
|
||||
use pbs_datastore::catalog::{BackupCatalogWriter, CatalogWriter};
|
||||
|
||||
use crate::inject_reused_chunks::InjectChunks;
use crate::pxar::create::PxarWriters;

/// Stream implementation to encode and upload .pxar archives.
///
@ -24,8 +27,9 @@ use pbs_datastore::catalog::CatalogWriter;
/// consumer.
pub struct PxarBackupStream {
    rx: Option<std::sync::mpsc::Receiver<Result<Vec<u8>, Error>>>,
    pub suggested_boundaries: Option<std::sync::mpsc::Receiver<u64>>,
    handle: Option<AbortHandle>,
    error: Arc<Mutex<Option<String>>>,
    error: Arc<Mutex<Option<Error>>>,
}

impl Drop for PxarBackupStream {

@ -38,37 +42,63 @@ impl Drop for PxarBackupStream {
impl PxarBackupStream {
    pub fn new<W: Write + Send + 'static>(
        dir: Dir,
        catalog: Arc<Mutex<CatalogWriter<W>>>,
        catalog: Option<Arc<Mutex<CatalogWriter<W>>>>,
        options: crate::pxar::PxarCreateOptions,
    ) -> Result<Self, Error> {
        let (tx, rx) = std::sync::mpsc::sync_channel(10);
        boundaries: Option<mpsc::Sender<InjectChunks>>,
        separate_payload_stream: bool,
    ) -> Result<(Self, Option<Self>), Error> {
        let buffer_size = 256 * 1024;

        let error = Arc::new(Mutex::new(None));
        let error2 = Arc::clone(&error);
        let handler = async move {
        let (tx, rx) = std::sync::mpsc::sync_channel(10);
        let writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity(
            buffer_size,
            StdChannelWriter::new(tx),
        ));

        let writer = pxar::encoder::sync::StandardWriter::new(writer);

        let (writer, payload_rx, suggested_boundaries_tx, suggested_boundaries_rx) =
            if separate_payload_stream {
                let (tx, rx) = std::sync::mpsc::sync_channel(10);
                let (suggested_boundaries_tx, suggested_boundaries_rx) = std::sync::mpsc::channel();
                let payload_writer = TokioWriterAdapter::new(std::io::BufWriter::with_capacity(
                    buffer_size,
                    StdChannelWriter::new(tx),
                ));
                (
                    pxar::PxarVariant::Split(
                        writer,
                        pxar::encoder::sync::StandardWriter::new(payload_writer),
                    ),
                    Some(rx),
                    Some(suggested_boundaries_tx),
                    Some(suggested_boundaries_rx),
                )
            } else {
                (pxar::PxarVariant::Unified(writer), None, None, None)
            };

        let error = Arc::new(Mutex::new(None));
        let error2 = Arc::clone(&error);
        let handler = async move {
            if let Err(err) = crate::pxar::create_archive(
                dir,
                PxarWriters::new(
                    writer,
                    catalog.map(|c| c as Arc<Mutex<dyn BackupCatalogWriter + Send>>),
                ),
                crate::pxar::Flags::DEFAULT,
                move |path| {
                    log::debug!("{:?}", path);
                    Ok(())
                },
                Some(catalog),
                options,
                boundaries,
                suggested_boundaries_tx,
            )
            .await
            {
                let mut error = error2.lock().unwrap();
                *error = Some(err.to_string());
                *error = Some(err);
            }
        };

@ -76,21 +106,33 @@ impl PxarBackupStream {
        let future = Abortable::new(handler, registration);
        tokio::spawn(future);

        Ok(Self {
        let backup_stream = Self {
            rx: Some(rx),
            suggested_boundaries: None,
            handle: Some(handle.clone()),
            error: Arc::clone(&error),
        };

        let backup_payload_stream = payload_rx.map(|rx| Self {
            rx: Some(rx),
            suggested_boundaries: suggested_boundaries_rx,
            handle: Some(handle),
            error,
        })
        });

        Ok((backup_stream, backup_payload_stream))
    }

    pub fn open<W: Write + Send + 'static>(
        dirname: &Path,
        catalog: Arc<Mutex<CatalogWriter<W>>>,
        catalog: Option<Arc<Mutex<CatalogWriter<W>>>>,
        options: crate::pxar::PxarCreateOptions,
    ) -> Result<Self, Error> {
        boundaries: Option<mpsc::Sender<InjectChunks>>,
        separate_payload_stream: bool,
    ) -> Result<(Self, Option<Self>), Error> {
        let dir = nix::dir::Dir::open(dirname, OFlag::O_DIRECTORY, Mode::empty())?;

        Self::new(dir, catalog, options)
        Self::new(dir, catalog, options, boundaries, separate_payload_stream)
    }
}

@ -100,18 +142,18 @@ impl Stream for PxarBackupStream {
    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Option<Self::Item>> {
        {
            // limit lock scope
            let error = self.error.lock().unwrap();
            if let Some(ref msg) = *error {
                return Poll::Ready(Some(Err(format_err!("{}", msg))));
            let mut error = self.error.lock().unwrap();
            if let Some(err) = error.take() {
                return Poll::Ready(Some(Err(err)));
            }
        }

        match proxmox_async::runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) {
            Ok(data) => Poll::Ready(Some(data)),
            Err(_) => {
                let error = self.error.lock().unwrap();
                if let Some(ref msg) = *error {
                    return Poll::Ready(Some(Err(format_err!("{}", msg))));
                let mut error = self.error.lock().unwrap();
                if let Some(err) = error.take() {
                    return Poll::Ready(Some(Err(err)));
                }
                Poll::Ready(None) // channel closed, no error
            }
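With the split mode above, `new()` now hands back the metadata stream plus an optional payload stream. A minimal consumer sketch, assuming an async context; the hypothetical `upload_blob` stands in for the real chunked-upload machinery:

    use futures::StreamExt;

    // Drain one stream; the payload stream (if present) is consumed the same
    // way, typically from a second task. Errors stashed by the encoder task
    // surface here as `Err` items.
    async fn drain(mut stream: PxarBackupStream) -> Result<(), anyhow::Error> {
        while let Some(chunk) = stream.next().await {
            let data: Vec<u8> = chunk?;
            // upload_blob(&data).await?; // hypothetical upload step
            drop(data);
        }
        Ok(())
    }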
|
@ -302,45 +302,43 @@ pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSour
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
static mut TEST_DEFAULT_ENCRYPTION_KEY: Result<Option<Vec<u8>>, Error> = Ok(None);
|
||||
static TEST_DEFAULT_ENCRYPTION_KEY: std::sync::Mutex<Result<Option<Vec<u8>>, Error>> =
|
||||
std::sync::Mutex::new(Ok(None));
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn read_optional_default_encryption_key() -> Result<Option<KeyWithSource>, Error> {
|
||||
// not safe when multiple concurrent test cases end up here!
|
||||
unsafe {
|
||||
match &TEST_DEFAULT_ENCRYPTION_KEY {
|
||||
match &*TEST_DEFAULT_ENCRYPTION_KEY.lock().unwrap() {
|
||||
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
|
||||
Ok(None) => Ok(None),
|
||||
Err(_) => bail!("test error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
// not safe when multiple concurrent test cases end up here!
|
||||
pub(crate) unsafe fn set_test_encryption_key(value: Result<Option<Vec<u8>>, Error>) {
|
||||
TEST_DEFAULT_ENCRYPTION_KEY = value;
|
||||
*TEST_DEFAULT_ENCRYPTION_KEY.lock().unwrap() = value;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
static mut TEST_DEFAULT_MASTER_PUBKEY: Result<Option<Vec<u8>>, Error> = Ok(None);
|
||||
static TEST_DEFAULT_MASTER_PUBKEY: std::sync::Mutex<Result<Option<Vec<u8>>, Error>> =
|
||||
std::sync::Mutex::new(Ok(None));
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn read_optional_default_master_pubkey() -> Result<Option<KeyWithSource>, Error> {
|
||||
// not safe when multiple concurrent test cases end up here!
|
||||
unsafe {
|
||||
match &TEST_DEFAULT_MASTER_PUBKEY {
|
||||
match &*TEST_DEFAULT_MASTER_PUBKEY.lock().unwrap() {
|
||||
Ok(Some(key)) => Ok(Some(KeyWithSource::from_default(key.clone()))),
|
||||
Ok(None) => Ok(None),
|
||||
Err(_) => bail!("test error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
// not safe when multiple concurrent test cases end up here!
|
||||
pub(crate) unsafe fn set_test_default_master_pubkey(value: Result<Option<Vec<u8>>, Error>) {
|
||||
TEST_DEFAULT_MASTER_PUBKEY = value;
|
||||
*TEST_DEFAULT_MASTER_PUBKEY.lock().unwrap() = value;
|
||||
}
|
||||
|
||||
pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
|
||||
|
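The rewrite above drops the `static mut` globals, which require `unsafe` and trip the `static_mut_refs` lint on newer toolchains, in favor of lock-guarded statics. The bare pattern, as a sketch:

    use std::sync::Mutex;

    // `Mutex::new` is const, so the guarded static needs no lazy init and
    // the accessors stay entirely safe code.
    static TEST_VALUE: Mutex<Option<Vec<u8>>> = Mutex::new(None);

    fn set_test_value(value: Option<Vec<u8>>) {
        *TEST_VALUE.lock().unwrap() = value;
    }

    fn get_test_value() -> Option<Vec<u8>> {
        TEST_VALUE.lock().unwrap().clone()
    }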
@ -1,10 +1,15 @@
//! Shared tools useful for common CLI clients.
use std::collections::HashMap;
use std::env::VarError::{NotPresent, NotUnicode};
use std::ffi::OsStr;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::os::unix::ffi::OsStrExt;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::FromRawFd;
use std::path::PathBuf;
use std::process::Command;
use std::sync::OnceLock;

use anyhow::{bail, format_err, Context, Error};
use serde_json::{json, Value};

@ -16,6 +21,12 @@ use proxmox_schema::*;
use proxmox_sys::fs::file_get_json;

use pbs_api_types::{Authid, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL};
use pbs_datastore::catalog::{ArchiveEntry, DirEntryAttribute};
use pbs_datastore::BackupManifest;
use pxar::accessor::aio::Accessor;
use pxar::accessor::ReadAt;
use pxar::format::SignedDuration;
use pxar::{mode, EntryKind};

use crate::{BackupRepository, HttpClient, HttpClientOptions};

@ -117,6 +128,23 @@ pub fn get_default_repository() -> Option<String> {
    std::env::var("PBS_REPOSITORY").ok()
}

pub fn remove_repository_from_value(param: &mut Value) -> Result<BackupRepository, Error> {
    if let Some(url) = param
        .as_object_mut()
        .ok_or_else(|| format_err!("unable to get repository (parameter is not an object)"))?
        .remove("repository")
    {
        return url
            .as_str()
            .ok_or_else(|| format_err!("invalid repository value (must be a string)"))?
            .parse();
    }

    get_default_repository()
        .ok_or_else(|| format_err!("unable to get default repository"))?
        .parse()
}

pub fn extract_repository_from_value(param: &Value) -> Result<BackupRepository, Error> {
    let repo_url = param["repository"]
        .as_str()

@ -337,7 +365,7 @@ pub fn complete_pxar_archive_name(arg: &str, param: &HashMap<String, String>) ->
    complete_server_file_name(arg, param)
        .iter()
        .filter_map(|name| {
            if name.ends_with(".pxar.didx") {
            if has_pxar_filename_extension(name, true) {
                Some(pbs_tools::format::strip_server_file_extension(name).to_owned())
            } else {
                None

@ -526,3 +554,211 @@ pub fn place_xdg_file(
        .and_then(|base| base.place_config_file(file_name).map_err(Error::from))
        .with_context(|| format!("failed to place {} in xdg home", description))
}

pub fn get_pxar_archive_names(
    archive_name: &str,
    manifest: &BackupManifest,
) -> Result<(String, Option<String>), Error> {
    let (filename, ext) = match archive_name.strip_suffix(".didx") {
        Some(filename) => (filename, ".didx"),
        None => (archive_name, ""),
    };

    // Check if archive with given extension is present
    if manifest
        .files()
        .iter()
        .any(|fileinfo| fileinfo.filename == format!("{filename}.didx"))
    {
        // check if already given as one of split archive name variants
        if let Some(base) = filename
            .strip_suffix(".mpxar")
            .or_else(|| filename.strip_suffix(".ppxar"))
        {
            return Ok((
                format!("{base}.mpxar{ext}"),
                Some(format!("{base}.ppxar{ext}")),
            ));
        }
        return Ok((archive_name.to_owned(), None));
    }

    // if not, try fallback from regular to split archive
    if let Some(base) = filename.strip_suffix(".pxar") {
        return get_pxar_archive_names(&format!("{base}.mpxar{ext}"), manifest);
    }

    bail!("archive not found in manifest");
}
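The fallback order can be read off directly: an exact `.didx` match wins, a split-variant name is normalized to the `(mpxar, ppxar)` pair, and a plain `.pxar` name retries as `.mpxar`. A sketch against a hypothetical manifest (file names invented):

    // Manifest assumed to contain "root.mpxar.didx" and "root.ppxar.didx":
    let (meta, payload) = get_pxar_archive_names("root.pxar.didx", &manifest)?;
    assert_eq!(meta, "root.mpxar.didx");
    assert_eq!(payload.as_deref(), Some("root.ppxar.didx"));

    // Manifest assumed to contain a unified "etc.pxar.didx" instead:
    let (meta, payload) = get_pxar_archive_names("etc.pxar.didx", &manifest)?;
    assert_eq!(meta, "etc.pxar.didx");
    assert!(payload.is_none());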
/// Check if the given filename has a valid pxar filename extension variant
///
/// If `with_didx_extension` is `true`, check the additional `.didx` ending.
pub fn has_pxar_filename_extension(name: &str, with_didx_extension: bool) -> bool {
    if with_didx_extension {
        name.ends_with(".pxar.didx")
            || name.ends_with(".mpxar.didx")
            || name.ends_with(".ppxar.didx")
    } else {
        name.ends_with(".pxar") || name.ends_with(".mpxar") || name.ends_with(".ppxar")
    }
}

/// Decode possible format version and prelude entries before getting the root directory
/// entry.
///
/// Returns the root directory entry and, if present, the prelude entry
pub fn handle_root_with_optional_format_version_prelude<R: pxar::decoder::SeqRead>(
    decoder: &mut pxar::decoder::sync::Decoder<R>,
) -> Result<(pxar::Entry, Option<pxar::Entry>), Error> {
    let first = decoder
        .next()
        .ok_or_else(|| format_err!("missing root entry"))??;
    match first.kind() {
        pxar::EntryKind::Directory => {
            let version = pxar::format::FormatVersion::Version1;
            log::debug!("pxar format version '{version:?}'");
            Ok((first, None))
        }
        pxar::EntryKind::Version(version) => {
            log::debug!("pxar format version '{version:?}'");
            let second = decoder
                .next()
                .ok_or_else(|| format_err!("missing root entry"))??;
            match second.kind() {
                pxar::EntryKind::Directory => Ok((second, None)),
                pxar::EntryKind::Prelude(_prelude) => {
                    let third = decoder
                        .next()
                        .ok_or_else(|| format_err!("missing root entry"))??;
                    Ok((third, Some(second)))
                }
                _ => bail!("unexpected entry kind {:?}", second.kind()),
            }
        }
        _ => bail!("unexpected entry kind {:?}", first.kind()),
    }
}
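A hedged usage sketch: peel the optional version/prelude entries off the front of an archive before walking it. `reader` is assumed to be any `std::io::Read` source; `Decoder::from_std` wraps it for sequential decoding:

    let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;
    let (root, prelude) = handle_root_with_optional_format_version_prelude(&mut decoder)?;
    // The root entry is a directory in all three accepted layouts.
    assert!(matches!(root.kind(), pxar::EntryKind::Directory));
    if prelude.is_some() {
        log::debug!("archive carries a prelude entry");
    }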
/// Raise the soft limit for open file handles to the hard limit
///
/// Returns the values set before raising the limit as libc::rlimit64
pub fn raise_nofile_limit() -> Result<libc::rlimit64, Error> {
    let mut old = libc::rlimit64 {
        rlim_cur: 0,
        rlim_max: 0,
    };
    if 0 != unsafe { libc::getrlimit64(libc::RLIMIT_NOFILE, &mut old as *mut libc::rlimit64) } {
        bail!("Failed to get nofile rlimit");
    }

    let mut new = libc::rlimit64 {
        rlim_cur: old.rlim_max,
        rlim_max: old.rlim_max,
    };
    if 0 != unsafe { libc::setrlimit64(libc::RLIMIT_NOFILE, &mut new as *mut libc::rlimit64) } {
        bail!("Failed to set nofile rlimit");
    }

    Ok(old)
}
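Callers that are about to open many file handles (large restores, catalog shells) can bump the limit up front; a short sketch:

    // Raise RLIMIT_NOFILE to the hard limit and log the previous values.
    let old = raise_nofile_limit()?;
    log::info!(
        "nofile soft limit raised from {} (hard limit {})",
        old.rlim_cur,
        old.rlim_max
    );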
/// Look up the directory entries of the given directory `path` in a pxar archive via its given
/// `accessor` and return the entries formatted as [`ArchiveEntry`]'s, compatible with reading
/// entries from the catalog.
///
/// If the optional `path_prefix` is given, all returned entry paths will be prefixed with it.
pub async fn pxar_metadata_catalog_lookup<T: Clone + ReadAt>(
    accessor: Accessor<T>,
    path: &OsStr,
    path_prefix: Option<&str>,
) -> Result<Vec<ArchiveEntry>, Error> {
    let root = accessor.open_root().await?;
    let dir_entry = root
        .lookup(&path)
        .await
        .map_err(|err| format_err!("lookup failed - {err}"))?
        .ok_or_else(|| format_err!("lookup failed - error opening '{path:?}'"))?;

    let mut entries = Vec::new();
    if let EntryKind::Directory = dir_entry.kind() {
        let dir_entry = dir_entry
            .enter_directory()
            .await
            .map_err(|err| format_err!("failed to enter directory - {err}"))?;

        let mut entries_iter = dir_entry.read_dir();
        while let Some(entry) = entries_iter.next().await {
            let entry = entry?.decode_entry().await?;

            let entry_attr = match entry.kind() {
                EntryKind::Version(_) | EntryKind::Prelude(_) | EntryKind::GoodbyeTable => continue,
                EntryKind::Directory => DirEntryAttribute::Directory {
                    start: entry.entry_range_info().entry_range.start,
                },
                EntryKind::File { size, .. } => {
                    let mtime = match entry.metadata().mtime_as_duration() {
                        SignedDuration::Positive(val) => i64::try_from(val.as_secs())?,
                        SignedDuration::Negative(val) => -i64::try_from(val.as_secs())?,
                    };
                    DirEntryAttribute::File { size: *size, mtime }
                }
                EntryKind::Device(_) => match entry.metadata().file_type() {
                    mode::IFBLK => DirEntryAttribute::BlockDevice,
                    mode::IFCHR => DirEntryAttribute::CharDevice,
                    _ => bail!("encountered unknown device type"),
                },
                EntryKind::Symlink(_) => DirEntryAttribute::Symlink,
                EntryKind::Hardlink(_) => DirEntryAttribute::Hardlink,
                EntryKind::Fifo => DirEntryAttribute::Fifo,
                EntryKind::Socket => DirEntryAttribute::Socket,
            };

            let entry_path = if let Some(prefix) = path_prefix {
                let mut entry_path = PathBuf::from(prefix);
                match entry.path().strip_prefix("/") {
                    Ok(path) => entry_path.push(path),
                    Err(_) => entry_path.push(entry.path()),
                }
                entry_path
            } else {
                PathBuf::from(entry.path())
            };
            entries.push(ArchiveEntry::new(
                entry_path.as_os_str().as_bytes(),
                Some(&entry_attr),
            ));
        }
    } else {
        bail!(format!(
            "expected directory entry, got entry kind '{:?}'",
            dir_entry.kind()
        ));
    }

    Ok(entries)
}
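A sketch of a lookup over an already-opened accessor; the prefix makes the returned paths render like catalog paths (`accessor` construction and the archive name are assumed):

    let entries = pxar_metadata_catalog_lookup(
        accessor,
        std::ffi::OsStr::new("/etc"),
        Some("root.mpxar.didx"),
    )
    .await?;
    // Each ArchiveEntry mirrors what a catalog read would have produced.
    log::debug!("found {} entries below /etc", entries.len());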
/// Creates a temporary file (with `O_TMPFILE`) in `XDG_CACHE_HOME`. If we
/// cannot create the file there it will be created in `/tmp` instead.
pub fn create_tmp_file() -> std::io::Result<std::fs::File> {
    static TMP_PATH: OnceLock<std::path::PathBuf> = OnceLock::new();
    let tmp_path = TMP_PATH.get_or_init(|| {
        xdg::BaseDirectories::new()
            .map(|base| base.get_cache_home())
            .unwrap_or_else(|_| std::path::PathBuf::from("/tmp"))
    });

    let mut open_opts_binding = std::fs::OpenOptions::new();
    let builder = open_opts_binding
        .write(true)
        .read(true)
        .custom_flags(libc::O_TMPFILE);
    builder.open(tmp_path).or_else(|err| {
        if tmp_path != std::path::Path::new("/tmp") {
            builder.open("/tmp")
        } else {
            Err(err)
        }
    })
}
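Because `O_TMPFILE` yields an anonymous inode, the file needs no cleanup; it vanishes when the handle is dropped. A small sketch:

    use std::io::{Seek, SeekFrom, Write};

    let mut tmp = create_tmp_file()?;
    tmp.write_all(b"scratch data")?;
    tmp.seek(SeekFrom::Start(0))?;
    // ... read it back or hand it to a decoder; nothing to unlink afterwards.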
@ -4,11 +4,11 @@ version = "0.1.0"
authors.workspace = true
edition.workspace = true
description = "Configuration file management for PBS"
rust-version.workspace = true

[dependencies]
anyhow.workspace = true
const_format.workspace = true
lazy_static.workspace = true
libc.workspace = true
nix.workspace = true
once_cell.workspace = true

@ -17,6 +17,7 @@ regex.workspace = true
serde.workspace = true
serde_json.workspace = true

proxmox-notify.workspace = true
proxmox-router = { workspace = true, default-features = false }
proxmox-schema.workspace = true
proxmox-section-config.workspace = true
@ -2,26 +2,26 @@ use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::io::Write;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::{Arc, RwLock};
use std::sync::{Arc, LazyLock, RwLock};

use anyhow::{bail, Error};

use lazy_static::lazy_static;

use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema};

use pbs_api_types::{Authid, Role, Userid, ROLE_NAME_NO_ACCESS};

use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};

lazy_static! {
    /// Map of pre-defined [Roles](Role) to their associated [privileges](PRIVILEGES) combination
    /// and description.
    pub static ref ROLE_NAMES: HashMap<&'static str, (u64, &'static str)> = {
pub static ROLE_NAMES: LazyLock<HashMap<&'static str, (u64, &'static str)>> = LazyLock::new(|| {
    let mut map = HashMap::new();

    let list = match Role::API_SCHEMA {
        Schema::String(StringSchema { format: Some(ApiStringFormat::Enum(list)), .. }) => list,
        Schema::String(StringSchema {
            format: Some(ApiStringFormat::Enum(list)),
            ..
        }) => list,
        _ => unreachable!(),
    };

@ -31,8 +31,7 @@ lazy_static! {
    }

    map
    };
}
});
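The same `lazy_static!` to `std::sync::LazyLock` conversion repeats through the pbs-config modules below (cached_user_info, datastore, domains, drive, media_pool, metrics, network, prune, remote, sync, tape jobs, traffic control, user). The shape of the migration in isolation; `LazyLock` is in std since Rust 1.80, matching the workspace's `rust-version`:

    use std::collections::HashMap;
    use std::sync::LazyLock;

    // Before: lazy_static! { static ref MAP: HashMap<&'static str, u8> = build(); }
    // After: no macro, no extra crate, same initialize-on-first-use semantics.
    static MAP: LazyLock<HashMap<&'static str, u8>> = LazyLock::new(build);

    fn build() -> HashMap<&'static str, u8> {
        HashMap::from([("example", 1)])
    }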
pub fn split_acl_path(path: &str) -> Vec<&str> {
    let items = path.split('/');

@ -100,7 +99,8 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> {
        return Ok(());
    }
    match components[1] {
        "certificates" | "disks" | "log" | "status" | "tasks" | "time" => {
        "certificates" | "disks" | "log" | "notifications" | "status" | "tasks"
        | "time" => {
            if components_len == 2 {
                return Ok(());
            }

@ -721,13 +721,13 @@ pub fn cached_config() -> Result<Arc<AclTree>, Error> {
    last_mtime_nsec: i64,
}

lazy_static! {
    static ref CACHED_CONFIG: RwLock<ConfigCache> = RwLock::new(ConfigCache {
static CACHED_CONFIG: LazyLock<RwLock<ConfigCache>> = LazyLock::new(|| {
    RwLock::new(ConfigCache {
        data: None,
        last_mtime: 0,
        last_mtime_nsec: 0
        last_mtime_nsec: 0,
    })
});
}

let stat = match nix::sys::stat::stat(ACL_CFG_FILENAME) {
    Ok(stat) => Some(stat),

@ -1046,14 +1046,14 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup
        let node = tree.find_node(path);
        assert!(node.is_some());
        if let Some(node) = node {
            assert!(node.users.get(&user1).is_none());
            assert!(!node.users.contains_key(&user1));
        }
    }
    for path in &user2_paths {
        let node = tree.find_node(path);
        assert!(node.is_some());
        if let Some(node) = node {
            assert!(node.users.get(&user2).is_some());
            assert!(node.users.contains_key(&user2));
        }
    }

@ -1063,7 +1063,7 @@ acl:1:/storage/store1:user1@pbs:DatastoreBackup
        let node = tree.find_node(path);
        assert!(node.is_some());
        if let Some(node) = node {
            assert!(node.users.get(&user2).is_none());
            assert!(!node.users.contains_key(&user2));
        }
    }
@ -1,9 +1,8 @@
//! Cached user info for fast ACL permission checks

use std::sync::{Arc, RwLock};
use std::sync::{Arc, LazyLock, RwLock};

use anyhow::{bail, Error};
use lazy_static::lazy_static;

use proxmox_router::UserInformation;
use proxmox_section_config::SectionConfigData;

@ -26,13 +25,13 @@ struct ConfigCache {
    last_user_cache_generation: usize,
}

lazy_static! {
    static ref CACHED_CONFIG: RwLock<ConfigCache> = RwLock::new(ConfigCache {
static CACHED_CONFIG: LazyLock<RwLock<ConfigCache>> = LazyLock::new(|| {
    RwLock::new(ConfigCache {
        data: None,
        last_update: 0,
        last_user_cache_generation: 0
        last_user_cache_generation: 0,
    })
});
}

impl CachedUserInfo {
    /// Returns a cached instance (up to 5 seconds old).

@ -179,7 +178,7 @@ impl CachedUserInfo {
        (privs, propagated_privs)
    }

    /// Checks whether the `auth_id` has any of the privilegs `privs` on any object below `path`.
    /// Checks whether the `auth_id` has any of the privileges `privs` on any object below `path`.
    pub fn any_privs_below(
        &self,
        auth_id: &Authid,
@ -1,6 +1,7 @@
use anyhow::Error;
use lazy_static::lazy_static;
use std::collections::HashMap;
use std::sync::LazyLock;

use anyhow::Error;

use proxmox_schema::{AllOfSchema, ApiType};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};

@ -9,9 +10,7 @@ use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA};

use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard, ConfigVersionCache};

lazy_static! {
    pub static ref CONFIG: SectionConfig = init();
}
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);

fn init() -> SectionConfig {
    const OBJ_SCHEMA: &AllOfSchema = DataStoreConfig::API_SCHEMA.unwrap_all_of_schema();
@ -1,20 +1,19 @@
use std::collections::HashMap;
use std::sync::LazyLock;

use anyhow::Error;
use lazy_static::lazy_static;

use pbs_buildcfg::configdir;
use proxmox_schema::{ApiType, ObjectSchema};
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};

use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
use pbs_api_types::{LdapRealmConfig, OpenIdRealmConfig, REALM_ID_SCHEMA};
use pbs_api_types::{AdRealmConfig, LdapRealmConfig, OpenIdRealmConfig, REALM_ID_SCHEMA};

lazy_static! {
    pub static ref CONFIG: SectionConfig = init();
}
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);

fn init() -> SectionConfig {
    const AD_SCHEMA: &ObjectSchema = AdRealmConfig::API_SCHEMA.unwrap_object_schema();
    const LDAP_SCHEMA: &ObjectSchema = LdapRealmConfig::API_SCHEMA.unwrap_object_schema();
    const OPENID_SCHEMA: &ObjectSchema = OpenIdRealmConfig::API_SCHEMA.unwrap_object_schema();

@ -33,6 +32,10 @@ fn init() -> SectionConfig {

    config.register_plugin(plugin);

    let plugin = SectionConfigPlugin::new("ad".to_string(), Some(String::from("realm")), AD_SCHEMA);

    config.register_plugin(plugin);

    config
}

@ -60,7 +63,7 @@ pub fn save_config(config: &SectionConfigData) -> Result<(), Error> {

/// Check if a realm with the given name exists
pub fn exists(domains: &SectionConfigData, realm: &str) -> bool {
    realm == "pbs" || realm == "pam" || domains.sections.get(realm).is_some()
    realm == "pbs" || realm == "pam" || domains.sections.contains_key(realm)
}

// shell completion helper
@ -95,3 +98,7 @@ pub fn complete_openid_realm_name(_arg: &str, _param: &HashMap<String, String>)
pub fn complete_ldap_realm_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    complete_realm_of_type("ldap")
}

pub fn complete_ad_realm_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
    complete_realm_of_type("ad")
}
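With the `ad` plugin registered, `domains.cfg` can carry Active Directory realm sections next to the `ldap` and `openid` ones. A hypothetical section of the shape this accepts; the property names below mirror the LDAP realm config and are assumptions, not read off `AdRealmConfig`:

    ad: example-ad
    	server1 dc1.example.com
    	base-dn dc=example,dc=com
    	comment Example Active Directory realm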
@ -12,9 +12,9 @@
//! [SectionConfig]: proxmox::api::section_config::SectionConfig

use std::collections::HashMap;
use std::sync::LazyLock;

use anyhow::{bail, Error};
use lazy_static::lazy_static;

use proxmox_schema::*;
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};

@ -23,10 +23,8 @@ use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};

use pbs_api_types::{LtoTapeDrive, ScsiTapeChanger, VirtualTapeDrive, DRIVE_NAME_SCHEMA};

lazy_static! {
    /// Static [`SectionConfig`] to access parser/writer functions.
    pub static ref CONFIG: SectionConfig = init();
}
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);

fn init() -> SectionConfig {
    let mut config = SectionConfig::new(&DRIVE_NAME_SCHEMA);
@ -7,6 +7,7 @@ pub mod drive;
pub mod media_pool;
pub mod metrics;
pub mod network;
pub mod notifications;
pub mod prune;
pub mod remote;
pub mod sync;
@ -7,9 +7,9 @@
//! [SectionConfig]: proxmox_section_config::SectionConfig

use std::collections::HashMap;
use std::sync::LazyLock;

use anyhow::Error;
use lazy_static::lazy_static;

use proxmox_schema::*;
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};

@ -18,10 +18,8 @@ use pbs_api_types::{MediaPoolConfig, MEDIA_POOL_NAME_SCHEMA};

use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};

lazy_static! {
    /// Static [`SectionConfig`] to access parser/writer functions.
    pub static ref CONFIG: SectionConfig = init();
}
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);

fn init() -> SectionConfig {
    let mut config = SectionConfig::new(&MEDIA_POOL_NAME_SCHEMA);
@ -1,7 +1,7 @@
use std::collections::HashMap;
use std::sync::LazyLock;

use anyhow::Error;
use lazy_static::lazy_static;

use proxmox_schema::*;
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};

@ -10,9 +10,7 @@ use pbs_api_types::{InfluxDbHttp, InfluxDbUdp, METRIC_SERVER_ID_SCHEMA};

use crate::{open_backup_lockfile, BackupLockGuard};

lazy_static! {
    pub static ref CONFIG: SectionConfig = init();
}
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);

fn init() -> SectionConfig {
    let mut config = SectionConfig::new(&METRIC_SERVER_ID_SCHEMA);
@ -2,10 +2,10 @@ use std::collections::HashMap;
use std::os::unix::io::{AsRawFd, FromRawFd, OwnedFd};
use std::path::Path;
use std::process::Command;
use std::sync::LazyLock;

use anyhow::{bail, format_err, Error};
use const_format::concatcp;
use lazy_static::lazy_static;
use nix::ioctl_read_bad;
use nix::sys::socket::{socket, AddressFamily, SockFlag, SockType};
use regex::Regex;

@ -48,16 +48,14 @@ pub static IPV4_REVERSE_MASK: &[&str] = &[
    "255.255.255.255",
];

lazy_static! {
    pub static ref IPV4_MASK_HASH_LOCALNET: HashMap<&'static str, u8> = {
pub static IPV4_MASK_HASH_LOCALNET: LazyLock<HashMap<&'static str, u8>> = LazyLock::new(|| {
    let mut map = HashMap::new();
    #[allow(clippy::needless_range_loop)]
    for i in 0..IPV4_REVERSE_MASK.len() {
        map.insert(IPV4_REVERSE_MASK[i], i as u8);
    }
    map
    };
}
});

pub fn parse_cidr(cidr: &str) -> Result<(String, u8, bool), Error> {
    let (address, mask, is_v6) = parse_address_or_cidr(cidr)?;

@ -92,12 +90,10 @@ pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), Error> {
    // NOTE: This is NOT the same regex as in proxmox-schema as this one has capture groups for
    // the addresses vs cidr portions!
    lazy_static! {
        pub static ref CIDR_V4_REGEX: Regex =
            Regex::new(concatcp!(r"^(", IPV4RE_STR, r")(?:/(\d{1,2}))?$")).unwrap();
        pub static ref CIDR_V6_REGEX: Regex =
            Regex::new(concatcp!(r"^(", IPV6RE_STR, r")(?:/(\d{1,3}))?$")).unwrap();
    }
    pub static CIDR_V4_REGEX: LazyLock<Regex> =
        LazyLock::new(|| Regex::new(concatcp!(r"^(", IPV4RE_STR, r")(?:/(\d{1,2}))?$")).unwrap());
    pub static CIDR_V6_REGEX: LazyLock<Regex> =
        LazyLock::new(|| Regex::new(concatcp!(r"^(", IPV6RE_STR, r")(?:/(\d{1,3}))?$")).unwrap());

    if let Some(caps) = CIDR_V4_REGEX.captures(cidr) {
        let address = &caps[1];

@ -133,9 +129,9 @@ pub fn get_network_interfaces() -> Result<HashMap<String, bool>, Error> {

ioctl_read_bad!(get_interface_flags, libc::SIOCGIFFLAGS, ifreq);

lazy_static! {
    static ref IFACE_LINE_REGEX: Regex = Regex::new(r"^\s*([^:\s]+):").unwrap();
}
static IFACE_LINE_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^\s*([^:\s]+):").unwrap());

let raw = std::fs::read_to_string(PROC_NET_DEV)
    .map_err(|err| format_err!("unable to read {} - {}", PROC_NET_DEV, err))?;
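What `parse_cidr` hands back, inferred from the signature and the capture groups above (address, prefix length, IPv6 flag):

    let (addr, mask, is_v6) = parse_cidr("192.168.16.0/20")?;
    assert_eq!((addr.as_str(), mask, is_v6), ("192.168.16.0", 20, false));

    let (addr, mask, is_v6) = parse_cidr("fe80::1/64")?;
    assert_eq!((addr.as_str(), mask, is_v6), ("fe80::1", 64, true));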
@ -1,8 +1,7 @@
use std::collections::{HashMap, VecDeque};
use std::io::BufRead;
use std::iter::Iterator;

use lazy_static::lazy_static;
use std::sync::LazyLock;

#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Token {

@ -24,6 +23,8 @@ pub enum Token {
    MTU,
    BridgePorts,
    BridgeVlanAware,
    VlanId,
    VlanRawDevice,
    BondSlaves,
    BondMode,
    BondPrimary,

@ -31,8 +32,7 @@ pub enum Token {
    EOF,
}

lazy_static! {
    static ref KEYWORDS: HashMap<&'static str, Token> = {
static KEYWORDS: LazyLock<HashMap<&'static str, Token>> = LazyLock::new(|| {
    let mut map = HashMap::new();
    map.insert("address", Token::Address);
    map.insert("auto", Token::Auto);

@ -50,6 +50,10 @@ lazy_static! {
    map.insert("bridge_ports", Token::BridgePorts);
    map.insert("bridge-vlan-aware", Token::BridgeVlanAware);
    map.insert("bridge_vlan_aware", Token::BridgeVlanAware);
    map.insert("vlan-id", Token::VlanId);
    map.insert("vlan_id", Token::VlanId);
    map.insert("vlan-raw-device", Token::VlanRawDevice);
    map.insert("vlan_raw_device", Token::VlanRawDevice);
    map.insert("bond-slaves", Token::BondSlaves);
    map.insert("bond_slaves", Token::BondSlaves);
    map.insert("bond-mode", Token::BondMode);

@ -58,8 +62,7 @@ lazy_static! {
    map.insert("bond_xmit_hash_policy", Token::BondXmitHashPolicy);
    map.insert("bond-xmit-hash-policy", Token::BondXmitHashPolicy);
    map
    };
}
});

pub struct Lexer<R> {
    input: R,
@ -1,8 +1,8 @@
use std::collections::{BTreeMap, HashMap, HashSet};
use std::io::Write;
use std::sync::LazyLock;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use regex::Regex;
use serde::de::{value, Deserialize, IntoDeserializer};

@ -23,9 +23,11 @@ use pbs_api_types::{

use crate::{open_backup_lockfile, BackupLockGuard};

lazy_static! {
    static ref PHYSICAL_NIC_REGEX: Regex = Regex::new(r"^(?:eth\d+|en[^:.]+|ib\d+)$").unwrap();
}
static PHYSICAL_NIC_REGEX: LazyLock<Regex> =
    LazyLock::new(|| Regex::new(r"^(?:eth\d+|en[^:.]+|ib\d+)$").unwrap());
static VLAN_INTERFACE_REGEX: LazyLock<Regex> = LazyLock::new(|| {
    Regex::new(r"^(?P<vlan_raw_device>\S+)\.(?P<vlan_id>\d+)|vlan(?P<vlan_id2>\d+)$").unwrap()
});

pub fn is_physical_nic(iface: &str) -> bool {
    PHYSICAL_NIC_REGEX.is_match(iface)

@ -41,6 +43,21 @@ pub fn bond_xmit_hash_policy_from_str(s: &str) -> Result<BondXmitHashPolicy, Err
        .map_err(|_: value::Error| format_err!("invalid bond_xmit_hash_policy '{}'", s))
}

pub fn parse_vlan_id_from_name(iface_name: &str) -> Option<u16> {
    VLAN_INTERFACE_REGEX.captures(iface_name).and_then(|cap| {
        cap.name("vlan_id")
            .or(cap.name("vlan_id2"))
            .and_then(|id| id.as_str().parse::<u16>().ok())
    })
}

pub fn parse_vlan_raw_device_from_name(iface_name: &str) -> Option<&str> {
    VLAN_INTERFACE_REGEX
        .captures(iface_name)
        .and_then(|cap| cap.name("vlan_raw_device"))
        .map(Into::into)
}
// Write attributes not depending on address family
fn write_iface_attributes(iface: &Interface, w: &mut dyn Write) -> Result<(), Error> {
    static EMPTY_LIST: Vec<String> = Vec::new();

@ -79,6 +96,14 @@ fn write_iface_attributes(iface: &Interface, w: &mut dyn Write) -> Result<(), Er
                writeln!(w, "\tbond-slaves {}", slaves.join(" "))?;
            }
        }
        NetworkInterfaceType::Vlan => {
            if let Some(vlan_id) = iface.vlan_id {
                writeln!(w, "\tvlan-id {vlan_id}")?;
            }
            if let Some(vlan_raw_device) = &iface.vlan_raw_device {
                writeln!(w, "\tvlan-raw-device {vlan_raw_device}")?;
            }
        }
        _ => {}
    }

@ -243,7 +268,7 @@ impl NetworkConfig {
    }

    /// Check if ports are used only once
    pub fn check_port_usage(&self) -> Result<(), Error> {
    fn check_port_usage(&self) -> Result<(), Error> {
        let mut used_ports = HashMap::new();
        let mut check_port_usage = |iface, ports: &Vec<String>| {
            for port in ports.iter() {

@ -272,7 +297,7 @@ impl NetworkConfig {
    }

    /// Check if child mtu is less or equal than parent mtu
    pub fn check_mtu(&self, parent_name: &str, child_name: &str) -> Result<(), Error> {
    fn check_mtu(&self, parent_name: &str, child_name: &str) -> Result<(), Error> {
        let parent = self
            .interfaces
            .get(parent_name)

@ -312,7 +337,7 @@ impl NetworkConfig {
    }

    /// Check if bond slaves exists
    pub fn check_bond_slaves(&self) -> Result<(), Error> {
    fn check_bond_slaves(&self) -> Result<(), Error> {
        for (iface, interface) in self.interfaces.iter() {
            if let Some(slaves) = &interface.slaves {
                for slave in slaves.iter() {

@ -340,10 +365,9 @@ impl NetworkConfig {
    }

    /// Check if bridge ports exists
    pub fn check_bridge_ports(&self) -> Result<(), Error> {
        lazy_static! {
            static ref VLAN_INTERFACE_REGEX: Regex = Regex::new(r"^(\S+)\.(\d+)$").unwrap();
        }
    fn check_bridge_ports(&self) -> Result<(), Error> {
        static VLAN_INTERFACE_REGEX: LazyLock<Regex> =
            LazyLock::new(|| Regex::new(r"^(\S+)\.(\d+)$").unwrap());

        for (iface, interface) in self.interfaces.iter() {
            if let Some(ports) = &interface.bridge_ports {

@ -364,7 +388,7 @@ impl NetworkConfig {
        Ok(())
    }

    pub fn write_config(&self, w: &mut dyn Write) -> Result<(), Error> {
    fn write_config(&self, w: &mut dyn Write) -> Result<(), Error> {
        self.check_port_usage()?;
        self.check_bond_slaves()?;
        self.check_bridge_ports()?;

@ -505,148 +529,159 @@ pub fn complete_port_list(arg: &str, _param: &HashMap<String, String>) -> Vec<St
}

#[cfg(test)]
mod test {

    use anyhow::Error;

mod tests {
    use super::*;

    use NetworkConfigMethod::*;
    use NetworkInterfaceType::*;
    use NetworkOrderEntry::*;

    #[test]
    fn test_network_config_create_lo_1() -> Result<(), Error> {
        let input = "";

        let mut parser = NetworkParser::new(input.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        let expected = "auto lo\niface lo inet loopback\n\n";
        assert_eq!(output, expected);

        // run again using output as input
        let mut parser = NetworkParser::new(output.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        assert_eq!(output, expected);

        Ok(())
    fn test_write_network_config_manual() {
        let iface_name = String::from("enp3s0");
        let mut iface = Interface::new(iface_name.clone());
        iface.interface_type = Eth;
        iface.method = Some(Manual);
        iface.active = true;

        let nw_config = NetworkConfig {
            interfaces: BTreeMap::from([(iface_name.clone(), iface)]),
            order: vec![Iface(iface_name.clone())],
        };

        assert_eq!(
            String::try_from(nw_config).unwrap().trim(),
            r#"iface enp3s0 inet manual"#
        );
    }

    #[test]
    fn test_network_config_create_lo_2() -> Result<(), Error> {
        let input = "#c1\n\n#c2\n\niface test inet manual\n";

        let mut parser = NetworkParser::new(input.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        // Note: loopback should be added in front of other interfaces
        let expected = "#c1\n#c2\n\nauto lo\niface lo inet loopback\n\niface test inet manual\n\n";
        assert_eq!(output, expected);

        Ok(())
    fn test_write_network_config_static() {
        let iface_name = String::from("enp3s0");
        let mut iface = Interface::new(iface_name.clone());
        iface.interface_type = Eth;
        iface.method = Some(Static);
        iface.cidr = Some(String::from("10.0.0.100/16"));
        iface.active = true;

        let nw_config = NetworkConfig {
            interfaces: BTreeMap::from([(iface_name.clone(), iface)]),
            order: vec![Iface(iface_name.clone())],
        };
        assert_eq!(
            String::try_from(nw_config).unwrap().trim(),
            r#"
iface enp3s0 inet static
	address 10.0.0.100/16"#
                .to_string()
                .trim()
        );
    }

    #[test]
    fn test_network_config_parser_no_blank_1() -> Result<(), Error> {
        let input = "auto lo\n\
            iface lo inet loopback\n\
            iface lo inet6 loopback\n\
            auto ens18\n\
            iface ens18 inet static\n\
            \taddress 192.168.20.144/20\n\
            \tgateway 192.168.16.1\n\
            # comment\n\
            iface ens20 inet static\n\
            \taddress 192.168.20.145/20\n\
            iface ens21 inet manual\n\
            iface ens22 inet manual\n";

        let mut parser = NetworkParser::new(input.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        let expected = "auto lo\n\
            iface lo inet loopback\n\
            \n\
            iface lo inet6 loopback\n\
            \n\
            auto ens18\n\
            iface ens18 inet static\n\
            \taddress 192.168.20.144/20\n\
            \tgateway 192.168.16.1\n\
            #comment\n\
            \n\
            iface ens20 inet static\n\
            \taddress 192.168.20.145/20\n\
            \n\
            iface ens21 inet manual\n\
            \n\
            iface ens22 inet manual\n\
            \n";
        assert_eq!(output, expected);

        Ok(())
    fn test_write_network_config_static_with_gateway() {
        let iface_name = String::from("enp3s0");
        let mut iface = Interface::new(iface_name.clone());
        iface.interface_type = Eth;
        iface.method = Some(Static);
        iface.cidr = Some(String::from("10.0.0.100/16"));
        iface.gateway = Some(String::from("10.0.0.1"));
        iface.active = true;

        let nw_config = NetworkConfig {
            interfaces: BTreeMap::from([(iface_name.clone(), iface)]),
            order: vec![Iface(iface_name.clone())],
        };
        assert_eq!(
            String::try_from(nw_config).unwrap().trim(),
            r#"
iface enp3s0 inet static
	address 10.0.0.100/16
	gateway 10.0.0.1"#
                .to_string()
                .trim()
        );
    }

    #[test]
    fn test_network_config_parser_no_blank_2() -> Result<(), Error> {
        // Adapted from bug 2926
        let input = "### Hetzner Online GmbH installimage\n\
            \n\
            source /etc/network/interfaces.d/*\n\
            \n\
            auto lo\n\
            iface lo inet loopback\n\
            iface lo inet6 loopback\n\
            \n\
            auto enp4s0\n\
            iface enp4s0 inet static\n\
            \taddress 10.10.10.10/24\n\
            \tgateway 10.10.10.1\n\
            \t# route 10.10.20.10/24 via 10.10.20.1\n\
            \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\
            \n\
            iface enp4s0 inet6 static\n\
            \taddress fe80::5496:35ff:fe99:5a6a/64\n\
            \tgateway fe80::1\n";

        let mut parser = NetworkParser::new(input.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        let expected = "### Hetzner Online GmbH installimage\n\
            \n\
            source /etc/network/interfaces.d/*\n\
            \n\
            auto lo\n\
            iface lo inet loopback\n\
            \n\
            iface lo inet6 loopback\n\
            \n\
            auto enp4s0\n\
            iface enp4s0 inet static\n\
            \taddress 10.10.10.10/24\n\
            \tgateway 10.10.10.1\n\
            \t# route 10.10.20.10/24 via 10.10.20.1\n\
            \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\
            \n\
            iface enp4s0 inet6 static\n\
            \taddress fe80::5496:35ff:fe99:5a6a/64\n\
            \tgateway fe80::1\n\
            \n";
        assert_eq!(output, expected);

        Ok(())
    fn test_write_network_config_vlan_id_in_name() {
        let iface_name = String::from("vmbr0.100");
        let mut iface = Interface::new(iface_name.clone());
        iface.interface_type = Vlan;
        iface.method = Some(Manual);
        iface.active = true;

        let nw_config = NetworkConfig {
            interfaces: BTreeMap::from([(iface_name.clone(), iface)]),
            order: vec![Iface(iface_name.clone())],
        };
        assert_eq!(
            String::try_from(nw_config).unwrap().trim(),
            "iface vmbr0.100 inet manual"
        );
    }

    #[test]
    fn test_write_network_config_vlan_with_raw_device() {
        let iface_name = String::from("vlan100");
        let mut iface = Interface::new(iface_name.clone());
        iface.interface_type = Vlan;
        iface.vlan_raw_device = Some(String::from("vmbr0"));
        iface.method = Some(Manual);
        iface.active = true;

        let nw_config = NetworkConfig {
            interfaces: BTreeMap::from([(iface_name.clone(), iface)]),
            order: vec![Iface(iface_name.clone())],
        };
        assert_eq!(
            String::try_from(nw_config).unwrap().trim(),
            r#"
iface vlan100 inet manual
	vlan-raw-device vmbr0"#
                .trim()
        );
    }

    #[test]
    fn test_write_network_config_vlan_with_individual_name() {
        let iface_name = String::from("individual_name");
        let mut iface = Interface::new(iface_name.clone());
        iface.interface_type = Vlan;
        iface.vlan_raw_device = Some(String::from("vmbr0"));
        iface.vlan_id = Some(100);
        iface.method = Some(Manual);
        iface.active = true;

        let nw_config = NetworkConfig {
            interfaces: BTreeMap::from([(iface_name.clone(), iface)]),
            order: vec![Iface(iface_name.clone())],
        };
        assert_eq!(
            String::try_from(nw_config).unwrap().trim(),
            r#"
iface individual_name inet manual
	vlan-id 100
	vlan-raw-device vmbr0"#
                .trim()
        );
    }

    #[test]
    fn test_vlan_parse_vlan_id_from_name() {
        assert_eq!(parse_vlan_id_from_name("vlan100"), Some(100));
        assert_eq!(parse_vlan_id_from_name("vlan"), None);
        assert_eq!(parse_vlan_id_from_name("arbitrary"), None);
        assert_eq!(parse_vlan_id_from_name("vmbr0.100"), Some(100));
        assert_eq!(parse_vlan_id_from_name("vmbr0"), None);
        // assert_eq!(parse_vlan_id_from_name("vmbr0.1.400"), Some(400)); // NOTE ifupdown2 does actually support this
    }

    #[test]
    fn test_vlan_parse_vlan_raw_device_from_name() {
        assert_eq!(parse_vlan_raw_device_from_name("vlan100"), None);
        assert_eq!(parse_vlan_raw_device_from_name("arbitrary"), None);
        assert_eq!(parse_vlan_raw_device_from_name("vmbr0"), None);
        assert_eq!(parse_vlan_raw_device_from_name("vmbr0.200"), Some("vmbr0"));
    }
}
@ -1,9 +1,11 @@
use crate::network::VLAN_INTERFACE_REGEX;

use std::collections::{HashMap, HashSet};
use std::io::BufRead;
use std::iter::{Iterator, Peekable};
use std::sync::LazyLock;

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use regex::Regex;

use super::helper::*;

@ -361,6 +363,20 @@ impl<R: BufRead> NetworkParser<R> {
                interface.bond_xmit_hash_policy = Some(policy);
                self.eat(Token::Newline)?;
            }
            Token::VlanId => {
                self.eat(Token::VlanId)?;
                let vlan_id = self.next_text()?.parse()?;
                interface.vlan_id = Some(vlan_id);
                set_interface_type(interface, NetworkInterfaceType::Vlan)?;
                self.eat(Token::Newline)?;
            }
            Token::VlanRawDevice => {
                self.eat(Token::VlanRawDevice)?;
                let vlan_raw_device = self.next_text()?;
                interface.vlan_raw_device = Some(vlan_raw_device);
                set_interface_type(interface, NetworkInterfaceType::Vlan)?;
                self.eat(Token::Newline)?;
            }
            _ => {
                // parse addon attributes
                let option = self.parse_to_eol()?;

@ -473,11 +489,11 @@ impl<R: BufRead> NetworkParser<R> {
        &mut self,
        existing_interfaces: Option<&HashMap<String, bool>>,
    ) -> Result<NetworkConfig, Error> {
        self._parse_interfaces(existing_interfaces)
        self.do_parse_interfaces(existing_interfaces)
            .map_err(|err| format_err!("line {}: {}", self.line_nr, err))
    }

    pub fn _parse_interfaces(
    fn do_parse_interfaces(
        &mut self,
        existing_interfaces: Option<&HashMap<String, bool>>,
    ) -> Result<NetworkConfig, Error> {

@ -520,10 +536,8 @@ impl<R: BufRead> NetworkParser<R> {
            }
        }

        lazy_static! {
            static ref INTERFACE_ALIAS_REGEX: Regex = Regex::new(r"^\S+:\d+$").unwrap();
            static ref VLAN_INTERFACE_REGEX: Regex = Regex::new(r"^\S+\.\d+$").unwrap();
        }
        static INTERFACE_ALIAS_REGEX: LazyLock<Regex> =
            LazyLock::new(|| Regex::new(r"^\S+:\d+$").unwrap());

        if let Some(existing_interfaces) = existing_interfaces {
            for (iface, active) in existing_interfaces.iter() {

@ -570,7 +584,7 @@ impl<R: BufRead> NetworkParser<R> {
            }
        }

        if config.interfaces.get("lo").is_none() {
        if !config.interfaces.contains_key("lo") {
            let mut interface = Interface::new(String::from("lo"));
            set_method_v4(&mut interface, NetworkConfigMethod::Loopback)?;
            interface.interface_type = NetworkInterfaceType::Loopback;

@ -602,3 +616,231 @@ impl<R: BufRead> NetworkParser<R> {
        Ok(config)
    }
}

#[cfg(test)]
mod test {

    use anyhow::Error;

    use super::*;

    #[test]
    fn test_network_config_create_lo_1() -> Result<(), Error> {
        let input = "";

        let mut parser = NetworkParser::new(input.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        let expected = "auto lo\niface lo inet loopback\n\n";
        assert_eq!(output, expected);

        // run again using output as input
        let mut parser = NetworkParser::new(output.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        assert_eq!(output, expected);

        Ok(())
    }

    #[test]
    fn test_network_config_create_lo_2() -> Result<(), Error> {
        let input = "#c1\n\n#c2\n\niface test inet manual\n";

        let mut parser = NetworkParser::new(input.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        // Note: loopback should be added in front of other interfaces
        let expected = "#c1\n#c2\n\nauto lo\niface lo inet loopback\n\niface test inet manual\n\n";
        assert_eq!(output, expected);

        Ok(())
    }

    #[test]
    fn test_network_config_parser_no_blank_1() -> Result<(), Error> {
        let input = "auto lo\n\
            iface lo inet loopback\n\
            iface lo inet6 loopback\n\
            auto ens18\n\
            iface ens18 inet static\n\
            \taddress 192.168.20.144/20\n\
            \tgateway 192.168.16.1\n\
            # comment\n\
            iface ens20 inet static\n\
            \taddress 192.168.20.145/20\n\
            iface ens21 inet manual\n\
            iface ens22 inet manual\n";

        let mut parser = NetworkParser::new(input.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        let expected = "auto lo\n\
            iface lo inet loopback\n\
            \n\
            iface lo inet6 loopback\n\
            \n\
            auto ens18\n\
            iface ens18 inet static\n\
            \taddress 192.168.20.144/20\n\
            \tgateway 192.168.16.1\n\
            #comment\n\
            \n\
            iface ens20 inet static\n\
            \taddress 192.168.20.145/20\n\
            \n\
            iface ens21 inet manual\n\
            \n\
            iface ens22 inet manual\n\
            \n";
        assert_eq!(output, expected);

        Ok(())
    }

    #[test]
    fn test_network_config_parser_no_blank_2() -> Result<(), Error> {
        // Adapted from bug 2926
        let input = "### Hetzner Online GmbH installimage\n\
            \n\
            source /etc/network/interfaces.d/*\n\
            \n\
            auto lo\n\
            iface lo inet loopback\n\
            iface lo inet6 loopback\n\
            \n\
            auto enp4s0\n\
            iface enp4s0 inet static\n\
            \taddress 10.10.10.10/24\n\
            \tgateway 10.10.10.1\n\
            \t# route 10.10.20.10/24 via 10.10.20.1\n\
            \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\
            \n\
            iface enp4s0 inet6 static\n\
            \taddress fe80::5496:35ff:fe99:5a6a/64\n\
            \tgateway fe80::1\n";

        let mut parser = NetworkParser::new(input.as_bytes());

        let config = parser.parse_interfaces(None)?;

        let output = String::try_from(config)?;

        let expected = "### Hetzner Online GmbH installimage\n\
            \n\
            source /etc/network/interfaces.d/*\n\
            \n\
            auto lo\n\
            iface lo inet loopback\n\
            \n\
            iface lo inet6 loopback\n\
            \n\
            auto enp4s0\n\
            iface enp4s0 inet static\n\
            \taddress 10.10.10.10/24\n\
            \tgateway 10.10.10.1\n\
            \t# route 10.10.20.10/24 via 10.10.20.1\n\
            \tup route add -net 10.10.20.10 netmask 255.255.255.0 gw 10.10.20.1 dev enp4s0\n\
            \n\
            iface enp4s0 inet6 static\n\
            \taddress fe80::5496:35ff:fe99:5a6a/64\n\
            \tgateway fe80::1\n\
            \n";
        assert_eq!(output, expected);

        Ok(())
    }

    #[test]
    fn test_network_config_parser_vlan_id_in_name() {
        let input = "iface vmbr0.100 inet static manual";
        let mut parser = NetworkParser::new(input.as_bytes());
        let config = parser.parse_interfaces(None).unwrap();

        let iface = config.interfaces.get("vmbr0.100").unwrap();
        assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan);
        assert_eq!(iface.vlan_raw_device, None);
        assert_eq!(iface.vlan_id, None);
    }

    #[test]
    fn test_network_config_parser_vlan_with_raw_device() {
        let input = r#"
iface vlan100 inet manual
	vlan-raw-device vmbr0"#;

        let mut parser = NetworkParser::new(input.as_bytes());
        let config = parser.parse_interfaces(None).unwrap();

        let iface = config.interfaces.get("vlan100").unwrap();
        assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan);
        assert_eq!(iface.vlan_raw_device, Some(String::from("vmbr0")));
        assert_eq!(iface.vlan_id, None);
    }

    #[test]
    fn test_network_config_parser_vlan_with_raw_device_static() {
        let input = r#"
iface vlan100 inet static
	vlan-raw-device vmbr0
	address 10.0.0.100/16"#;

        let mut parser = NetworkParser::new(input.as_bytes());
        let config = parser.parse_interfaces(None).unwrap();

        let iface = config.interfaces.get("vlan100").unwrap();
        assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan);
        assert_eq!(iface.vlan_raw_device, Some(String::from("vmbr0")));
        assert_eq!(iface.vlan_id, None);
        assert_eq!(iface.method, Some(NetworkConfigMethod::Static));
        assert_eq!(iface.cidr, Some(String::from("10.0.0.100/16")));
    }

    #[test]
    fn test_network_config_parser_vlan_individual_name() {
        let input = r#"
iface individual_name inet manual
	vlan-id 100
	vlan-raw-device vmbr0"#;

        let mut parser = NetworkParser::new(input.as_bytes());
        let config = parser.parse_interfaces(None).unwrap();

        let iface = config.interfaces.get("individual_name").unwrap();
        assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan);
        assert_eq!(iface.vlan_raw_device, Some(String::from("vmbr0")));
        assert_eq!(iface.vlan_id, Some(100));
    }

    #[test]
    fn test_network_config_parser_vlan_individual_name_static() {
        let input = r#"
iface individual_name inet static
	vlan-id 100
	vlan-raw-device vmbr0
	address 10.0.0.100/16
"#;

        let mut parser = NetworkParser::new(input.as_bytes());
        let config = parser.parse_interfaces(None).unwrap();

        let iface = config.interfaces.get("individual_name").unwrap();
        assert_eq!(iface.interface_type, NetworkInterfaceType::Vlan);
        assert_eq!(iface.vlan_raw_device, Some(String::from("vmbr0")));
        assert_eq!(iface.vlan_id, Some(100));
        assert_eq!(iface.method, Some(NetworkConfigMethod::Static));
        assert_eq!(iface.cidr, Some(String::from("10.0.0.100/16")));
    }
}
41
pbs-config/src/notifications.rs
Normal file

@ -0,0 +1,41 @@
use anyhow::Error;

use proxmox_notify::Config;

use pbs_buildcfg::configdir;

use crate::{open_backup_lockfile, BackupLockGuard};

/// Configuration file location for notification targets/matchers.
pub const NOTIFICATION_CONFIG_PATH: &str = configdir!("/notifications.cfg");

/// Private configuration file location for secrets - only readable by `root`.
pub const NOTIFICATION_PRIV_CONFIG_PATH: &str = configdir!("/notifications-priv.cfg");

/// Lockfile to prevent concurrent write access.
pub const NOTIFICATION_LOCK_FILE: &str = configdir!("/.notifications.lck");

/// Get exclusive lock for `notifications.cfg`
pub fn lock_config() -> Result<BackupLockGuard, Error> {
    open_backup_lockfile(NOTIFICATION_LOCK_FILE, None, true)
}

/// Load notification config.
pub fn config() -> Result<Config, Error> {
    let content =
        proxmox_sys::fs::file_read_optional_string(NOTIFICATION_CONFIG_PATH)?.unwrap_or_default();

    let priv_content = proxmox_sys::fs::file_read_optional_string(NOTIFICATION_PRIV_CONFIG_PATH)?
        .unwrap_or_default();

    Ok(Config::new(&content, &priv_content)?)
}

/// Save notification config.
pub fn save_config(config: Config) -> Result<(), Error> {
    let (cfg, priv_cfg) = config.write()?;
    crate::replace_backup_config(NOTIFICATION_CONFIG_PATH, cfg.as_bytes())?;
    crate::replace_secret_config(NOTIFICATION_PRIV_CONFIG_PATH, priv_cfg.as_bytes())?;

    Ok(())
}
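The intended read-modify-write cycle for the new module takes the lock first, then loads, mutates, and saves. A sketch with the mutation step left abstract:

    fn update_notifications() -> Result<(), anyhow::Error> {
        // Hold the exclusive lock for the whole cycle so concurrent writers
        // cannot interleave; it is released when `_lock` drops.
        let _lock = pbs_config::notifications::lock_config()?;
        let config = pbs_config::notifications::config()?;
        // ... mutate `config` through the proxmox_notify API ...
        pbs_config::notifications::save_config(config)?;
        Ok(())
    }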
@ -1,7 +1,7 @@
use std::collections::HashMap;

use anyhow::Error;
use lazy_static::lazy_static;
use std::sync::LazyLock;

use proxmox_schema::*;
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};

@ -10,9 +10,7 @@ use pbs_api_types::{PruneJobConfig, JOB_ID_SCHEMA};

use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};

lazy_static! {
    pub static ref CONFIG: SectionConfig = init();
}
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);

fn init() -> SectionConfig {
    const OBJ_SCHEMA: &AllOfSchema = PruneJobConfig::API_SCHEMA.unwrap_all_of_schema();
@ -1,7 +1,7 @@
use std::collections::HashMap;
use std::sync::LazyLock;

use anyhow::Error;
use lazy_static::lazy_static;

use proxmox_schema::*;
use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};

@ -10,9 +10,7 @@ use pbs_api_types::{Remote, REMOTE_ID_SCHEMA};

use crate::{open_backup_lockfile, BackupLockGuard};

lazy_static! {
    pub static ref CONFIG: SectionConfig = init();
}
pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);

fn init() -> SectionConfig {
    let obj_schema = match Remote::API_SCHEMA {
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
+use std::sync::LazyLock;
 
 use anyhow::Error;
-use lazy_static::lazy_static;
 
 use proxmox_schema::{ApiType, Schema};
 use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
@@ -10,9 +10,7 @@ use pbs_api_types::{SyncJobConfig, JOB_ID_SCHEMA};
 
 use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
 
-lazy_static! {
-    pub static ref CONFIG: SectionConfig = init();
-}
+pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
 
 fn init() -> SectionConfig {
     let obj_schema = match SyncJobConfig::API_SCHEMA {
@@ -1,6 +1,6 @@
 use anyhow::Error;
-use lazy_static::lazy_static;
 use std::collections::HashMap;
+use std::sync::LazyLock;
 
 use proxmox_schema::{ApiType, Schema};
 use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
@@ -9,9 +9,7 @@ use pbs_api_types::{TapeBackupJobConfig, JOB_ID_SCHEMA};
 
 use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
 
-lazy_static! {
-    pub static ref CONFIG: SectionConfig = init();
-}
+pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
 
 fn init() -> SectionConfig {
     let obj_schema = match TapeBackupJobConfig::API_SCHEMA {
@@ -1,8 +1,8 @@
 //! Traffic Control Settings (Network rate limits)
 use std::collections::HashMap;
+use std::sync::LazyLock;
 
 use anyhow::Error;
-use lazy_static::lazy_static;
 
 use proxmox_schema::{ApiType, Schema};
 
@@ -13,10 +13,8 @@ use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
 use crate::ConfigVersionCache;
 use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
 
-lazy_static! {
-    /// Static [`SectionConfig`] to access parser/writer functions.
-    pub static ref CONFIG: SectionConfig = init();
-}
+pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
 
 fn init() -> SectionConfig {
     let mut config = SectionConfig::new(&TRAFFIC_CONTROL_ID_SCHEMA);
@@ -1,8 +1,7 @@
 use std::collections::HashMap;
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, LazyLock, RwLock};
 
 use anyhow::{bail, Error};
-use lazy_static::lazy_static;
 
 use proxmox_schema::*;
 use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
@@ -13,9 +12,7 @@ use crate::ConfigVersionCache;
 
 use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
 
-lazy_static! {
-    pub static ref CONFIG: SectionConfig = init();
-}
+pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
 
 fn init() -> SectionConfig {
     let mut config = SectionConfig::new(&Authid::API_SCHEMA);
@@ -57,7 +54,7 @@ pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
     let digest = openssl::sha::sha256(content.as_bytes());
     let mut data = CONFIG.parse(USER_CFG_FILENAME, &content)?;
 
-    if data.sections.get("root@pam").is_none() {
+    if !data.sections.contains_key("root@pam") {
         let user: User = User {
             userid: Userid::root_userid().clone(),
             comment: Some("Superuser".to_string()),
@@ -80,13 +77,13 @@ pub fn cached_config() -> Result<Arc<SectionConfigData>, Error> {
         last_mtime_nsec: i64,
     }
 
-    lazy_static! {
-        static ref CACHED_CONFIG: RwLock<ConfigCache> = RwLock::new(ConfigCache {
+    static CACHED_CONFIG: LazyLock<RwLock<ConfigCache>> = LazyLock::new(|| {
+        RwLock::new(ConfigCache {
             data: None,
             last_mtime: 0,
-            last_mtime_nsec: 0
-        });
-    }
+            last_mtime_nsec: 0,
+        })
+    });
 
     let stat = match nix::sys::stat::stat(USER_CFG_FILENAME) {
         Ok(stat) => Some(stat),
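The CACHED_CONFIG change above uses the closure form, a LazyLock wrapping an RwLock; a self-contained sketch with illustrative fields:

use std::sync::{LazyLock, RwLock};

// Illustrative stand-in for the ConfigCache in the hunk above.
struct ConfigCache {
    data: Option<String>,
    last_mtime: i64,
    last_mtime_nsec: i64,
}

// The closure builds the initial value lazily; the RwLock then mediates
// concurrent readers and writers of the cache.
static CACHED_CONFIG: LazyLock<RwLock<ConfigCache>> = LazyLock::new(|| {
    RwLock::new(ConfigCache {
        data: None,
        last_mtime: 0,
        last_mtime_nsec: 0,
    })
});

fn main() {
    let cache = CACHED_CONFIG.read().unwrap();
    assert!(cache.data.is_none());
}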
@@ -1,7 +1,7 @@
 use std::collections::HashMap;
+use std::sync::LazyLock;
 
 use anyhow::Error;
-use lazy_static::lazy_static;
 
 use proxmox_schema::*;
 use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin};
@@ -10,9 +10,7 @@ use pbs_api_types::{VerificationJobConfig, JOB_ID_SCHEMA};
 
 use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard};
 
-lazy_static! {
-    pub static ref CONFIG: SectionConfig = init();
-}
+pub static CONFIG: LazyLock<SectionConfig> = LazyLock::new(init);
 
 fn init() -> SectionConfig {
     let obj_schema = match VerificationJobConfig::API_SCHEMA {
@@ -4,6 +4,7 @@ version = "0.1.0"
 authors.workspace = true
 edition.workspace = true
 description = "low level pbs data storage access"
+rust-version.workspace = true
 
 [dependencies]
 anyhow.workspace = true
@@ -12,7 +13,6 @@ crc32fast.workspace = true
 endian_trait.workspace = true
 futures.workspace = true
 hex = { workspace = true, features = [ "serde" ] }
-lazy_static.workspace = true
 libc.workspace = true
 log.workspace = true
 nix.workspace = true
@@ -20,21 +20,24 @@ openssl.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 tokio = { workspace = true, features = [] }
+tracing.workspace = true
 walkdir.workspace = true
 zstd.workspace = true
+zstd-safe.workspace = true
 
 pathpatterns.workspace = true
 pxar.workspace = true
 
 proxmox-borrow.workspace = true
-proxmox-io.workspace = true
 proxmox-human-byte.workspace = true
+proxmox-io.workspace = true
 proxmox-lang.workspace=true
 proxmox-schema = { workspace = true, features = [ "api-macro" ] }
 proxmox-serde = { workspace = true, features = [ "serde_json" ] }
-proxmox-sys.workspace = true
 proxmox-time.workspace = true
 proxmox-uuid.workspace = true
+proxmox-sys.workspace = true
+proxmox-worker-task.workspace = true
 
 pbs-api-types.workspace = true
 pbs-buildcfg.workspace = true
@@ -415,7 +415,9 @@ impl BackupDir {
 
     /// Returns the absolute path for backup_dir, using the cached formatted time string.
     pub fn full_path(&self) -> PathBuf {
-        self.store.snapshot_path(&self.ns, &self.dir)
+        let mut path = self.store.base_path();
+        path.push(self.relative_path());
+        path
     }
 
     pub fn protected_file(&self) -> PathBuf {
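The new body composes the path from two existing accessors; a minimal sketch of the composition (method names as in the hunk above, inputs illustrative):

use std::path::{Path, PathBuf};

// full_path = datastore base path + snapshot-relative path.
fn full_path(base: &Path, relative: &Path) -> PathBuf {
    let mut path = base.to_path_buf();
    path.push(relative);
    path
}

fn main() {
    // Hypothetical datastore layout for demonstration only.
    let p = full_path(Path::new("/datastore/store1"), Path::new("vm/100/2024-01-01T00:00:00Z"));
    assert_eq!(p, PathBuf::from("/datastore/store1/vm/100/2024-01-01T00:00:00Z"));
}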
@@ -10,7 +10,6 @@ use anyhow::Error;
 use futures::ready;
 use tokio::io::{AsyncRead, AsyncSeek, ReadBuf};
 
-use proxmox_lang::error::io_err_other;
 use proxmox_lang::io_format_err;
 
 use pbs_tools::async_lru_cache::{AsyncCacher, AsyncLruCache};
@@ -182,7 +181,7 @@ where
             this.position += read as u64;
             Ok(())
         }
-        Err(err) => Err(io_err_other(err)),
+        Err(err) => Err(std::io::Error::other(err)),
     };
 
     // future completed, drop
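std::io::Error::other, stable since Rust 1.74, boxes any compatible error into an io::Error with ErrorKind::Other, which is what lets the proxmox_lang::io_err_other helper be dropped; a minimal demonstration:

use std::io;

fn parse_port(s: &str) -> io::Result<u16> {
    // Any error implementing Into<Box<dyn Error + Send + Sync>> works here.
    s.parse::<u16>().map_err(io::Error::other)
}

fn main() {
    let err = parse_port("not-a-port").unwrap_err();
    assert_eq!(err.kind(), io::ErrorKind::Other);
}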
@@ -3,6 +3,7 @@ use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
 
 use anyhow::{bail, format_err, Error};
+use tracing::info;
 
 use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus};
 use proxmox_io::ReadExt;
@@ -10,8 +11,7 @@ use proxmox_sys::fs::{create_dir, create_path, file_type_from_file_stat, CreateOptions};
 use proxmox_sys::process_locker::{
     ProcessLockExclusiveGuard, ProcessLockSharedGuard, ProcessLocker,
 };
-use proxmox_sys::task_log;
-use proxmox_sys::WorkerTaskContext;
+use proxmox_worker_task::WorkerTaskContext;
 
 use crate::file_formats::{
     COMPRESSED_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0, UNCOMPRESSED_BLOB_MAGIC_1_0,
@@ -92,7 +92,6 @@ impl ChunkStore {
         path: P,
         uid: nix::unistd::Uid,
         gid: nix::unistd::Gid,
-        worker: Option<&dyn WorkerTaskContext>,
         sync_level: DatastoreFSyncLevel,
     ) -> Result<Self, Error>
     where
@@ -143,9 +142,7 @@ impl ChunkStore {
             }
             let percentage = (i * 100) / (64 * 1024);
             if percentage != last_percentage {
-                if let Some(worker) = worker {
-                    task_log!(worker, "Chunkstore create: {}%", percentage)
-                }
+                info!("Chunkstore create: {percentage}%");
                 last_percentage = percentage;
             }
         }
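The logging change swaps the worker-bound task_log! macro for plain tracing events, which the surrounding worker-task infrastructure is expected to capture; a standalone sketch of the call style (subscriber choice illustrative):

use tracing::info;

// Progress reporting no longer needs an Option<&dyn WorkerTaskContext>
// threaded through: any active tracing subscriber receives the event.
fn report_create_progress(percentage: usize) {
    info!("Chunkstore create: {percentage}%");
}

fn main() {
    // A simple stdout subscriber, for demonstration purposes only.
    tracing_subscriber::fmt().init();
    report_create_progress(42);
}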
@@ -251,8 +248,9 @@ impl ChunkStore {
     pub fn get_chunk_iterator(
         &self,
     ) -> Result<
-        impl Iterator<Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool)>
-            + std::iter::FusedIterator,
+        impl std::iter::FusedIterator<
+            Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool),
+        >,
         Error,
     > {
         // unwrap: only `None` in unit tests
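Both signatures describe the same iterator; naming Item directly on FusedIterator works because it is a subtrait of Iterator. A small sketch:

use std::iter::FusedIterator;

// Equivalent to `impl Iterator<Item = u32> + FusedIterator`, but with the
// associated type named on the subtrait, as in the new signature above.
fn even_numbers() -> impl FusedIterator<Item = u32> {
    (0..10u32).filter(|n| n % 2 == 0).fuse()
}

fn main() {
    assert_eq!(even_numbers().collect::<Vec<_>>(), [0, 2, 4, 6, 8]);
}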
@@ -374,7 +372,7 @@ impl ChunkStore {
         for (entry, percentage, bad) in self.get_chunk_iterator()? {
             if last_percentage != percentage {
                 last_percentage = percentage;
-                task_log!(worker, "processed {}% ({} chunks)", percentage, chunk_count,);
+                info!("processed {percentage}% ({chunk_count} chunks)");
             }
 
             worker.check_abort()?;
@@ -578,15 +576,8 @@ fn test_chunk_store1() {
     let user = nix::unistd::User::from_uid(nix::unistd::Uid::current())
         .unwrap()
         .unwrap();
-    let chunk_store = ChunkStore::create(
-        "test",
-        &path,
-        user.uid,
-        user.gid,
-        None,
-        DatastoreFSyncLevel::None,
-    )
-    .unwrap();
+    let chunk_store =
+        ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None).unwrap();
 
     let (chunk, digest) = crate::data_blob::DataChunkBuilder::new(&[0u8, 1u8])
         .build()
@@ -598,14 +589,8 @@ fn test_chunk_store1() {
     let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
     assert!(exists);
 
-    let chunk_store = ChunkStore::create(
-        "test",
-        &path,
-        user.uid,
-        user.gid,
-        None,
-        DatastoreFSyncLevel::None,
-    );
+    let chunk_store =
+        ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None);
     assert!(chunk_store.is_err());
 
     if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
@@ -1,3 +1,5 @@
+use std::sync::mpsc::Receiver;
+
 /// Note: window size 32 or 64, is faster because we can
 /// speedup modulo operations, but always computes hash 0
 /// for constant data streams .. 0,0,0,0,0,0
@@ -5,6 +7,20 @@
 /// use hash value 0 to detect a boundary.
 const CA_CHUNKER_WINDOW_SIZE: usize = 64;
 
+/// Additional context for chunker to find possible boundaries in payload streams
+#[derive(Default)]
+pub struct Context {
+    /// Already consumed bytes of the chunk stream consumer
+    pub base: u64,
+    /// Total size currently buffered
+    pub total: u64,
+}
+
+pub trait Chunker {
+    fn scan(&mut self, data: &[u8], ctx: &Context) -> usize;
+    fn reset(&mut self);
+}
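To illustrate the trait contract (scan returns 0 when it needs more data, otherwise the boundary offset into the passed slice), here is a toy fixed-size implementation; purely illustrative, not part of this diff:

// Toy Chunker: emits a boundary every 4 KiB, ignoring the rolling hash.
struct FixedChunker {
    filled: usize,
}

impl Chunker for FixedChunker {
    fn scan(&mut self, data: &[u8], _ctx: &Context) -> usize {
        const CHUNK: usize = 4096;
        let missing = CHUNK - self.filled;
        if data.len() >= missing {
            self.filled = 0;
            missing // boundary found: offset into `data`
        } else {
            self.filled += data.len();
            0 // no boundary yet; caller should supply more data
        }
    }

    fn reset(&mut self) {
        self.filled = 0;
    }
}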
 /// Sliding window chunker (Buzhash)
 ///
 /// This is a rewrite of the *casync* chunker (cachunker.h) in Rust.
@@ -15,7 +31,7 @@ const CA_CHUNKER_WINDOW_SIZE: usize = 64;
 /// Hash](https://en.wikipedia.org/wiki/Rolling_hash) article from
 /// Wikipedia.
 
-pub struct Chunker {
+pub struct ChunkerImpl {
     h: u32,
     window_size: usize,
     chunk_size: usize,
@@ -32,6 +48,16 @@ pub struct Chunker {
     window: [u8; CA_CHUNKER_WINDOW_SIZE],
 }
 
+/// Sliding window chunker (Buzhash) with boundary suggestions
+///
+/// Suggests chunking at a given boundary instead of the regular chunk boundary, for better
+/// alignment with file payload boundaries.
+pub struct PayloadChunker {
+    chunker: ChunkerImpl,
+    current_suggested: Option<u64>,
+    suggested_boundaries: Receiver<u64>,
+}
 const BUZHASH_TABLE: [u32; 256] = [
     0x458be752, 0xc10748cc, 0xfbbcdbb8, 0x6ded5b68, 0xb10a82b5, 0x20d75648, 0xdfc5665f, 0xa8428801,
     0x7ebf5191, 0x841135c7, 0x65cc53b3, 0x280a597c, 0x16f60255, 0xc78cbc3e, 0x294415f5, 0xb938d494,
@@ -67,7 +93,7 @@ const BUZHASH_TABLE: [u32; 256] = [
     0x5eff22f4, 0x6027f4cc, 0x77178b3c, 0xae507131, 0x7bf7cabc, 0xf9c18d66, 0x593ade65, 0xd95ddf11,
 ];
 
-impl Chunker {
+impl ChunkerImpl {
     /// Create a new Chunker instance, which produces an average
     /// chunk size of `chunk_size_avg` (needs to be a power of two). We
     /// allow variation from `chunk_size_avg/4` up to a maximum of
@@ -105,11 +131,44 @@ impl Chunker {
         }
     }
 
+    // fast implementation avoiding modulo
+    // #[inline(always)]
+    fn shall_break(&self) -> bool {
+        if self.chunk_size >= self.chunk_size_max {
+            return true;
+        }
+
+        if self.chunk_size < self.chunk_size_min {
+            return false;
+        }
+
+        //(self.h & 0x1ffff) <= 2 //THIS IS SLOW!!!
+
+        //(self.h & self.break_test_mask) <= 2 // Bad on 0 streams
+
+        (self.h & self.break_test_mask) >= self.break_test_minimum
+    }
+
+    // This is the original implementation from casync
+    /*
+    #[inline(always)]
+    fn shall_break_orig(&self) -> bool {
+
+        if self.chunk_size >= self.chunk_size_max { return true; }
+
+        if self.chunk_size < self.chunk_size_min { return false; }
+
+        (self.h % self.discriminator) == (self.discriminator - 1)
+    }
+    */
+}
+
+impl Chunker for ChunkerImpl {
     /// Scans the specified data for a chunk border. Returns 0 if none
     /// was found (and the function should be called with more data
     /// later on), or another value indicating the position of a
     /// border.
-    pub fn scan(&mut self, data: &[u8]) -> usize {
+    fn scan(&mut self, data: &[u8], _ctx: &Context) -> usize {
         let window_len = self.window.len();
         let data_len = data.len();
@@ -167,36 +226,89 @@ impl Chunker {
         0
     }
 
-    // fast implementation avoiding modulo
-    // #[inline(always)]
-    fn shall_break(&self) -> bool {
-        if self.chunk_size >= self.chunk_size_max {
-            return true;
-        }
-
-        if self.chunk_size < self.chunk_size_min {
-            return false;
-        }
-
-        //(self.h & 0x1ffff) <= 2 //THIS IS SLOW!!!
-
-        //(self.h & self.break_test_mask) <= 2 // Bad on 0 streams
-
-        (self.h & self.break_test_mask) >= self.break_test_minimum
-    }
-
-    // This is the original implementation from casync
-    /*
-    #[inline(always)]
-    fn shall_break_orig(&self) -> bool {
-
-        if self.chunk_size >= self.chunk_size_max { return true; }
-
-        if self.chunk_size < self.chunk_size_min { return false; }
-
-        (self.h % self.discriminator) == (self.discriminator - 1)
-    }
-    */
+    fn reset(&mut self) {
+        self.h = 0;
+        self.chunk_size = 0;
+        self.window_size = 0;
+    }
 }
+
+impl PayloadChunker {
+    /// Create a new PayloadChunker instance, which produces an average
+    /// chunk size of `chunk_size_avg` (needs to be a power of two), if no
+    /// suggested boundaries are provided.
+    /// Use suggested boundaries instead, whenever the chunk size is within
+    /// the min - max range.
+    pub fn new(chunk_size_avg: usize, suggested_boundaries: Receiver<u64>) -> Self {
+        Self {
+            chunker: ChunkerImpl::new(chunk_size_avg),
+            current_suggested: None,
+            suggested_boundaries,
+        }
+    }
+}
+
+impl Chunker for PayloadChunker {
+    fn scan(&mut self, data: &[u8], ctx: &Context) -> usize {
+        assert!(ctx.total >= data.len() as u64);
+        let pos = ctx.total - data.len() as u64;
+
+        loop {
+            if let Some(boundary) = self.current_suggested {
+                if boundary < ctx.base + pos {
+                    log::debug!("Boundary {boundary} in past");
+                    // ignore already passed boundaries
+                    self.current_suggested = None;
+                    continue;
+                }
+
+                if boundary > ctx.base + ctx.total {
+                    log::debug!("Boundary {boundary} in future");
+                    // boundary in future, cannot decide yet
+                    return self.chunker.scan(data, ctx);
+                }
+
+                let chunk_size = (boundary - ctx.base) as usize;
+                if chunk_size < self.chunker.chunk_size_min {
+                    log::debug!("Chunk size {chunk_size} below minimum chunk size");
+                    // chunk too small, ignore boundary
+                    self.current_suggested = None;
+                    continue;
+                }
+
+                if chunk_size <= self.chunker.chunk_size_max {
+                    self.current_suggested = None;
+                    // calculate boundary relative to start of given data buffer
+                    let len = chunk_size - pos as usize;
+                    if len == 0 {
+                        // passed this one, previous scan did not know about boundary just yet
+                        return self.chunker.scan(data, ctx);
+                    }
+                    self.chunker.reset();
+                    log::debug!(
+                        "Chunk at suggested boundary: {boundary}, chunk size: {chunk_size}"
+                    );
+                    return len;
+                }
+
+                log::debug!("Chunk {chunk_size} too big, regular scan");
+                // chunk too big, cannot decide yet
+                // scan for hash based chunk boundary instead
+                return self.chunker.scan(data, ctx);
+            }
+
+            if let Ok(boundary) = self.suggested_boundaries.try_recv() {
+                self.current_suggested = Some(boundary);
+            } else {
+                log::debug!("No suggested boundary, regular scan");
+                return self.chunker.scan(data, ctx);
+            }
+        }
+    }
+
+    fn reset(&mut self) {
+        self.chunker.reset();
+    }
+}
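A minimal driver loop for the hash-based chunker through the new trait, mirroring the whole-buffer feeding style of the tests below (ChunkerImpl ignores the context, so a default Context suffices); the helper name is illustrative:

fn boundaries(data: &[u8]) -> Vec<usize> {
    let mut chunker = ChunkerImpl::new(64 * 1024);
    let ctx = Context::default();
    let mut cuts = Vec::new();
    let mut pos = 0;
    while pos < data.len() {
        let k = chunker.scan(&data[pos..], &ctx);
        if k == 0 {
            break; // needs more data; the tail forms the last chunk
        }
        pos += k;
        cuts.push(pos);
    }
    cuts
}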
 #[test]
@@ -209,17 +321,18 @@ fn test_chunker1() {
             buffer.push(byte);
         }
     }
-    let mut chunker = Chunker::new(64 * 1024);
+    let mut chunker = ChunkerImpl::new(64 * 1024);
 
     let mut pos = 0;
     let mut last = 0;
 
     let mut chunks1: Vec<(usize, usize)> = vec![];
     let mut chunks2: Vec<(usize, usize)> = vec![];
+    let ctx = Context::default();
 
     // test1: feed single bytes
     while pos < buffer.len() {
-        let k = chunker.scan(&buffer[pos..pos + 1]);
+        let k = chunker.scan(&buffer[pos..pos + 1], &ctx);
         pos += 1;
         if k != 0 {
             let prev = last;
@@ -229,13 +342,13 @@ fn test_chunker1() {
     }
     chunks1.push((last, buffer.len() - last));
 
-    let mut chunker = Chunker::new(64 * 1024);
+    let mut chunker = ChunkerImpl::new(64 * 1024);
 
     let mut pos = 0;
 
     // test2: feed with whole buffer
     while pos < buffer.len() {
-        let k = chunker.scan(&buffer[pos..]);
+        let k = chunker.scan(&buffer[pos..], &ctx);
         if k != 0 {
             chunks2.push((pos, k));
             pos += k;
@@ -269,3 +382,97 @@ fn test_chunker1() {
         panic!("got different chunks");
     }
 }
+#[test]
+fn test_suggested_boundary() {
+    let mut buffer = Vec::new();
+
+    for i in 0..(256 * 1024) {
+        for j in 0..4 {
+            let byte = ((i >> (j << 3)) & 0xff) as u8;
+            buffer.push(byte);
+        }
+    }
+    let (tx, rx) = std::sync::mpsc::channel();
+    let mut chunker = PayloadChunker::new(64 * 1024, rx);
+
+    // Suggest chunk boundary within regular chunk
+    tx.send(32 * 1024).unwrap();
+    // Suggest chunk boundary within regular chunk, resulting chunk being 0
+    tx.send(32 * 1024).unwrap();
+    // Suggest chunk boundary in the past, must be ignored
+    tx.send(0).unwrap();
+    // Suggest chunk boundary aligned with regular boundary
+    tx.send(405521).unwrap();
+
+    let mut pos = 0;
+    let mut last = 0;
+
+    let mut chunks1: Vec<(usize, usize)> = vec![];
+    let mut chunks2: Vec<(usize, usize)> = vec![];
+    let mut ctx = Context::default();
+
+    // test1: feed single bytes with suggested boundary
+    while pos < buffer.len() {
+        ctx.total += 1;
+        let k = chunker.scan(&buffer[pos..pos + 1], &ctx);
+        pos += 1;
+        if k != 0 {
+            let prev = last;
+            last = pos;
+            ctx.base += pos as u64;
+            ctx.total = 0;
+            chunks1.push((prev, pos - prev));
+        }
+    }
+    chunks1.push((last, buffer.len() - last));
+
+    let mut pos = 0;
+    let mut ctx = Context::default();
+    ctx.total = buffer.len() as u64;
+    chunker.reset();
+    // Suggest chunk boundary within regular chunk
+    tx.send(32 * 1024).unwrap();
+    // Suggest chunk boundary within regular chunk,
+    // resulting chunk being too small and therefore ignored
+    tx.send(32 * 1024).unwrap();
+    // Suggest chunk boundary in the past, must be ignored
+    tx.send(0).unwrap();
+    // Suggest chunk boundary aligned with regular boundary
+    tx.send(405521).unwrap();
+
+    while pos < buffer.len() {
+        let k = chunker.scan(&buffer[pos..], &ctx);
+        if k != 0 {
+            chunks2.push((pos, k));
+            pos += k;
+            ctx.base += pos as u64;
+            ctx.total = (buffer.len() - pos) as u64;
+        } else {
+            break;
+        }
+    }
+
+    chunks2.push((pos, buffer.len() - pos));
+
+    if chunks1 != chunks2 {
+        let mut size1 = 0;
+        for (_offset, len) in &chunks1 {
+            size1 += len;
+        }
+        println!("Chunks1: {size1}\n{chunks1:?}\n");
+
+        let mut size2 = 0;
+        for (_offset, len) in &chunks2 {
+            size2 += len;
+        }
+        println!("Chunks2: {size2}\n{chunks2:?}\n");
+
+        panic!("got different chunks");
+    }
+
+    let expected_sizes = [32768, 110609, 229376, 32768, 262144, 262144, 118767];
+    for ((_, chunk_size), expected) in chunks1.iter().zip(expected_sizes.iter()) {
+        assert_eq!(chunk_size, expected);
+    }
+}
Some files were not shown because too many files have changed in this diff.