Merge 3.0.2-1

This commit is contained in:
Andrew A. Vasilyev 2023-09-05 17:11:09 +03:00
commit 1713e83afe
78 changed files with 1072 additions and 839 deletions

20
.gitignore vendored
View File

@ -1,16 +1,22 @@
local.mak
/target
**/*.rs.bk
*~
*.5
*.7
*.backup
*.backup[0-9]
*.backup[0-9][0-9]
*.old
*.old[0-9]
*.old[0-9][0-9]
*.5
*.7
__pycache__/
/etc/proxmox-backup.service
*~
/*.build
/*.buildinfo
/*.changes
/*.deb
/*.dsc
/*.tar*
/etc/proxmox-backup-proxy.service
/etc/proxmox-backup.service
/target
__pycache__/
build/
local.mak

View File

@ -1,5 +1,5 @@
[workspace.package]
version = "2.4.3"
version = "3.0.2"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@ -57,25 +57,26 @@ path = "src/lib.rs"
proxmox-async = "0.4"
proxmox-auth-api = "0.3"
proxmox-borrow = "1"
proxmox-compression = "0.2.0"
proxmox-compression = "0.2"
proxmox-fuse = "0.1.3"
proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "websocket" ] } # see below
proxmox-human-byte = "0.1"
proxmox-io = "1.0.1" # tools and client use "tokio" feature
proxmox-lang = "1.1"
proxmox-ldap = "0.2"
proxmox-metrics = "0.3"
proxmox-rest-server = { version = "0.4.0", features = [ "templates" ] }
proxmox-rest-server = { version = "0.4.1", features = [ "templates" ] }
# some use "cli", some use "cli" and "server", pbs-config uses nothing
proxmox-router = { version = "2.0", default_features = false }
proxmox-router = { version = "2.0.0", default_features = false }
# everything but pbs-config and pbs-client uses "api-macro"
proxmox-schema = "2.0"
proxmox-schema = "2.0.0"
proxmox-section-config = "2"
proxmox-serde = "0.1.1"
proxmox-shared-memory = "0.3.0"
proxmox-sortable-macro = "0.1.2"
proxmox-subscription = { version = "0.4.0", features = [ "api-types" ] }
proxmox-subscription = { version = "0.4", features = [ "api-types" ] }
proxmox-sys = "0.5.0"
proxmox-tfa = { version = "4", features = [ "api", "api-types" ] }
proxmox-tfa = { version = "4.0.4", features = [ "api", "api-types" ] }
proxmox-time = "1.1.2"
proxmox-uuid = "1"
@ -124,8 +125,6 @@ nom = "7"
num-traits = "0.2"
once_cell = "1.3.1"
openssl = "0.10.40"
pam = "0.7"
pam-sys = "0.5"
percent-encoding = "2.1"
pin-project-lite = "0.2"
regex = "1.5.5"
@ -148,7 +147,7 @@ udev = "0.4"
url = "2.1"
walkdir = "2"
xdg = "2.2"
zstd = { version = ">= 0.6, < 0.13", features = [ "bindgen" ] }
zstd = { version = "0.12", features = [ "bindgen" ] }
[dependencies]
anyhow.workspace = true
@ -174,8 +173,6 @@ nom.workspace = true
num-traits.workspace = true
once_cell.workspace = true
openssl.workspace = true
pam-sys.workspace = true
pam.workspace = true
percent-encoding.workspace = true
regex.workspace = true
rustyline.workspace = true
@ -202,6 +199,7 @@ proxmox-async.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] }
proxmox-compression.workspace = true
proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async", "rate-limited-stream" ] } # pbs-client doesn't use these
proxmox-human-byte.workspace = true
proxmox-io.workspace = true
proxmox-lang.workspace = true
proxmox-ldap.workspace = true

View File

@ -18,7 +18,7 @@ USR_BIN := \
# Binaries usable by admins
USR_SBIN := \
proxmox-backup-manager \
proxmox-backup-debug \
proxmox-backup-debug
# Binaries for services:
SERVICE_BIN := \
@ -120,10 +120,16 @@ deb: build
lintian $(DEBS) $(DOC_DEB)
.PHONY: dsc
dsc: $(DSC)
dsc:
rm -rf $(DSC) build/
$(MAKE) $(DSC)
lintian $(DSC)
$(DSC): build
cd build; dpkg-buildpackage -S -us -uc -d
lintian $(DSC)
sbuild: $(DSC)
sbuild $<
.PHONY: clean distclean deb clean
distclean: clean
@ -195,6 +201,7 @@ install: $(COMPILED_BINS)
$(foreach i,$(USR_SBIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(SBINDIR)/ ; \
install -m644 zsh-completions/_$(i) $(DESTDIR)$(ZSH_COMPL_DEST)/ ;)
install -m755 $(COMPILEDIR)/pbs2to3 $(DESTDIR)$(SBINDIR)/
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup
install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore
$(foreach i,$(RESTORE_BIN), \

View File

@ -47,8 +47,8 @@ Build
on Debian 11 Bullseye
Setup:
1. # echo 'deb http://download.proxmox.com/debian/devel/ bullseye main' | sudo tee /etc/apt/sources.list.d/proxmox-devel.list
2. # sudo wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
1. # echo 'deb http://download.proxmox.com/debian/devel/ bookworm main' | sudo tee /etc/apt/sources.list.d/proxmox-devel.list
2. # sudo wget https://enterprise.proxmox.com/debian/proxmox-release-bookworm.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
3. # sudo apt update
4. # sudo apt install devscripts debcargo clang
5. # git clone git://git.proxmox.com/git/proxmox-backup.git

92
debian/changelog vendored
View File

@ -1,16 +1,96 @@
rust-proxmox-backup (2.4.3-1) bullseye; urgency=medium
rust-proxmox-backup (3.0.2-1) bookworm; urgency=medium
* add pbs2to3 major-upgrade checker binary
* docs: update FAQ release support table, add PBS 2.x EOL date
* cargo: bump proxmox-apt to 0.9.4 to improve repository API during upgrade
* encrypted backup: fix log message
-- Proxmox Support Team <support@proxmox.com> Wed, 28 Jun 2023 18:55:03 +0200
* reset tfa failure count when unlocking
rust-proxmox-backup (2.4.2-2) bullseye; urgency=medium
* sync over pbs2to3 upgrade check-list script to master
* pbs-client: pxar: preserve error context
* pbs-client: pxar: refactor body of `extract_archive` to `ExtractorIter`
* pbs-client: pxar: add PxarExtractContext
* proxmox-backup-client: restore: add 'ignore-extract-device-errors' flag
* docs: replace concrete device paths with pseudo paths
* pbs-client: backup-writer: use log::warn instead of eprintln!
* fix #4591: pbs-client: backup_writer: improve error reporting
* backup: improve skipped download error message
* handle pve-kernel -> proxmox-kernel rename
-- Proxmox Support Team <support@proxmox.com> Tue, 01 Aug 2023 11:53:07 +0200
rust-proxmox-backup (3.0.1-1) bookworm; urgency=medium
* build with newer proxmox-rest-server to remove the full static file path
from error messages, which might trigger some simple security scanners
confusing the API server with a classic (file serving) HTTP server.
* build with newer proxmox-apt to fix #4653: (In)Release file: improve
handling of special suites
* ui: user view: fix refresh for TOTP-locked column
* api: node services: switch to systemd-journald and chrony instead of
syslog and systemd-timesyncd, respectively
* docs: consistent spelling of "USB flash drive"
* docs: fix spelling of "command line" (noun) & "command-line" (adjective)
-- Proxmox Support Team <support@proxmox.com> Tue, 27 Jun 2023 16:04:19 +0200
rust-proxmox-backup (3.0.0-2) bookworm; urgency=medium
* etc/pbs-enterprise.list: change to bookworm
-- Proxmox Support Team <support@proxmox.com> Mon, 26 Jun 2023 22:13:43 +0200
rust-proxmox-backup (3.0.0-1) bookworm; urgency=medium
* ui: tape: fix restore datastore mapping parameter construction
-- Proxmox Support Team <support@proxmox.com> Tue, 06 Jun 2023 13:16:41 +0200
* fix #4638: proxmox-backup-client: status: guard against div by zero
* file-restore: add zfs. prefix to arc_min/max settings so that they get
actually applied
* file-restore: set zfs_arc_min to current lowest valid minimum of 32M
* fix #4734: manager: add user tfa {list, delete} commands
* api: fix schema return annotation of tfa_update_auth
* access: ldap check connection on creation and change
* api, manager: add user tfa unlock endpoint and command
* enable TFA lockout, for the relatively low-entropy TOTP type after 8
consecutive tries, for all other types after 1000 consecutive tries, as
they have much higher entropy
* ui: add TFA lock status and unlock button
* docs: user-management: add section for TFA lockouts
* docs: update package repos and secure APT release key checksums for
Debian 12 Bookworm based release
-- Proxmox Support Team <support@proxmox.com> Mon, 26 Jun 2023 19:59:56 +0200
rust-proxmox-backup (2.99.0-1) bookworm; urgency=medium
* initial re-build for Debian 12 Bookworm based releases
-- Proxmox Support Team <support@proxmox.com> Sun, 21 May 2023 13:51:05 +0200
rust-proxmox-backup (2.4.2-1) bullseye; urgency=medium

1
debian/compat vendored
View File

@ -1 +0,0 @@
12

72
debian/control vendored
View File

@ -44,20 +44,18 @@ Build-Depends: bash-completion,
librust-num-traits-0.2+default-dev,
librust-once-cell-1+default-dev (>= 1.3.1-~~),
librust-openssl-0.10+default-dev (>= 0.10.40-~~),
librust-pam-0.7+default-dev,
librust-pam-sys-0.5+default-dev,
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-lite-0.2+default-dev,
librust-proxmox-acme-rs-0.4+default-dev,
librust-proxmox-apt-0.10+default-dev,
librust-proxmox-apt-0.10+default-dev (>= 0.10.2-~~),
librust-proxmox-async-0.4+default-dev,
librust-proxmox-auth-api-0.3+api-dev,
librust-proxmox-auth-api-0.3+api-types-dev,
librust-proxmox-auth-api-0.3+default-dev,
librust-proxmox-auth-api-0.3+pam-authenticator-dev,
librust-proxmox-borrow-1+default-dev,
librust-proxmox-compression-0.2+default-dev (>= 0.2~),
librust-proxmox-compression-0.2+default-dev,
librust-proxmox-fuse-0.1+default-dev (>= 0.1.3-~~),
librust-proxmox-http-0.9+client-dev,
librust-proxmox-http-0.9+client-trait-dev,
@ -67,35 +65,36 @@ Build-Depends: bash-completion,
librust-proxmox-http-0.9+rate-limited-stream-dev,
librust-proxmox-http-0.9+rate-limiter-dev,
librust-proxmox-http-0.9+websocket-dev,
librust-proxmox-human-byte-0.1+default-dev,
librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
librust-proxmox-lang-1+default-dev (>= 1.1-~~),
librust-proxmox-ldap-0.1+default-dev,
librust-proxmox-ldap-0.2+default-dev,
librust-proxmox-metrics-0.3+default-dev,
librust-proxmox-openid-0.10+default-dev (>= 0.10.0-~~),
librust-proxmox-rest-server-0.4+default-dev,
librust-proxmox-rest-server-0.4+rate-limited-stream-dev,
librust-proxmox-rest-server-0.4+templates-dev,
librust-proxmox-router-1+cli-dev (>= 1.3.1-~~),
librust-proxmox-router-1+default-dev (>= 1.3.1-~~),
librust-proxmox-router-1+server-dev (>= 1.3.1-~~),
librust-proxmox-schema-1+api-macro-dev (>= 1.3.6-~~),
librust-proxmox-schema-1+default-dev (>= 1.3.6-~~),
librust-proxmox-section-config-1+default-dev,
librust-proxmox-openid-0.10+default-dev,
librust-proxmox-rest-server-0.4+default-dev (>= 0.4.1-~~),
librust-proxmox-rest-server-0.4+rate-limited-stream-dev (>= 0.4.1-~~),
librust-proxmox-rest-server-0.4+templates-dev (>= 0.4.1-~~),
librust-proxmox-router-2+cli-dev,
librust-proxmox-router-2+default-dev,
librust-proxmox-router-2+server-dev,
librust-proxmox-schema-2+api-macro-dev,
librust-proxmox-schema-2+default-dev,
librust-proxmox-section-config-2+default-dev,
librust-proxmox-serde-0.1+default-dev (>= 0.1.1-~~),
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
librust-proxmox-shared-memory-0.3+default-dev (>= 0.3.0-~~),
librust-proxmox-shared-memory-0.3+default-dev,
librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
librust-proxmox-subscription-0.4+api-types-dev,
librust-proxmox-subscription-0.4+default-dev,
librust-proxmox-sys-0.5+acl-dev (>= 0.5.0-~~),
librust-proxmox-sys-0.5+crypt-dev (>= 0.5.0-~~),
librust-proxmox-sys-0.5+default-dev (>= 0.5.0-~~),
librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.0-~~),
librust-proxmox-sys-0.5+timer-dev (>= 0.5.0-~~),
librust-proxmox-tfa-4+api-dev,
librust-proxmox-tfa-4+api-types-dev,
librust-proxmox-tfa-4+default-dev,
librust-proxmox-sys-0.5+acl-dev,
librust-proxmox-sys-0.5+crypt-dev,
librust-proxmox-sys-0.5+default-dev,
librust-proxmox-sys-0.5+logrotate-dev,
librust-proxmox-sys-0.5+timer-dev,
librust-proxmox-tfa-4+api-dev (>= 4.0.4-~~),
librust-proxmox-tfa-4+api-types-dev (>= 4.0.4-~~),
librust-proxmox-tfa-4+default-dev (>= 4.0.4-~~),
librust-proxmox-time-1+default-dev (>= 1.1.2-~~),
librust-proxmox-uuid-1+default-dev,
librust-proxmox-uuid-1+serde-dev,
@ -166,7 +165,7 @@ Depends: fonts-font-awesome,
libjs-extjs (>= 7~),
libjs-qrcodejs (>= 1.20201119),
libproxmox-acme-plugins,
libsgutils2-2,
libsgutils2-1.46-2,
libzstd1 (>= 1.3.8),
lvm2,
openssh-server,
@ -180,19 +179,17 @@ Depends: fonts-font-awesome,
smartmontools,
${misc:Depends},
${shlibs:Depends},
Recommends: zfsutils-linux,
ifupdown2,
proxmox-offline-mirror-helper,
Recommends: ifupdown2,
proxmox-mail-forward,
proxmox-offline-mirror-helper,
zfsutils-linux,
Description: Proxmox Backup Server daemon with tools and GUI
This package contains the Proxmox Backup Server daemons and related
tools. This includes a web-based graphical user interface.
Package: proxmox-backup-client
Architecture: any
Depends: qrencode,
${misc:Depends},
${shlibs:Depends},
Depends: qrencode, ${misc:Depends}, ${shlibs:Depends},
Description: Proxmox Backup Client tools
This package contains the Proxmox Backup client, which provides a
simple command line tool to create and restore backups.
@ -200,21 +197,16 @@ Description: Proxmox Backup Client tools
Package: proxmox-backup-docs
Build-Profiles: <!nodoc>
Section: doc
Depends: fonts-font-awesome,
libjs-extjs,
libjs-mathjax,
${misc:Depends},
Depends: fonts-font-awesome, libjs-extjs, libjs-mathjax, ${misc:Depends},
Architecture: all
Description: Proxmox Backup Documentation
This package contains the Proxmox Backup Documentation files.
Package: proxmox-backup-file-restore
Architecture: any
Depends: ${misc:Depends},
${shlibs:Depends},
Recommends: pve-qemu-kvm (>= 5.0.0-9),
proxmox-backup-restore-image,
Breaks: proxmox-backup-restore-image (<< 0.3.1)
Depends: ${misc:Depends}, ${shlibs:Depends},
Recommends: proxmox-backup-restore-image, pve-qemu-kvm (>= 5.0.0-9),
Breaks: proxmox-backup-restore-image (<< 0.3.1),
Description: Proxmox Backup single file restore tools for pxar and block device backups
This package contains the Proxmox Backup single file restore client for
restoring individual files and folders from both host/container and VM/block

View File

@ -1,6 +1,6 @@
/usr/share/doc/proxmox-backup/proxmox-backup.pdf /usr/share/doc/proxmox-backup/html/proxmox-backup.pdf
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
/usr/share/fonts-font-awesome/ /usr/share/doc/proxmox-backup/html/lto-barcode/font-awesome
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/api-viewer/extjs
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/lto-barcode/extjs
/usr/share/javascript/extjs /usr/share/doc/proxmox-backup/html/prune-simulator/extjs
/usr/share/javascript/mathjax /usr/share/doc/proxmox-backup/html/_static/mathjax

View File

@ -1,4 +1,4 @@
usr/bin/proxmox-file-restore
usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon
usr/share/man/man1/proxmox-file-restore.1
usr/share/zsh/vendor-completions/_proxmox-file-restore
usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon

View File

@ -1,43 +1,43 @@
etc/proxmox-backup-proxy.service /lib/systemd/system/
etc/proxmox-backup.service /lib/systemd/system/
etc/pbs-enterprise.list /etc/apt/sources.list.d/
etc/proxmox-backup-banner.service /lib/systemd/system/
etc/proxmox-backup-daily-update.service /lib/systemd/system/
etc/proxmox-backup-daily-update.timer /lib/systemd/system/
etc/pbs-enterprise.list /etc/apt/sources.list.d/
etc/proxmox-backup-proxy.service /lib/systemd/system/
etc/proxmox-backup.service /lib/systemd/system/
usr/bin/pmt
usr/bin/pmtx
usr/bin/proxmox-tape
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-banner
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy
usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-daily-update
usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd
usr/sbin/pbs2to3
usr/sbin/proxmox-backup-debug
usr/sbin/proxmox-backup-manager
usr/sbin/pbs2to3
usr/bin/pmtx
usr/bin/pmt
usr/bin/proxmox-tape
usr/share/javascript/proxmox-backup/index.hbs
usr/share/javascript/proxmox-backup/css/ext6-pbs.css
usr/share/javascript/proxmox-backup/images
usr/share/javascript/proxmox-backup/index.hbs
usr/share/javascript/proxmox-backup/js/proxmox-backup-gui.js
usr/share/man/man1/pbs2to3.1
usr/share/man/man1/pmt.1
usr/share/man/man1/pmtx.1
usr/share/man/man1/proxmox-backup-debug.1
usr/share/man/man1/proxmox-backup-manager.1
usr/share/man/man1/proxmox-backup-proxy.1
usr/share/man/man1/proxmox-tape.1
usr/share/man/man1/pmtx.1
usr/share/man/man1/pmt.1
usr/share/man/man1/pbs2to3.1
usr/share/man/man5/acl.cfg.5
usr/share/man/man5/datastore.cfg.5
usr/share/man/man5/domains.cfg.5
usr/share/man/man5/user.cfg.5
usr/share/man/man5/media-pool.cfg.5
usr/share/man/man5/remote.cfg.5
usr/share/man/man5/sync.cfg.5
usr/share/man/man5/verification.cfg.5
usr/share/man/man5/media-pool.cfg.5
usr/share/man/man5/tape.cfg.5
usr/share/man/man5/tape-job.cfg.5
usr/share/man/man5/tape.cfg.5
usr/share/man/man5/user.cfg.5
usr/share/man/man5/verification.cfg.5
usr/share/zsh/vendor-completions/_pmt
usr/share/zsh/vendor-completions/_pmtx
usr/share/zsh/vendor-completions/_proxmox-backup-debug
usr/share/zsh/vendor-completions/_proxmox-backup-manager
usr/share/zsh/vendor-completions/_proxmox-tape
usr/share/zsh/vendor-completions/_pmtx
usr/share/zsh/vendor-completions/_pmt

2
debian/rules vendored
View File

@ -46,7 +46,7 @@ override_dh_auto_install:
override_dh_installsystemd:
dh_installsystemd -pproxmox-backup-server proxmox-backup-daily-update.timer
# note: we start/try-reload-restart services manually in postinst
dh_installsystemd --no-start --no-restart-after-upgrade
dh_installsystemd --no-start --no-restart-after-upgrade --no-stop-on-upgrade
override_dh_fixperms:
dh_fixperms --exclude sg-tape-cmd

View File

@ -1,7 +1,7 @@
Backup Client Usage
===================
The command line client for Proxmox Backup Server is called
The command-line client for Proxmox Backup Server is called
:command:`proxmox-backup-client`.
.. _client_repository:
@ -23,7 +23,7 @@ port (for example, with NAT and port forwarding configurations).
Note that if the server uses an IPv6 address, you have to write it with square
brackets (for example, `[fe80::01]`).
You can pass the repository with the ``--repository`` command line option, or
You can pass the repository with the ``--repository`` command-line option, or
by setting the ``PBS_REPOSITORY`` environment variable.
Below are some examples of valid repositories and their corresponding real
@ -391,7 +391,7 @@ To set up a master key:
It is recommended that you keep your master key safe, but easily accessible, in
order for quick disaster recovery. For this reason, the best place to store it
is in your password manager, where it is immediately recoverable. As a backup to
this, you should also save the key to a USB drive and store that in a secure
this, you should also save the key to a USB flash drive and store that in a secure
place. This way, it is detached from any system, but is still easy to recover
from, in case of emergency. Finally, in preparation for the worst case scenario,
you should also consider keeping a paper copy of your master key locked away in
@ -472,7 +472,7 @@ to use the interactive recovery shell.
bin boot dev etc home lib lib32
...
The interactive recovery shell is a minimal command line interface that
The interactive recovery shell is a minimal command-line interface that
utilizes the metadata stored in the catalog to quickly list, navigate and
search for files in a file archive.
To restore files, you can select them individually or match them with a glob

View File

@ -91,8 +91,8 @@ contact point for renewal-due or similar notifications from the ACME
endpoint.
You can register or deactivate ACME accounts over the web interface
``Certificates -> ACME Accounts`` or using the ``proxmox-backup-manager`` command
line tool.
``Certificates -> ACME Accounts`` or using the ``proxmox-backup-manager``
command-line tool.
::
@ -278,7 +278,7 @@ expired or if it will expire in the next 30 days.
.. _manually_change_certificate_over_command_line:
Manually Change Certificate over Command-Line
Manually Change Certificate over the Command Line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to get rid of certificate verification warnings, you have to
@ -330,7 +330,7 @@ Test your new certificate, using your browser.
To transfer files to and from your `Proxmox Backup`_, you can use
secure copy: If your desktop runs Linux, you can use the ``scp``
command line tool. If your desktop PC runs windows, please use an scp
command-line tool. If your desktop PC runs windows, please use an scp
client like WinSCP (see https://winscp.net/).
.. [1]

View File

@ -1,4 +1,4 @@
Command Line Tools
Command-line Tools
------------------
``proxmox-backup-client``

View File

@ -1,7 +1,7 @@
Command Syntax
==============
.. NOTE:: Logging verbosity for the command line tools can be controlled with the
.. NOTE:: Logging verbosity for the command-line tools can be controlled with the
``PBS_LOG`` (for ``pxar``: ``PXAR_LOG``) environment variable. Possible values are `off`,
`error`, `warn`, `info`, `debug` and `trace` with `info` being the default.

View File

@ -27,7 +27,9 @@ How long will my Proxmox Backup Server version be supported?
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+======================+===============+============+====================+
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | tba | tba |
|Proxmox Backup 3.x | Debian 12 (Bookworm) | 2023-06 | tba | tba |
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | 2024-07 | 2024-07 |
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | 2022-08 | 2022-07 |
+-----------------------+----------------------+---------------+------------+--------------------+

View File

@ -30,7 +30,7 @@ backup data and provides an API to create and manage datastores. With the
API, it's also possible to manage disks and other server-side resources.
The backup client uses this API to access the backed up data. You can use the
``proxmox-backup-client`` command line tool to create and restore file backups.
``proxmox-backup-client`` command-line tool to create and restore file backups.
For QEMU_ and LXC_ within `Proxmox Virtual Environment`_, we deliver an
integrated client.

View File

@ -8,10 +8,10 @@ Network Management
:align: right
:alt: System and Network Configuration Overview
Proxmox Backup Server provides both a web interface and a command line tool for
Proxmox Backup Server provides both a web interface and a command-line tool for
network configuration. You can find the configuration options in the web
interface under the **Network Interfaces** section of the **Configuration** menu
tree item. The command line tool is accessed via the ``network`` subcommand.
tree item. The command-line tool is accessed via the ``network`` subcommand.
These interfaces allow you to carry out some basic network management tasks,
such as adding, configuring, and removing network interfaces.

View File

@ -6,7 +6,7 @@ Debian Package Repositories
All Debian based systems use APT_ as a package management tool. The lists of
repositories are defined in ``/etc/apt/sources.list`` and the ``.list`` files found
in the ``/etc/apt/sources.d/`` directory. Updates can be installed directly
with the ``apt`` command line tool, or via the GUI.
with the ``apt`` command-line tool, or via the GUI.
APT_ ``sources.list`` files list one package repository per line, with the most
preferred source listed first. Empty lines are ignored and a ``#`` character
@ -17,11 +17,11 @@ update``.
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list``
deb http://ftp.debian.org/debian bullseye main contrib
deb http://ftp.debian.org/debian bullseye-updates main contrib
deb http://deb.debian.org/debian bookworm main contrib
deb http://deb.debian.org/debian bookworm-updates main contrib
# security updates
deb http://security.debian.org/debian-security bullseye-security main contrib
deb http://security.debian.org/debian-security bookworm-security main contrib
In addition, you need a package repository from Proxmox to get Proxmox Backup
@ -48,21 +48,21 @@ key with the following commands:
.. code-block:: console
# wget https://enterprise.proxmox.com/debian/proxmox-release-bullseye.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
# wget https://enterprise.proxmox.com/debian/proxmox-release-bookworm.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
Verify the SHA512 checksum afterwards with the expected output below:
.. code-block:: console
# sha512sum /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
7fb03ec8a1675723d2853b84aa4fdb49a46a3bb72b9951361488bfd19b29aab0a789a4f8c7406e71a69aabbc727c936d3549731c4659ffa1a08f44db8fdcebfa /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
# sha512sum /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
7da6fe34168adc6e479327ba517796d4702fa2f8b4f0a9833f5ea6e6b48f6507a6da403a274fe201595edc86a84463d50383d07f64bdde2e3658108db7d6dc87 /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
and the md5sum, with the expected output below:
.. code-block:: console
# md5sum /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
bcc35c7173e0845c0d6ad6470b70f50e /etc/apt/trusted.gpg.d/proxmox-release-bullseye.gpg
# md5sum /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
41558dc019ef90bd0f6067644a51cf5b /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
.. _sysadmin_package_repos_enterprise:
@ -77,7 +77,7 @@ enabled by default:
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list.d/pbs-enterprise.list``
deb https://enterprise.proxmox.com/debian/pbs bullseye pbs-enterprise
deb https://enterprise.proxmox.com/debian/pbs bookworm pbs-enterprise
To never miss important security fixes, the superuser (``root@pam`` user) is
@ -107,15 +107,15 @@ We recommend to configure this repository in ``/etc/apt/sources.list``.
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list``
deb http://ftp.debian.org/debian bullseye main contrib
deb http://ftp.debian.org/debian bullseye-updates main contrib
deb http://deb.debian.org/debian bookworm main contrib
deb http://deb.debian.org/debian bookworm-updates main contrib
# Proxmox Backup Server pbs-no-subscription repository provided by proxmox.com,
# NOT recommended for production use
deb http://download.proxmox.com/debian/pbs bullseye pbs-no-subscription
deb http://download.proxmox.com/debian/pbs bookworm pbs-no-subscription
# security updates
deb http://security.debian.org/debian-security bullseye-security main contrib
deb http://security.debian.org/debian-security bookworm-security main contrib
`Proxmox Backup`_ Test Repository
@ -133,7 +133,7 @@ You can access this repository by adding the following line to
.. code-block:: sources.list
:caption: sources.list entry for ``pbstest``
deb http://download.proxmox.com/debian/pbs bullseye pbstest
deb http://download.proxmox.com/debian/pbs bookworm pbstest
.. _package_repositories_client_only:
@ -158,6 +158,20 @@ In order to configure this repository you need to first :ref:`setup the Proxmox
release key <package_repos_secure_apt>`. After that, add the repository URL to
the APT sources lists.
**Repositories for Debian 12 (Bookworm) based releases**
This repository is tested with:
- Debian Bookworm
Edit the file ``/etc/apt/sources.list.d/pbs-client.list`` and add the following
snippet
.. code-block:: sources.list
:caption: File: ``/etc/apt/sources.list``
deb http://download.proxmox.com/debian/pbs-client bookworm main
**Repositories for Debian 11 (Bullseye) based releases**
This repository is tested with:

View File

@ -1,4 +1,4 @@
Command line tool for restoring files and directories from Proxmox Backup
Command-line tool for restoring files and directories from Proxmox Backup
archives. In contrast to proxmox-backup-client, this supports both
container/host and VM backups.

View File

@ -20,7 +20,7 @@ Server as of `Proxmox VE 6.3
A Proxmox Backup Server can be added under ``Datacenter -> Storage``.
Using the Proxmox VE Command-Line
Using the Proxmox VE Command Line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You need to define a new storage with type 'pbs' on your `Proxmox VE`_

View File

@ -1,4 +1,4 @@
pxar Command Line Tool
pxar Command-line Tool
======================
.. include:: pxar/description.rst

View File

@ -1,4 +1,4 @@
``pxar`` is a command line utility for creating and manipulating archives in the
``pxar`` is a command-line utility for creating and manipulating archives in the
:ref:`pxar-format`.
It is inspired by `casync file archive format
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,

View File

@ -48,12 +48,12 @@ You can create an ``ext4`` or ``xfs`` filesystem on a disk using ``fs
create``, or by navigating to **Administration -> Storage/Disks -> Directory**
in the web interface and creating one from there. The following command creates
an ``ext4`` filesystem and passes the ``--add-datastore`` parameter, in order to
automatically create a datastore on the disk (in this case ``sdd``). This will
automatically create a datastore on the disk. This will
create a datastore at the location ``/mnt/datastore/store1``:
.. code-block:: console
# proxmox-backup-manager disk fs create store1 --disk sdd --filesystem ext4 --add-datastore true
# proxmox-backup-manager disk fs create store1 --disk sdX --filesystem ext4 --add-datastore true
.. image:: images/screenshots/pbs-gui-disks-zfs-create.png
:align: right
@ -61,12 +61,12 @@ create a datastore at the location ``/mnt/datastore/store1``:
You can also create a ``zpool`` with various raid levels from **Administration
-> Storage/Disks -> ZFS** in the web interface, or by using ``zpool create``. The command
below creates a mirrored ``zpool`` using two disks (``sdb`` & ``sdc``) and
below creates a mirrored ``zpool`` using two disks and
mounts it under ``/mnt/datastore/zpool1``:
.. code-block:: console
# proxmox-backup-manager disk zpool create zpool1 --devices sdb,sdc --raidlevel mirror
# proxmox-backup-manager disk zpool create zpool1 --devices sdX,sdY --raidlevel mirror
.. note:: You can also pass the ``--add-datastore`` parameter here, to automatically
create a datastore from the disk.

View File

@ -84,7 +84,7 @@ Setting up a New Partition for use as Synced ESP
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To format and initialize a partition as synced ESP, for example, after replacing a
failed vdev in an rpool, ``proxmox-boot-tool`` from ``pve-kernel-helper`` can be used.
failed vdev in an rpool, ``proxmox-boot-tool`` from ``proxmox-kernel-helper`` can be used.
WARNING: the ``format`` command will format the ``<partition>``. Make sure to pass
in the right device/partition!

View File

@ -180,7 +180,7 @@ Configuration
-------------
Please note that you can configure anything using the graphical user
interface or the command line interface. Both methods result in the
interface or the command-line interface. Both methods result in the
same configuration.
.. _tape_changer_config:
@ -712,7 +712,7 @@ backup and *Media Pool* is the pool to back up to.
Administration
--------------
Many sub-commands of the ``proxmox-tape`` command line tools take a
Many sub-commands of the ``proxmox-tape`` command-line tools take a
parameter called ``--drive``, which specifies the tape drive you want
to work on. For convenience, you can set this in an environment
variable:
@ -937,8 +937,8 @@ Encryption Key Management
Proxmox Backup Server also provides an interface for handling encryption keys on
the backup server. Encryption keys can be managed from the **Tape Backup ->
Encryption Keys** section of the GUI or through the ``proxmox-tape key`` command
line tool. To create a new encryption key from the command line:
Encryption Keys** section of the GUI or through the ``proxmox-tape key``
command-line tool. To create a new encryption key from the command line:
.. code-block:: console

View File

@ -194,7 +194,7 @@ Index files(*.fidx*, *.didx*) contain information about how to rebuild a file.
More precisely, they contain an ordered list of references to the chunks that
the original file was split into. If there is something wrong with a snapshot,
it might be useful to find out which chunks are referenced in it, and check
whether they are present and intact. The ``proxmox-backup-debug`` command line
whether they are present and intact. The ``proxmox-backup-debug`` command-line
tool can be used to inspect such files and recover their contents. For example,
to get a list of the referenced chunks of a *.fidx* index:

View File

@ -30,7 +30,7 @@ choose the realm when you add a new user. Possible realms are:
After installation, there is a single user, ``root@pam``, which corresponds to
the Unix superuser. User configuration information is stored in the file
``/etc/proxmox-backup/user.cfg``. You can use the ``proxmox-backup-manager``
command line tool to list or manipulate users:
command-line tool to list or manipulate users:
.. code-block:: console
@ -563,6 +563,32 @@ use :ref:`API Tokens <user_tokens>` for all other use cases, especially
non-interactive ones (for example, adding a Proxmox Backup Server to Proxmox VE
as a storage).
.. _user_tfa_lockout:
Limits and Lockout of Two-Factor Authentication
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A second factor is meant to protect users if their password is somehow leaked
or guessed. However, some factors could still be broken by brute force. For
this reason, users will be locked out after too many failed 2nd factor login
attempts.
For TOTP, 8 failed attempts will disable the user's TOTP factors. They are
unlocked when logging in with a recovery key. If TOTP was the only available
factor, admin intervention is required, and it is highly recommended to require
the user to change their password immediately.
Since FIDO2/Webauthn and recovery keys are less susceptible to brute force
attacks, the limit there is higher (100 tries), but all second factors are
blocked for an hour when exceeded.
An admin can unlock a user's Two-Factor Authentication at any time via the user
list view in the web UI, or using the command line:
.. code-block:: console
proxmox-backup-manager user tfa unlock joe@pbs
Authentication Realms
---------------------

View File

@ -1 +1 @@
deb https://enterprise.proxmox.com/debian/pbs bullseye pbs-enterprise
deb https://enterprise.proxmox.com/debian/pbs bookworm pbs-enterprise

View File

@ -15,6 +15,7 @@ serde.workspace = true
serde_plain.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api-types" ] }
proxmox-human-byte.workspace = true
proxmox-lang.workspace=true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-serde.workspace = true

View File

@ -1,358 +0,0 @@
use anyhow::{bail, Error};
use proxmox_schema::{ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType};
/// Size units for byte sizes
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SizeUnit {
    /// Plain bytes (factor 1).
    Byte,
    // SI (base 10)
    /// Kilobyte, 10^3 bytes.
    KByte,
    /// Megabyte, 10^6 bytes.
    MByte,
    /// Gigabyte, 10^9 bytes.
    GByte,
    /// Terabyte, 10^12 bytes.
    TByte,
    /// Petabyte, 10^15 bytes.
    PByte,
    // IEC (base 2)
    /// Kibibyte, 2^10 bytes.
    Kibi,
    /// Mebibyte, 2^20 bytes.
    Mebi,
    /// Gibibyte, 2^30 bytes.
    Gibi,
    /// Tebibyte, 2^40 bytes.
    Tebi,
    /// Pebibyte, 2^50 bytes.
    Pebi,
}
impl SizeUnit {
    /// Returns the scaling factor of this unit relative to a single byte.
    pub fn factor(&self) -> f64 {
        const KIB: f64 = 1024.0;
        match *self {
            SizeUnit::Byte => 1.0,
            // SI (base 10)
            SizeUnit::KByte => 1_000.0,
            SizeUnit::MByte => 1_000_000.0,
            SizeUnit::GByte => 1_000_000_000.0,
            SizeUnit::TByte => 1_000_000_000_000.0,
            SizeUnit::PByte => 1_000_000_000_000_000.0,
            // IEC (base 2)
            SizeUnit::Kibi => KIB,
            SizeUnit::Mebi => KIB * KIB,
            SizeUnit::Gibi => KIB * KIB * KIB,
            SizeUnit::Tebi => KIB * KIB * KIB * KIB,
            SizeUnit::Pebi => KIB * KIB * KIB * KIB * KIB,
        }
    }

    /// Picks the biggest unit that still leaves a non-zero value before the
    /// decimal point; `binary` selects IEC (base 2) over SI (base 10) units.
    pub fn auto_scale(size: f64, binary: bool) -> SizeUnit {
        if binary {
            // number of significant bits decides the IEC unit (2^10 steps)
            let significant_bits = 64 - (size as u64).leading_zeros();
            if significant_bits > 50 {
                SizeUnit::Pebi
            } else if significant_bits > 40 {
                SizeUnit::Tebi
            } else if significant_bits > 30 {
                SizeUnit::Gibi
            } else if significant_bits > 20 {
                SizeUnit::Mebi
            } else if significant_bits > 10 {
                SizeUnit::Kibi
            } else {
                SizeUnit::Byte
            }
        } else if size >= 1_000_000_000_000_000.0 {
            SizeUnit::PByte
        } else if size >= 1_000_000_000_000.0 {
            SizeUnit::TByte
        } else if size >= 1_000_000_000.0 {
            SizeUnit::GByte
        } else if size >= 1_000_000.0 {
            SizeUnit::MByte
        } else if size >= 1_000.0 {
            SizeUnit::KByte
        } else {
            SizeUnit::Byte
        }
    }
}
/// Renders the conventional unit symbol ("B", "KB", "KiB", ...).
impl std::fmt::Display for SizeUnit {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let symbol = match *self {
            SizeUnit::Byte => "B",
            // SI (base 10)
            SizeUnit::KByte => "KB",
            SizeUnit::MByte => "MB",
            SizeUnit::GByte => "GB",
            SizeUnit::TByte => "TB",
            SizeUnit::PByte => "PB",
            // IEC (base 2)
            SizeUnit::Kibi => "KiB",
            SizeUnit::Mebi => "MiB",
            SizeUnit::Gibi => "GiB",
            SizeUnit::Tebi => "TiB",
            SizeUnit::Pebi => "PiB",
        };
        f.write_str(symbol)
    }
}
/// Strips a trailing `SizeUnit` (including any whitespace before it).
///
/// Both IEC and SI scale symbols are recognized; the trailing B/b byte
/// symbol is optional.
fn strip_unit(v: &str) -> (&str, SizeUnit) {
    let v = v.strip_suffix(&['b', 'B'][..]).unwrap_or(v); // byte is implied anyway

    // a trailing 'i' switches to the IEC (base 2) interpretation
    let (v, binary) = match v.strip_suffix('i') {
        Some(rest) => (rest, true),
        None => (v, false),
    };

    let unit = match v.chars().next_back() {
        Some('k' | 'K') if !binary => SizeUnit::KByte,
        Some('m' | 'M') if !binary => SizeUnit::MByte,
        Some('g' | 'G') if !binary => SizeUnit::GByte,
        Some('t' | 'T') if !binary => SizeUnit::TByte,
        Some('p' | 'P') if !binary => SizeUnit::PByte,
        // binary (IEC recommended) variants
        Some('k' | 'K') => SizeUnit::Kibi,
        Some('m' | 'M') => SizeUnit::Mebi,
        Some('g' | 'G') => SizeUnit::Gibi,
        Some('t' | 'T') => SizeUnit::Tebi,
        Some('p' | 'P') => SizeUnit::Pebi,
        _ => SizeUnit::Byte,
    };

    // drop the (single ASCII) unit character if one matched above
    let value = if unit == SizeUnit::Byte {
        v
    } else {
        &v[..v.len() - 1]
    };

    (value.trim_end(), unit)
}
/// Byte size which can be displayed in a human friendly way
#[derive(Debug, Copy, Clone, UpdaterType, PartialEq)]
pub struct HumanByte {
    /// The significant value; it does not include any factor of the `unit`
    size: f64,
    /// The scale/unit of the value
    unit: SizeUnit,
}
fn verify_human_byte(s: &str) -> Result<(), Error> {
match s.parse::<HumanByte>() {
Ok(_) => Ok(()),
Err(err) => bail!("byte-size parse error for '{}': {}", s, err),
}
}
impl ApiType for HumanByte {
    /// Schema for the string representation accepted by the API.
    // Fixed typo in the unit list: the gibibyte symbol is "GiB", not "Gib".
    const API_SCHEMA: Schema = StringSchema::new(
        "Byte size with optional unit (B, KB (base 10), MB, GB, ..., KiB (base 2), MiB, GiB, ...).",
    )
    .format(&ApiStringFormat::VerifyFn(verify_human_byte))
    .min_length(1)
    .max_length(64)
    .schema();
}
impl HumanByte {
    /// Create an instance from an explicit size and unit; the size must not
    /// be negative.
    pub fn with_unit(size: f64, unit: SizeUnit) -> Result<Self, Error> {
        if size < 0.0 {
            bail!("byte size may not be negative");
        }
        Ok(HumanByte { size, unit })
    }

    // shared helper: scale a raw byte count to the best-fitting unit
    fn new_scaled(size: f64, binary: bool) -> Self {
        let unit = SizeUnit::auto_scale(size, binary);
        HumanByte {
            size: size / unit.factor(),
            unit,
        }
    }

    /// Create a new instance with the optimal binary (IEC) unit computed.
    pub fn new_binary(size: f64) -> Self {
        Self::new_scaled(size, true)
    }

    /// Create a new instance with the optimal decimal (SI) unit computed.
    pub fn new_decimal(size: f64) -> Self {
        Self::new_scaled(size, false)
    }

    /// Returns the size as a `u64` number of bytes.
    pub fn as_u64(&self) -> u64 {
        self.as_f64() as u64
    }

    /// Returns the size as an `f64` number of bytes.
    pub fn as_f64(&self) -> f64 {
        self.unit.factor() * self.size
    }

    /// Returns a copy rescaled to the optimal binary (IEC) unit.
    pub fn auto_scale_binary(self) -> Self {
        Self::new_binary(self.as_f64())
    }

    /// Returns a copy rescaled to the optimal decimal (SI) unit.
    pub fn auto_scale_decimal(self) -> Self {
        Self::new_decimal(self.as_f64())
    }
}
impl From<u64> for HumanByte {
fn from(v: u64) -> Self {
HumanByte::new_binary(v as f64)
}
}
impl From<usize> for HumanByte {
fn from(v: usize) -> Self {
HumanByte::new_binary(v as f64)
}
}
impl std::fmt::Display for HumanByte {
    /// Formats as "<size> <unit>", rounding the size to the requested
    /// precision (default: 3 decimal places, override with `{:.N}`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let precision = f.precision().unwrap_or(3) as f64;
        // dropped a redundant `1.0 *` factor here
        let precision_factor = 10.0_f64.powf(precision);
        // this could cause loss of information, rust has sadly no shortest-max-X flt2dec fmt yet
        let size = ((self.size * precision_factor).round()) / precision_factor;
        write!(f, "{} {}", size, self.unit)
    }
}
impl std::str::FromStr for HumanByte {
    type Err = Error;

    /// Parses strings like "1.5KB", "2 KiB" or "1300b" (unit optional).
    fn from_str(v: &str) -> Result<Self, Error> {
        let (value, unit) = strip_unit(v);
        let size: f64 = value.parse()?;
        HumanByte::with_unit(size, unit)
    }
}
proxmox_serde::forward_deserialize_to_from_str!(HumanByte);
proxmox_serde::forward_serialize_to_display!(HumanByte);
#[test]
// Round-trip test: parse a string, check the resulting size/unit pair, and
// check that formatting it again yields the expected canonical string.
fn test_human_byte_parser() -> Result<(), Error> {
    assert!("-10".parse::<HumanByte>().is_err()); // negative size

    // Checks one input against the expected size, unit and re-serialization.
    fn do_test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> Result<(), Error> {
        let h: HumanByte = v.parse()?;

        if h.size != size {
            bail!("got unexpected size for '{}' ({} != {})", v, h.size, size);
        }
        if h.unit != unit {
            bail!(
                "got unexpected unit for '{}' ({:?} != {:?})",
                v,
                h.unit,
                unit
            );
        }

        let new = h.to_string();
        if new != *as_str {
            bail!("to_string failed for '{}' ({:?} != {:?})", v, new, as_str);
        }
        Ok(())
    }
    // Wrapper turning the detailed error into a bool usable inside assert!().
    fn test(v: &str, size: f64, unit: SizeUnit, as_str: &str) -> bool {
        match do_test(v, size, unit, as_str) {
            Ok(_) => true,
            Err(err) => {
                eprintln!("{}", err); // makes debugging easier
                false
            }
        }
    }

    // plain byte values, with and without decimals (display rounds to 3 places)
    assert!(test("14", 14.0, SizeUnit::Byte, "14 B"));
    assert!(test("14.4", 14.4, SizeUnit::Byte, "14.4 B"));
    assert!(test("14.45", 14.45, SizeUnit::Byte, "14.45 B"));
    assert!(test("14.456", 14.456, SizeUnit::Byte, "14.456 B"));
    assert!(test("14.4567", 14.4567, SizeUnit::Byte, "14.457 B"));

    // explicit precision overrides via the format spec
    let h: HumanByte = "1.2345678".parse()?;
    assert_eq!(&format!("{:.0}", h), "1 B");
    assert_eq!(&format!("{:.0}", h.as_f64()), "1"); // use as_f64 to get raw bytes without unit
    assert_eq!(&format!("{:.1}", h), "1.2 B");
    assert_eq!(&format!("{:.2}", h), "1.23 B");
    assert_eq!(&format!("{:.3}", h), "1.235 B");
    assert_eq!(&format!("{:.4}", h), "1.2346 B");
    assert_eq!(&format!("{:.5}", h), "1.23457 B");
    assert_eq!(&format!("{:.6}", h), "1.234568 B");
    assert_eq!(&format!("{:.7}", h), "1.2345678 B");
    assert_eq!(&format!("{:.8}", h), "1.2345678 B");

    assert!(test(
        "987654321",
        987654321.0,
        SizeUnit::Byte,
        "987654321 B"
    ));

    // optional byte symbol, case-insensitive, optional whitespace before unit
    assert!(test("1300b", 1300.0, SizeUnit::Byte, "1300 B"));
    assert!(test("1300B", 1300.0, SizeUnit::Byte, "1300 B"));
    assert!(test("1300 B", 1300.0, SizeUnit::Byte, "1300 B"));
    assert!(test("1300 b", 1300.0, SizeUnit::Byte, "1300 B"));

    // SI (base 10) units
    assert!(test("1.5KB", 1.5, SizeUnit::KByte, "1.5 KB"));
    assert!(test("1.5kb", 1.5, SizeUnit::KByte, "1.5 KB"));
    assert!(test("1.654321MB", 1.654_321, SizeUnit::MByte, "1.654 MB"));

    assert!(test("2.0GB", 2.0, SizeUnit::GByte, "2 GB"));

    assert!(test("1.4TB", 1.4, SizeUnit::TByte, "1.4 TB"));
    assert!(test("1.4tb", 1.4, SizeUnit::TByte, "1.4 TB"));

    // IEC (base 2) units; the trailing 'B' after "Ki" etc. is optional
    assert!(test("2KiB", 2.0, SizeUnit::Kibi, "2 KiB"));
    assert!(test("2Ki", 2.0, SizeUnit::Kibi, "2 KiB"));
    assert!(test("2kib", 2.0, SizeUnit::Kibi, "2 KiB"));

    assert!(test("2.3454MiB", 2.3454, SizeUnit::Mebi, "2.345 MiB"));
    assert!(test("2.3456MiB", 2.3456, SizeUnit::Mebi, "2.346 MiB"));

    assert!(test("4gib", 4.0, SizeUnit::Gibi, "4 GiB"));

    Ok(())
}
#[test]
// Checks decimal (SI) auto-scaling: raw byte count -> formatted string.
fn test_human_byte_auto_unit_decimal() {
    // (raw byte count, expected SI-formatted string)
    let cases: &[(u64, &str)] = &[
        (987, "987 B"),
        (1022, "1.022 KB"),
        (9_000, "9 KB"),
        (1_000, "1 KB"),
        (1_000_000, "1 MB"),
        (1_000_000_000, "1 GB"),
        (1_000_000_000_000, "1 TB"),
        (1_000_000_000_000_000, "1 PB"),
        ((1 << 30) + 103 * (1 << 20), "1.182 GB"),
        ((1 << 30) + 128 * (1 << 20), "1.208 GB"),
        ((2 << 50) + 500 * (1 << 40), "2.802 PB"),
    ];
    for &(bytes, expected) in cases {
        assert_eq!(HumanByte::new_decimal(bytes as f64).to_string(), expected);
    }
}
#[test]
// Checks binary (IEC) auto-scaling via the From<u64> conversion.
fn test_human_byte_auto_unit_binary() {
    // (raw byte count, expected IEC-formatted string)
    let cases: &[(u64, &str)] = &[
        (0, "0 B"),
        (987, "987 B"),
        (1022, "1022 B"),
        (9_000, "8.789 KiB"),
        (10_000_000, "9.537 MiB"),
        (10_000_000_000, "9.313 GiB"),
        (10_000_000_000_000, "9.095 TiB"),
        (1 << 10, "1 KiB"),
        ((1 << 10) * 10, "10 KiB"),
        (1 << 20, "1 MiB"),
        (1 << 30, "1 GiB"),
        (1 << 40, "1 TiB"),
        (1 << 50, "1 PiB"),
        ((1 << 30) + 103 * (1 << 20), "1.101 GiB"),
        ((1 << 30) + 128 * (1 << 20), "1.125 GiB"),
        ((1 << 40) + 128 * (1 << 30), "1.125 TiB"),
        ((2 << 50) + 512 * (1 << 40), "2.5 PiB"),
    ];
    for &(bytes, expected) in cases {
        assert_eq!(HumanByte::from(bytes).to_string(), expected);
    }
}

View File

@ -1,8 +1,6 @@
use serde::{Deserialize, Serialize};
use proxmox_schema::{
api, const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater,
};
use proxmox_schema::{api, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema, Updater};
use super::{REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA};
@ -142,27 +140,7 @@ pub enum RemoveVanished {
Properties,
}
macro_rules! DOMAIN_PART_REGEX {
() => {
r#"("[^"]+"|[^ ,+"/<>;=#][^,+"/<>;=]*[^ ,+"/<>;=]|[^ ,+"/<>;=#])"#
};
}
const_regex! {
pub LDAP_DOMAIN_REGEX = concat!(
r#"^\w+="#,
DOMAIN_PART_REGEX!(),
r#"(,\s*\w+="#,
DOMAIN_PART_REGEX!(),
")*$"
);
}
pub const LDAP_DOMAIN_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&LDAP_DOMAIN_REGEX);
pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain")
.format(&LDAP_DOMAIN_FORMAT)
.schema();
pub const LDAP_DOMAIN_SCHEMA: Schema = StringSchema::new("LDAP Domain").schema();
pub const SYNC_DEFAULTS_STRING_SCHEMA: Schema = StringSchema::new("sync defaults options")
.format(&ApiStringFormat::PropertyString(

View File

@ -72,9 +72,6 @@ pub use acl::*;
mod datastore;
pub use datastore::*;
mod human_byte;
pub use human_byte::HumanByte;
mod jobs;
pub use jobs::*;

View File

@ -1,10 +1,10 @@
use serde::{Deserialize, Serialize};
use proxmox_human_byte::HumanByte;
use proxmox_schema::{api, IntegerSchema, Schema, StringSchema, Updater};
use crate::{
HumanByte, CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT,
SINGLE_LINE_COMMENT_SCHEMA,
CIDR_SCHEMA, DAILY_DURATION_FORMAT, PROXMOX_SAFE_ID_FORMAT, SINGLE_LINE_COMMENT_SCHEMA,
};
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =

View File

@ -73,9 +73,20 @@ pub const EMAIL_SCHEMA: Schema = StringSchema::new("E-Mail Address.")
type: ApiToken
},
},
"totp-locked": {
type: bool,
optional: true,
default: false,
description: "True if the user is currently locked out of TOTP factors",
},
"tfa-locked-until": {
optional: true,
description: "Contains a timestamp until when a user is locked out of 2nd factors",
},
}
)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// User properties with added list of ApiTokens
pub struct UserWithTokens {
pub userid: Userid,
@ -93,6 +104,14 @@ pub struct UserWithTokens {
pub email: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub tokens: Vec<ApiToken>,
#[serde(skip_serializing_if = "bool_is_false", default)]
pub totp_locked: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub tfa_locked_until: Option<i64>,
}
fn bool_is_false(b: &bool) -> bool {
!b
}
#[api(

View File

@ -37,6 +37,7 @@ proxmox-async.workspace = true
proxmox-auth-api.workspace = true
proxmox-compression.workspace = true
proxmox-http = { workspace = true, features = [ "rate-limiter" ] }
proxmox-human-byte.workspace = true
proxmox-io = { workspace = true, features = [ "tokio" ] }
proxmox-lang.workspace = true
proxmox-router = { workspace = true, features = [ "cli", "server" ] }

View File

@ -12,7 +12,7 @@ use tokio::io::AsyncReadExt;
use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream;
use pbs_api_types::{BackupDir, BackupNamespace, HumanByte};
use pbs_api_types::{BackupDir, BackupNamespace};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
@ -21,6 +21,8 @@ use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
use pbs_tools::crypt_config::CryptConfig;
use proxmox_human_byte::HumanByte;
use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo};
use super::{H2Client, HttpClient};
@ -282,6 +284,13 @@ impl BackupWriter {
let close_path = format!("{}_close", prefix);
if let Some(manifest) = options.previous_manifest {
if !manifest
.files()
.iter()
.any(|file| file.filename == archive_name)
{
log::info!("Previous manifest does not contain an archive called '{archive_name}', skipping download..");
} else {
// try, but ignore errors
match ArchiveType::from_path(archive_name) {
Ok(ArchiveType::FixedIndex) => {
@ -293,7 +302,7 @@ impl BackupWriter {
)
.await
{
eprintln!("Error downloading .fidx from previous manifest: {}", err);
log::warn!("Error downloading .fidx from previous manifest: {}", err);
}
}
Ok(ArchiveType::DynamicIndex) => {
@ -305,12 +314,13 @@ impl BackupWriter {
)
.await
{
eprintln!("Error downloading .didx from previous manifest: {}", err);
log::warn!("Error downloading .didx from previous manifest: {}", err);
}
}
_ => { /* do nothing */ }
}
}
}
let wid = self
.h2

View File

@ -7,7 +7,7 @@ use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error};
use anyhow::{bail, Context, Error};
use futures::future::BoxFuture;
use futures::FutureExt;
use nix::dir::Dir;
@ -159,7 +159,7 @@ where
fs_magic,
&mut fs_feature_flags,
)
.map_err(|err| format_err!("failed to get metadata for source directory: {}", err))?;
.context("failed to get metadata for source directory")?;
let mut device_set = options.device_set.clone();
if let Some(ref mut set) = device_set {
@ -441,7 +441,7 @@ impl Archiver {
) {
Ok(stat) => stat,
Err(ref err) if err.not_found() => continue,
Err(err) => bail!("stat failed on {:?}: {}", full_path, err),
Err(err) => return Err(err).context(format!("stat failed on {:?}", full_path)),
};
let match_path = PathBuf::from("/").join(full_path.clone());
@ -796,7 +796,7 @@ fn get_fcaps(
Ok(())
}
Err(Errno::EBADF) => Ok(()), // symlinks
Err(err) => bail!("failed to read file capabilities: {}", err),
Err(err) => Err(err).context("failed to read file capabilities"),
}
}
@ -818,7 +818,7 @@ fn get_xattr_fcaps_acl(
return Ok(());
}
Err(Errno::EBADF) => return Ok(()), // symlinks
Err(err) => bail!("failed to read xattrs: {}", err),
Err(err) => return Err(err).context("failed to read xattrs"),
};
for attr in &xattrs {
@ -843,7 +843,9 @@ fn get_xattr_fcaps_acl(
Err(Errno::ENODATA) => (), // it got removed while we were iterating...
Err(Errno::EOPNOTSUPP) => (), // shouldn't be possible so just ignore this
Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either
Err(err) => bail!("error reading extended attribute {:?}: {}", attr, err),
Err(err) => {
return Err(err).context(format!("error reading extended attribute {attr:?}"))
}
}
}
@ -858,7 +860,7 @@ fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
Err(errno) if errno_is_unsupported(errno) => {
return Ok(());
}
Err(err) => bail!("failed to read file attributes: {}", err),
Err(err) => return Err(err).context("failed to read file attributes"),
}
metadata.stat.flags |= Flags::from_chattr(attr).bits();
@ -880,7 +882,7 @@ fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(),
Err(errno) if errno_is_unsupported(errno) => {
return Ok(());
}
Err(err) => bail!("failed to read fat attributes: {}", err),
Err(err) => return Err(err).context("failed to read fat attributes"),
}
metadata.stat.flags |= Flags::from_fat_attr(attr).bits();
@ -919,7 +921,7 @@ fn get_quota_project_id(
if errno_is_unsupported(errno) {
return Ok(());
} else {
bail!("error while reading quota project id ({})", errno);
return Err(errno).context("error while reading quota project id");
}
}
@ -973,7 +975,7 @@ fn get_acl_do(
Err(Errno::EBADF) => return Ok(()),
// Don't bail if there is no data
Err(Errno::ENODATA) => return Ok(()),
Err(err) => bail!("error while reading ACL - {}", err),
Err(err) => return Err(err).context("error while reading ACL"),
};
process_acl(metadata, acl, acl_type)

View File

@ -2,7 +2,7 @@ use std::ffi::OsString;
use std::os::unix::io::{AsRawFd, BorrowedFd, RawFd};
use std::path::{Path, PathBuf};
use anyhow::{bail, format_err, Error};
use anyhow::{bail, Context, Error};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::{mkdirat, Mode};
@ -130,7 +130,7 @@ impl PxarDirStack {
let dirs_len = self.dirs.len();
let mut fd = self.dirs[self.created - 1]
.try_as_borrowed_fd()
.ok_or_else(|| format_err!("lost track of directory file descriptors"))?
.context("lost track of directory file descriptors")?
.as_raw_fd();
while self.created < dirs_len {
@ -142,7 +142,7 @@ impl PxarDirStack {
self.dirs[self.created - 1]
.try_as_borrowed_fd()
.ok_or_else(|| format_err!("lost track of directory file descriptors"))
.context("lost track of directory file descriptors")
}
pub fn create_last_dir(&mut self, allow_existing_dirs: bool) -> Result<(), Error> {
@ -156,7 +156,7 @@ impl PxarDirStack {
self.dirs[0]
.try_as_borrowed_fd()
.ok_or_else(|| format_err!("lost track of directory file descriptors"))
.context("lost track of directory file descriptors")
}
pub fn path(&self) -> &Path {

View File

@ -8,7 +8,7 @@ use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error};
use anyhow::{bail, format_err, Context, Error};
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
@ -40,41 +40,102 @@ pub struct PxarExtractOptions<'a> {
pub type ErrorHandler = Box<dyn FnMut(Error) -> Result<(), Error> + Send>;
pub fn extract_archive<T, F>(
mut decoder: pxar::decoder::Decoder<T>,
decoder: pxar::decoder::Decoder<T>,
destination: &Path,
feature_flags: Flags,
mut callback: F,
callback: F,
options: PxarExtractOptions,
) -> Result<(), Error>
where
T: pxar::decoder::SeqRead,
F: FnMut(&Path),
{
ExtractorIter::new(decoder, destination, feature_flags, callback, options)
.context("failed to initialize extractor")?
.collect::<Result<(), Error>>()
.context("encountered unexpected error during extraction")
}
struct ExtractorIterState {
match_stack: Vec<bool>,
err_path_stack: Vec<OsString>,
current_match: bool,
end_reached: bool,
}
/// An [`Iterator`] that encapsulates the process of extraction in [extract_archive].
/// Therefore, traversing over an [`ExtractorIter`] until exhaustion extracts an
/// entire PXAR archive.
struct ExtractorIter<'a, T, F>
where
T: pxar::decoder::SeqRead,
F: FnMut(&Path),
{
decoder: pxar::decoder::Decoder<T>,
callback: F,
extractor: Extractor,
match_list: &'a [MatchEntry],
state: ExtractorIterState,
}
impl ExtractorIterState {
fn new(options: &PxarExtractOptions) -> Self {
Self {
match_stack: Vec::new(),
err_path_stack: Vec::new(),
current_match: options.extract_match_default,
end_reached: false,
}
}
}
impl<'a, T, F> ExtractorIter<'a, T, F>
where
T: pxar::decoder::SeqRead,
F: FnMut(&Path),
{
/// Creates and initializes the state of a new [`ExtractorIter`].
///
/// This function requires that the given [`Decoder`][D] has not made a single
/// traversal (a call to [`next()`][next]) yet.
///
/// [D]: pxar::decoder::Decoder
/// [next]: std::iter::Iterator::next()
fn new(
mut decoder: pxar::decoder::Decoder<T>,
destination: &Path,
feature_flags: Flags,
callback: F,
options: PxarExtractOptions<'a>,
) -> Result<Self, Error> {
// we use this to keep track of our directory-traversal
decoder.enable_goodbye_entries(true);
let root = decoder
.next()
.ok_or_else(|| format_err!("found empty pxar archive"))?
.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
.context("found empty pxar archive")?
.context("error reading pxar archive")?;
if !root.is_dir() {
bail!("pxar archive does not start with a directory entry!");
}
let mut state = ExtractorIterState::new(&options);
state.err_path_stack.push(OsString::from("/"));
create_path(
destination,
None,
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
)
.map_err(|err| format_err!("error creating directory {:?}: {}", destination, err))?;
.with_context(|| format!("error creating directory {destination:?}"))?;
let dir = Dir::open(
destination,
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
Mode::empty(),
)
.map_err(|err| format_err!("unable to open target directory {:?}: {}", destination, err,))?;
.with_context(|| format!("unable to open target directory {destination:?}"))?;
let mut extractor = Extractor::new(
dir,
@ -88,27 +149,96 @@ where
extractor.on_error(on_error);
}
let mut match_stack = Vec::new();
let mut err_path_stack = vec![OsString::from("/")];
let mut current_match = options.extract_match_default;
while let Some(entry) = decoder.next() {
let entry = entry.map_err(|err| format_err!("error reading pxar archive: {}", err))?;
let file_name_os = entry.file_name();
// safety check: a file entry in an archive must never contain slashes:
if file_name_os.as_bytes().contains(&b'/') {
bail!("archive file entry contains slashes, which is invalid and a security concern");
Ok(Self {
decoder,
callback,
extractor,
match_list: options.match_list,
state,
})
}
let file_name = CString::new(file_name_os.as_bytes())
.map_err(|_| format_err!("encountered file name with null-bytes"))?;
#[inline(always)]
fn callback(&mut self, path: &Path) {
(self.callback)(path)
}
}
impl<'a, T, F> Iterator for ExtractorIter<'a, T, F>
where
T: pxar::decoder::SeqRead,
F: FnMut(&Path),
{
type Item = Result<(), Error>;
/// Performs the extraction of [`Entries`][E] yielded by the [`Decoder`][D].
///
/// In detail, the [`ExtractorIter`] will stop if and only if one of the
/// following conditions is true:
/// * The [`Decoder`][D] is exhausted
/// * The [`Decoder`][D] failed to read from the archive and consequently
/// yielded an [`io::Error`]
/// * The [`Entry`][E]'s filename is invalid (contains nul bytes or a slash)
///
/// Should an error occur during any point of extraction (**not** while
/// fetching the next [`Entry`][E]), the error may be handled by the
/// [`ErrorHandler`] provided by the [`PxarExtractOptions`] used to
/// initialize the iterator.
///
/// Extraction errors will have a corresponding [`PxarExtractContext`] attached.
///
/// [E]: pxar::Entry
/// [D]: pxar::decoder::Decoder
fn next(&mut self) -> Option<Self::Item> {
if self.state.end_reached {
return None;
}
let entry = match self.decoder.next() {
None => {
self.state.end_reached = true;
if !self.extractor.dir_stack.is_empty() {
return Some(Err(format_err!(
"unexpected eof while decoding pxar archive"
)));
} else {
return None;
}
}
Some(Err(err)) => {
self.state.end_reached = true;
return Some(Err(format_err!(err).context("error reading pxar archive")));
}
Some(Ok(entry)) => entry,
};
let file_name_os = entry.file_name();
let file_name_bytes = file_name_os.as_bytes();
if file_name_bytes.contains(&b'/') {
self.state.end_reached = true;
return Some(Err(format_err!(
"archive file entry contains slashes, which is invalid and a security concern"
)));
}
let file_name = match CString::new(file_name_bytes) {
Err(err) => {
self.state.end_reached = true;
return Some(Err(format_err!(err)));
}
Ok(file_name_ref) => file_name_ref,
};
let metadata = entry.metadata();
extractor.set_path(entry.path().as_os_str().to_owned());
self.extractor.set_path(entry.path().as_os_str().to_owned());
let match_result = options.match_list.matches(
let match_result = self.match_list.matches(
entry.path().as_os_str().as_bytes(),
Some(metadata.file_type() as u32),
);
@ -116,101 +246,192 @@ where
let did_match = match match_result {
Some(MatchType::Include) => true,
Some(MatchType::Exclude) => false,
None => current_match,
None => self.state.current_match,
};
match (did_match, entry.kind()) {
let extract_res = match (did_match, entry.kind()) {
(_, EntryKind::Directory) => {
callback(entry.path());
self.callback(entry.path());
let create = current_match && match_result != Some(MatchType::Exclude);
extractor
let create = self.state.current_match && match_result != Some(MatchType::Exclude);
let res = self
.extractor
.enter_directory(file_name_os.to_owned(), metadata.clone(), create)
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
.context(PxarExtractContext::EnterDirectory);
if res.is_ok() {
// We're starting a new directory, push our old matching state and replace it with
// our new one:
match_stack.push(current_match);
current_match = did_match;
self.state.match_stack.push(self.state.current_match);
self.state.current_match = did_match;
// When we hit the goodbye table we'll try to apply metadata to the directory, but
// the Goodbye entry will not contain the path, so push it to our path stack for
// error messages:
err_path_stack.push(extractor.clone_path());
self.state.err_path_stack.push(self.extractor.clone_path());
}
Ok(())
res
}
(_, EntryKind::GoodbyeTable) => {
// go up a directory
extractor.set_path(err_path_stack.pop().ok_or_else(|| {
format_err!(
"error at entry {:?}: unexpected end of directory",
file_name_os
)
})?);
extractor
.leave_directory()
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
let res = self
.state
.err_path_stack
.pop()
.context("unexpected end of directory")
.map(|path| self.extractor.set_path(path))
.and(self.extractor.leave_directory())
.context(PxarExtractContext::LeaveDirectory);
if res.is_ok() {
// We left a directory, also get back our previous matching state. This is in sync
// with `dir_stack` so this should never be empty except for the final goodbye
// table, in which case we get back to the default of `true`.
current_match = match_stack.pop().unwrap_or(true);
self.state.current_match = self.state.match_stack.pop().unwrap_or(true);
}
Ok(())
res
}
(true, EntryKind::Symlink(link)) => {
callback(entry.path());
extractor.extract_symlink(&file_name, metadata, link.as_ref())
self.callback(entry.path());
self.extractor
.extract_symlink(&file_name, metadata, link.as_ref())
.context(PxarExtractContext::ExtractSymlink)
}
(true, EntryKind::Hardlink(link)) => {
callback(entry.path());
extractor.extract_hardlink(&file_name, link.as_os_str())
self.callback(entry.path());
self.extractor
.extract_hardlink(&file_name, link.as_os_str())
.context(PxarExtractContext::ExtractHardlink)
}
(true, EntryKind::Device(dev)) => {
if extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
callback(entry.path());
extractor.extract_device(&file_name, metadata, dev)
if self.extractor.contains_flags(Flags::WITH_DEVICE_NODES) {
self.callback(entry.path());
self.extractor
.extract_device(&file_name, metadata, dev)
.context(PxarExtractContext::ExtractDevice)
} else {
Ok(())
}
}
(true, EntryKind::Fifo) => {
if extractor.contains_flags(Flags::WITH_FIFOS) {
callback(entry.path());
extractor.extract_special(&file_name, metadata, 0)
if self.extractor.contains_flags(Flags::WITH_FIFOS) {
self.callback(entry.path());
self.extractor
.extract_special(&file_name, metadata, 0)
.context(PxarExtractContext::ExtractFifo)
} else {
Ok(())
}
}
(true, EntryKind::Socket) => {
if extractor.contains_flags(Flags::WITH_SOCKETS) {
callback(entry.path());
extractor.extract_special(&file_name, metadata, 0)
if self.extractor.contains_flags(Flags::WITH_SOCKETS) {
self.callback(entry.path());
self.extractor
.extract_special(&file_name, metadata, 0)
.context(PxarExtractContext::ExtractSocket)
} else {
Ok(())
}
}
(true, EntryKind::File { size, .. }) => extractor.extract_file(
(true, EntryKind::File { size, .. }) => {
let contents = self.decoder.contents();
if let Some(mut contents) = contents {
self.extractor.extract_file(
&file_name,
metadata,
*size,
&mut decoder.contents().ok_or_else(|| {
format_err!("found regular file entry without contents in archive")
})?,
extractor.overwrite,
),
&mut contents,
self.extractor.overwrite,
)
} else {
Err(format_err!(
"found regular file entry without contents in archive"
))
}
.context(PxarExtractContext::ExtractFile)
}
(false, _) => Ok(()), // skip this
};
Some(
extract_res
.with_context(|| format!("error at entry {file_name_os:?}"))
.or_else(&mut *self.extractor.on_error),
)
}
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
}
if !extractor.dir_stack.is_empty() {
bail!("unexpected eof while decoding pxar archive");
/// Provides additional [context][C] for [`anyhow::Error`]s that are returned
/// while traversing an [`ExtractorIter`]. The [`PxarExtractContext`] can then
/// be accessed [via `anyhow`'s facilities][A] and may aid during error handling.
///
///
/// # Example
///
/// ```
/// # use anyhow::{anyhow, Error};
/// # use std::io;
/// # use pbs_client::pxar::PxarExtractContext;
///
/// let err = anyhow!("oh noes!").context(PxarExtractContext::ExtractFile);
///
/// if let Some(ctx) = err.downcast_ref::<PxarExtractContext>() {
/// match ctx {
/// PxarExtractContext::ExtractFile => {
/// // Conditionally handle the underlying error by type
/// if let Some(io_err) = err.downcast_ref::<io::Error>() {
/// // ...
/// };
/// },
/// PxarExtractContext::ExtractSocket => {
/// // ...
/// },
/// // ...
/// # _ => (),
/// }
/// }
/// ```
///
/// [A]: anyhow::Error
/// [C]: anyhow::Context
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum PxarExtractContext {
    /// Failed to enter (create/open) a directory entry.
    EnterDirectory,
    /// Failed to leave a directory (pop the directory stack / apply its metadata).
    LeaveDirectory,
    /// Failed to extract a symbolic link entry.
    ExtractSymlink,
    /// Failed to extract a hardlink entry.
    ExtractHardlink,
    /// Failed to extract a device node entry.
    ExtractDevice,
    /// Failed to extract a named pipe (FIFO) entry.
    ExtractFifo,
    /// Failed to extract a unix socket entry.
    ExtractSocket,
    /// Failed to extract a regular file entry.
    ExtractFile,
}
Ok(())
impl PxarExtractContext {
#[inline]
pub fn as_str(&self) -> &'static str {
use PxarExtractContext::*;
match *self {
EnterDirectory => "failed to enter directory",
LeaveDirectory => "failed to leave directory",
ExtractSymlink => "failed to extract symlink",
ExtractHardlink => "failed to extract hardlink",
ExtractDevice => "failed to extract device",
ExtractFifo => "failed to extract named pipe",
ExtractSocket => "failed to extract unix socket",
ExtractFile => "failed to extract file",
}
}
}
impl std::fmt::Display for PxarExtractContext {
    /// Formats the context by delegating to the static description
    /// returned from [`PxarExtractContext::as_str`].
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
/// Common state for file extraction.
@ -254,7 +475,7 @@ impl Extractor {
pub fn on_error(&mut self, mut on_error: Box<dyn FnMut(Error) -> Result<(), Error> + Send>) {
let path = Arc::clone(&self.current_path);
self.on_error = Box::new(move |err: Error| -> Result<(), Error> {
on_error(format_err!("error at {:?}: {}", path.lock().unwrap(), err))
on_error(err.context(format!("error at {:?}", path.lock().unwrap())))
});
}
@ -291,8 +512,8 @@ impl Extractor {
let dir = self
.dir_stack
.pop()
.map_err(|err| format_err!("unexpected end of directory entry: {}", err))?
.ok_or_else(|| format_err!("broken pxar archive (directory stack underrun)"))?;
.context("unexpected end of directory entry")?
.context("broken pxar archive (directory stack underrun)")?;
if let Some(fd) = dir.try_as_borrowed_fd() {
metadata::apply(
@ -302,7 +523,7 @@ impl Extractor {
&path_info,
&mut self.on_error,
)
.map_err(|err| format_err!("failed to apply directory metadata: {}", err))?;
.context("failed to apply directory metadata")?;
}
Ok(())
@ -316,7 +537,7 @@ impl Extractor {
self.dir_stack
.last_dir_fd(self.allow_existing_dirs)
.map(|d| d.as_raw_fd())
.map_err(|err| format_err!("failed to get parent directory file descriptor: {}", err))
.context("failed to get parent directory file descriptor")
}
pub fn extract_symlink(
@ -370,16 +591,12 @@ impl Extractor {
device: libc::dev_t,
) -> Result<(), Error> {
let mode = metadata.stat.mode;
let mode = u32::try_from(mode).map_err(|_| {
format_err!(
"device node's mode contains illegal bits: 0x{:x} (0o{:o})",
mode,
mode,
)
let mode = u32::try_from(mode).with_context(|| {
format!("device node's mode contains illegal bits: 0x{mode:x} (0o{mode:o})")
})?;
let parent = self.parent_fd()?;
unsafe { c_result!(libc::mknodat(parent, file_name.as_ptr(), mode, device)) }
.map_err(|err| format_err!("failed to create device node: {}", err))?;
.context("failed to create device node")?;
metadata::apply_at(
self.feature_flags,
@ -409,7 +626,7 @@ impl Extractor {
let mut file = unsafe {
std::fs::File::from_raw_fd(
nix::fcntl::openat(parent, file_name, oflags, Mode::from_bits(0o600).unwrap())
.map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
.with_context(|| format!("failed to create file {file_name:?}"))?,
)
};
@ -419,10 +636,10 @@ impl Extractor {
file.as_raw_fd(),
&mut self.on_error,
)
.map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
.context("failed to apply initial flags")?;
let result = sparse_copy(&mut *contents, &mut file)
.map_err(|err| format_err!("failed to copy file contents: {}", err))?;
let result =
sparse_copy(&mut *contents, &mut file).context("failed to copy file contents")?;
if size != result.written {
bail!(
@ -436,7 +653,7 @@ impl Extractor {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
Err(errno) if errno == nix::errno::Errno::EINTR => true,
Err(err) => bail!("error setting file size: {}", err),
Err(err) => return Err(err).context("error setting file size"),
} {}
}
@ -467,7 +684,7 @@ impl Extractor {
let mut file = tokio::fs::File::from_std(unsafe {
std::fs::File::from_raw_fd(
nix::fcntl::openat(parent, file_name, oflags, Mode::from_bits(0o600).unwrap())
.map_err(|err| format_err!("failed to create file {:?}: {}", file_name, err))?,
.with_context(|| format!("failed to create file {file_name:?}"))?,
)
});
@ -477,11 +694,11 @@ impl Extractor {
file.as_raw_fd(),
&mut self.on_error,
)
.map_err(|err| format_err!("failed to apply initial flags: {}", err))?;
.context("failed to apply initial flags")?;
let result = sparse_copy_async(&mut *contents, &mut file)
.await
.map_err(|err| format_err!("failed to copy file contents: {}", err))?;
.context("failed to copy file contents")?;
if size != result.written {
bail!(
@ -495,7 +712,7 @@ impl Extractor {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
Err(errno) if errno == nix::errno::Errno::EINTR => true,
Err(err) => bail!("error setting file size: {}", err),
Err(err) => return Err(err).context("error setting file size"),
} {}
}
@ -532,12 +749,12 @@ where
header.set_size(size);
add_metadata_to_header(&mut header, metadata);
header.set_cksum();
match contents {
Some(content) => tar.add_entry(&mut header, path, content).await,
None => tar.add_entry(&mut header, path, tokio::io::empty()).await,
}
.map_err(|err| format_err!("could not send file entry: {}", err))?;
Ok(())
.context("could not send file entry")
}
/// Creates a tar file from `path` and writes it into `output`
@ -551,7 +768,7 @@ where
let file = root
.lookup(&path)
.await?
.ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
.with_context(|| format!("error opening {:?}", path.as_ref()))?;
let mut components = file.entry().path().components();
components.next_back(); // discard last
@ -574,13 +791,13 @@ where
tarencoder
.add_entry(&mut header, path, tokio::io::empty())
.await
.map_err(|err| format_err!("could not send dir entry: {}", err))?;
.context("could not send dir entry")?;
}
let mut decoder = dir.decode_full().await?;
decoder.enable_goodbye_entries(false);
while let Some(entry) = decoder.next().await {
let entry = entry.map_err(|err| format_err!("cannot decode entry: {}", err))?;
let entry = entry.context("cannot decode entry")?;
let metadata = entry.metadata();
let path = entry.path().strip_prefix(prefix)?;
@ -595,7 +812,7 @@ where
let entry = root
.lookup(&path)
.await?
.ok_or_else(|| format_err!("error looking up '{:?}'", path))?;
.with_context(|| format!("error looking up {path:?}"))?;
let realfile = accessor.follow_hardlink(&entry).await?;
let metadata = realfile.entry().metadata();
let realpath = Path::new(link);
@ -630,7 +847,7 @@ where
tarencoder
.add_link(&mut header, path, stripped_path)
.await
.map_err(|err| format_err!("could not send hardlink entry: {}", err))?;
.context("could not send hardlink entry")?;
}
}
EntryKind::Symlink(link) if !link.data.is_empty() => {
@ -643,7 +860,7 @@ where
tarencoder
.add_link(&mut header, path, realpath)
.await
.map_err(|err| format_err!("could not send symlink entry: {}", err))?;
.context("could not send symlink entry")?;
}
EntryKind::Fifo => {
log::debug!("adding '{}' to tar", path.display());
@ -657,7 +874,7 @@ where
tarencoder
.add_entry(&mut header, path, tokio::io::empty())
.await
.map_err(|err| format_err!("could not send fifo entry: {}", err))?;
.context("could not send fifo entry")?;
}
EntryKind::Directory => {
log::debug!("adding '{}' to tar", path.display());
@ -671,7 +888,7 @@ where
tarencoder
.add_entry(&mut header, path, tokio::io::empty())
.await
.map_err(|err| format_err!("could not send dir entry: {}", err))?;
.context("could not send dir entry")?;
}
}
EntryKind::Device(device) => {
@ -690,7 +907,7 @@ where
tarencoder
.add_entry(&mut header, path, tokio::io::empty())
.await
.map_err(|err| format_err!("could not send device entry: {}", err))?;
.context("could not send device entry")?;
}
_ => {} // ignore all else
}
@ -714,7 +931,7 @@ where
let file = root
.lookup(&path)
.await?
.ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
.with_context(|| format!("error opening {:?}", path.as_ref()))?;
let prefix = {
let mut components = file.entry().path().components();
@ -756,13 +973,13 @@ where
);
zip.add_entry(entry, decoder.contents())
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
.context("could not send file entry")?;
}
EntryKind::Hardlink(_) => {
let entry = root
.lookup(&path)
.await?
.ok_or_else(|| format_err!("error looking up '{:?}'", path))?;
.with_context(|| format!("error looking up {:?}", path))?;
let realfile = accessor.follow_hardlink(&entry).await?;
let metadata = realfile.entry().metadata();
log::debug!("adding '{}' to zip", path.display());
@ -774,7 +991,7 @@ where
);
zip.add_entry(entry, decoder.contents())
.await
.map_err(|err| format_err!("could not send file entry: {}", err))?;
.context("could not send file entry")?;
}
EntryKind::Directory => {
log::debug!("adding '{}' to zip", path.display());
@ -806,26 +1023,14 @@ where
None,
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
)
.map_err(|err| {
format_err!(
"error creating directory {:?}: {}",
destination.as_ref(),
err
)
})?;
.with_context(|| format!("error creating directory {:?}", destination.as_ref()))?;
let dir = Dir::open(
destination.as_ref(),
OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
Mode::empty(),
)
.map_err(|err| {
format_err!(
"unable to open target directory {:?}: {}",
destination.as_ref(),
err,
)
})?;
.with_context(|| format!("unable to open target directory {:?}", destination.as_ref()))?;
Ok(Extractor::new(dir, metadata, false, false, Flags::DEFAULT))
}
@ -850,7 +1055,7 @@ where
let file = root
.lookup(&path)
.await?
.ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
.with_context(|| format!("error opening {:?}", path.as_ref()))?;
recurse_files_extractor(&mut extractor, file).await
}
@ -866,7 +1071,7 @@ where
decoder.enable_goodbye_entries(true);
let root = match decoder.next().await {
Some(Ok(root)) => root,
Some(Err(err)) => bail!("error getting root entry from pxar: {}", err),
Some(Err(err)) => return Err(err).context("error getting root entry from pxar"),
None => bail!("cannot extract empty archive"),
};
@ -920,8 +1125,8 @@ fn get_filename(entry: &Entry) -> Result<(OsString, CString), Error> {
bail!("archive file entry contains slashes, which is invalid and a security concern");
}
let file_name = CString::new(file_name_os.as_bytes())
.map_err(|_| format_err!("encountered file name with null-bytes"))?;
let file_name =
CString::new(file_name_os.as_bytes()).context("encountered file name with null-bytes")?;
Ok((file_name_os, file_name))
}
@ -943,7 +1148,7 @@ where
EntryKind::Directory => {
extractor
.enter_directory(file_name_os.to_owned(), metadata.clone(), true)
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
.with_context(|| format!("error at entry {file_name_os:?}"))?;
let dir = file.enter_directory().await?;
let mut seq_decoder = dir.decode_full().await?;
@ -957,9 +1162,10 @@ where
&file_name,
metadata,
*size,
&mut file.contents().await.map_err(|_| {
format_err!("found regular file entry without contents in archive")
})?,
&mut file
.contents()
.await
.context("found regular file entry without contents in archive")?,
extractor.overwrite,
)
.await?
@ -997,7 +1203,7 @@ where
dir_level += 1;
extractor
.enter_directory(file_name_os.to_owned(), metadata.clone(), true)
.map_err(|err| format_err!("error at entry {:?}: {}", file_name_os, err))?;
.with_context(|| format!("error at entry {file_name_os:?}"))?;
}
EntryKind::File { size, .. } => {
extractor
@ -1005,9 +1211,9 @@ where
&file_name,
metadata,
*size,
&mut decoder.contents().ok_or_else(|| {
format_err!("found regular file entry without contents in archive")
})?,
&mut decoder
.contents()
.context("found regular file entry without contents in archive")?,
extractor.overwrite,
)
.await?

View File

@ -2,7 +2,7 @@ use std::ffi::{CStr, CString};
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::Path;
use anyhow::{bail, format_err, Error};
use anyhow::{anyhow, bail, Context, Error};
use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
@ -106,7 +106,7 @@ pub fn apply(
.or_else(&mut *on_error)?;
add_fcaps(flags, c_proc_path.as_ptr(), metadata, &mut skip_xattrs).or_else(&mut *on_error)?;
apply_acls(flags, &c_proc_path, metadata, path_info)
.map_err(|err| format_err!("failed to apply acls: {}", err))
.context("failed to apply acls")
.or_else(&mut *on_error)?;
apply_quota_project_id(flags, fd, metadata).or_else(&mut *on_error)?;
@ -118,7 +118,7 @@ pub fn apply(
})
.map(drop)
.or_else(allow_notsupp)
.map_err(|err| format_err!("failed to change file mode: {}", err))
.context("failed to change file mode")
.or_else(&mut *on_error)?;
}
@ -134,11 +134,9 @@ pub fn apply(
Ok(_) => (),
Err(ref err) if err.is_errno(Errno::EOPNOTSUPP) => (),
Err(err) => {
on_error(format_err!(
"failed to restore mtime attribute on {:?}: {}",
path_info,
err
))?;
on_error(anyhow!(err).context(format!(
"failed to restore mtime attribute on {path_info:?}"
)))?;
}
}
@ -167,7 +165,7 @@ pub fn apply_ownership(
))
.map(drop)
.or_else(allow_notsupp)
.map_err(|err| format_err!("failed to set ownership: {}", err))
.context("failed to set ownership")
.or_else(&mut *on_error)?;
}
Ok(())
@ -198,9 +196,7 @@ fn add_fcaps(
})
.map(drop)
.or_else(|err| allow_notsupp_remember(err, skip_xattrs))
.map_err(|err| format_err!("failed to apply file capabilities: {}", err))?;
Ok(())
.context("failed to apply file capabilities")
}
fn apply_xattrs(
@ -234,7 +230,7 @@ fn apply_xattrs(
})
.map(drop)
.or_else(|err| allow_notsupp_remember(err, &mut *skip_xattrs))
.map_err(|err| format_err!("failed to apply extended attributes: {}", err))?;
.context("failed to apply extended attributes")?;
}
Ok(())
@ -348,21 +344,13 @@ fn apply_quota_project_id(flags: Flags, fd: RawFd, metadata: &Metadata) -> Resul
let mut fsxattr = fs::FSXAttr::default();
unsafe {
fs::fs_ioc_fsgetxattr(fd, &mut fsxattr).map_err(|err| {
format_err!(
"error while getting fsxattr to restore quota project id - {}",
err
)
})?;
fs::fs_ioc_fsgetxattr(fd, &mut fsxattr)
.context("error while getting fsxattr to restore quota project id")?;
fsxattr.fsx_projid = projid.projid as u32;
fs::fs_ioc_fssetxattr(fd, &fsxattr).map_err(|err| {
format_err!(
"error while setting fsxattr to restore quota project id - {}",
err
)
})?;
fs::fs_ioc_fssetxattr(fd, &fsxattr)
.context("error while setting fsxattr to restore quota project id")?;
}
Ok(())
@ -386,7 +374,7 @@ fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(
Err(errno) if errno_is_unsupported(errno) => {
return Ok(());
}
Err(err) => bail!("failed to read file attributes: {}", err),
Err(err) => return Err(err).context("failed to read file attributes"),
}
let attr = (chattr & mask) | (fattr & !mask);
@ -398,7 +386,7 @@ fn apply_chattr(fd: RawFd, chattr: libc::c_long, mask: libc::c_long) -> Result<(
match unsafe { fs::write_attr_fd(fd, &attr) } {
Ok(_) => Ok(()),
Err(errno) if errno_is_unsupported(errno) => Ok(()),
Err(err) => bail!("failed to set file attributes: {}", err),
Err(err) => Err(err).context("failed to set file attributes"),
}
}
@ -412,7 +400,7 @@ fn apply_flags(flags: Flags, fd: RawFd, entry_flags: u64) -> Result<(), Error> {
match unsafe { fs::write_fat_attr_fd(fd, &fatattr) } {
Ok(_) => (),
Err(errno) if errno_is_unsupported(errno) => (),
Err(err) => bail!("failed to set file FAT attributes: {}", err),
Err(err) => return Err(err).context("failed to set file FAT attributes"),
}
}

View File

@ -59,7 +59,7 @@ pub use flags::Flags;
pub use create::{create_archive, PxarCreateOptions};
pub use extract::{
create_tar, create_zip, extract_archive, extract_sub_dir, extract_sub_dir_seq, ErrorHandler,
PxarExtractOptions,
PxarExtractContext, PxarExtractOptions,
};
/// The format requires to build sorted directory lookup tables in

View File

@ -4,7 +4,7 @@ use std::ffi::OsStr;
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use anyhow::{bail, format_err, Error};
use anyhow::{bail, Context, Error};
use nix::sys::stat::Mode;
use pxar::{format::StatxTimestamp, mode, Entry, EntryKind, Metadata};
@ -12,10 +12,13 @@ use pxar::{format::StatxTimestamp, mode, Entry, EntryKind, Metadata};
/// Get the file permissions as `nix::Mode`
pub fn perms_from_metadata(meta: &Metadata) -> Result<Mode, Error> {
let mode = meta.stat.get_permission_bits();
u32::try_from(mode)
.map_err(drop)
.and_then(|mode| Mode::from_bits(mode).ok_or(()))
.map_err(|_| format_err!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
.context("couldn't narrow permission bits")
.and_then(|mode| {
Mode::from_bits(mode)
.with_context(|| format!("mode contains illegal bits: 0x{:x} (0o{:o})", mode, mode))
})
}
/// Make sure path is relative and not '.' or '..'.

View File

@ -28,6 +28,7 @@ pxar.workspace = true
proxmox-borrow.workspace = true
proxmox-io.workspace = true
proxmox-human-byte.workspace = true
proxmox-lang.workspace=true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-serde = { workspace = true, features = [ "serde_json" ] }

View File

@ -8,6 +8,7 @@ use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use nix::unistd::{unlinkat, UnlinkatFlags};
use proxmox_human_byte::HumanByte;
use proxmox_schema::ApiType;
use proxmox_sys::error::SysError;
@ -19,7 +20,7 @@ use proxmox_sys::{task_log, task_warn};
use pbs_api_types::{
Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreFSyncLevel,
DatastoreTuning, GarbageCollectionStatus, HumanByte, Operation, UPID,
DatastoreTuning, GarbageCollectionStatus, Operation, UPID,
};
use crate::backup_info::{BackupDir, BackupGroup};

View File

@ -32,6 +32,7 @@ zstd.workspace = true
proxmox-async.workspace = true
proxmox-io = { workspace = true, features = [ "tokio" ] }
proxmox-human-byte.workspace = true
proxmox-lang.workspace=true
proxmox-sys.workspace = true
proxmox-time.workspace = true

View File

@ -3,7 +3,7 @@ use std::borrow::Borrow;
use anyhow::Error;
use serde_json::Value;
use pbs_api_types::HumanByte;
use proxmox_human_byte::HumanByte;
pub fn strip_server_file_extension(name: &str) -> &str {
if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") {

View File

@ -25,6 +25,7 @@ pxar.workspace = true
proxmox-async.workspace = true
proxmox-fuse.workspace = true
proxmox-human-byte.workspace = true
proxmox-io.workspace = true
proxmox-router = { workspace = true, features = [ "cli" ] }
proxmox-schema = { workspace = true, features = [ "api-macro" ] }

View File

@ -15,6 +15,7 @@ use xdg::BaseDirectories;
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use proxmox_async::blocking::TokioWriterAdapter;
use proxmox_human_byte::HumanByte;
use proxmox_io::StdChannelWriter;
use proxmox_router::{cli::*, ApiMethod, RpcEnvironment};
use proxmox_schema::api;
@ -24,11 +25,12 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_api_types::{
Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode,
Fingerprint, GroupListItem, HumanByte, PruneJobOptions, PruneListItem, RateLimitConfig,
SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig, SnapshotListItem,
StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::pxar::ErrorHandler as PxarErrorHandler;
use pbs_client::tools::{
complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot,
complete_backup_source, complete_chunk_size, complete_group_or_snapshot,
@ -1054,7 +1056,7 @@ async fn create_backup(
if let Some(rsa_encrypted_key) = rsa_encrypted_key {
let target = ENCRYPTED_KEY_BLOB_NAME;
log::info!("Upload RSA encoded key to '{:?}' as {}", repo, target);
log::info!("Upload RSA encoded key to '{}' as {}", repo, target);
let options = UploadOptions {
compress: false,
encrypt: false,
@ -1232,6 +1234,12 @@ We do not extract '.pxar' archives when writing to standard output.
optional: true,
default: false,
},
"ignore-extract-device-errors": {
type: Boolean,
description: "ignore errors that occur during device node extraction",
optional: true,
default: false,
}
}
}
)]
@ -1244,6 +1252,7 @@ async fn restore(
ignore_ownership: bool,
ignore_permissions: bool,
overwrite: bool,
ignore_extract_device_errors: bool,
) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
@ -1364,12 +1373,27 @@ async fn restore(
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
let on_error = if ignore_extract_device_errors {
let handler: PxarErrorHandler = Box::new(move |err: Error| {
use pbs_client::pxar::PxarExtractContext;
match err.downcast_ref::<PxarExtractContext>() {
Some(PxarExtractContext::ExtractDevice) => Ok(()),
_ => Err(err),
}
});
Some(handler)
} else {
None
};
let options = pbs_client::pxar::PxarExtractOptions {
match_list: &[],
extract_match_default: true,
allow_existing_dirs,
overwrite,
on_error: None,
on_error,
};
let mut feature_flags = pbs_client::pxar::Flags::DEFAULT;
@ -1590,9 +1614,12 @@ async fn status(param: Value) -> Result<Value, Error> {
let v = v.as_u64().unwrap();
let total = record["total"].as_u64().unwrap();
let roundup = total / 200;
let per = ((v + roundup) * 100) / total;
if let Some(per) = ((v + roundup) * 100).checked_div(total) {
let info = format!(" ({} %)", per);
Ok(format!("{} {:>8}", v, info))
} else {
bail!("Cannot render total percentage: denominator is zero");
}
};
let options = default_table_format_options()

View File

@ -585,7 +585,11 @@ where
}
fn main() {
init_cli_logger("PBS_LOG", "info");
let loglevel = match qemu_helper::debug_mode() {
true => "debug",
false => "info",
};
init_cli_logger("PBS_LOG", loglevel);
let list_cmd_def = CliCommand::new(&API_METHOD_LIST)
.arg_param(&["snapshot", "path"])

View File

@ -194,6 +194,12 @@ pub(crate) async fn hotplug_memory(cid: i32, dimm_mb: usize) -> Result<(), Error
Ok(())
}
/// Returns whether QEMU restore debug mode is enabled.
///
/// Debug mode is on exactly when the `PBS_QEMU_DEBUG` environment variable
/// is set to a non-empty value; an unset or empty variable disables it.
pub fn debug_mode() -> bool {
    matches!(std::env::var("PBS_QEMU_DEBUG"), Ok(value) if !value.is_empty())
}
pub async fn start_vm(
// u16 so we can do wrapping_add without going too high
mut cid: u16,
@ -205,11 +211,7 @@ pub async fn start_vm(
bail!("environment variable PBS_PASSWORD has to be set for QEMU VM restore");
}
let debug = if let Ok(val) = std::env::var("PBS_QEMU_DEBUG") {
!val.is_empty()
} else {
false
};
let debug = debug_mode();
validate_img_existance(debug)?;
@ -260,7 +262,7 @@ pub async fn start_vm(
// NOTE: ZFS requires that the ARC can at least grow to the max transaction size of 64MB
// also: setting any of min/max to zero will rather do the opposite of what one wants here
&format!(
"{} panic=1 zfs_arc_min=16777216 zfs_arc_max=67108864 memhp_default_state=online_kernel",
"{} panic=1 zfs.zfs_arc_min=33554432 zfs.zfs_arc_max=67108864 memhp_default_state=online_kernel",
if debug { "debug" } else { "quiet" }
),
"-daemonize",

View File

@ -35,7 +35,7 @@ pub(crate) fn get_acme_plugin(
}
"standalone" => {
// this one has no config
Box::new(StandaloneServer::default())
Box::<StandaloneServer>::default()
}
other => bail!("missing implementation for plugin type '{}'", other),
}))

View File

@ -56,6 +56,11 @@ async fn tfa_update_auth(
input: {
properties: { userid: { type: Userid } },
},
returns: {
description: "The list of TFA entries.",
type: Array,
items: { type: methods::TypedTfaInfo }
},
access: {
permission: &Permission::Or(&[
&Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
@ -64,7 +69,7 @@ async fn tfa_update_auth(
},
)]
/// Add a TOTP secret to the user.
fn list_user_tfa(userid: Userid) -> Result<Vec<methods::TypedTfaInfo>, Error> {
pub fn list_user_tfa(userid: Userid) -> Result<Vec<methods::TypedTfaInfo>, Error> {
let _lock = crate::config::tfa::read_lock()?;
methods::list_user_tfa(&crate::config::tfa::read()?, userid.as_str())
@ -117,7 +122,7 @@ fn get_tfa_entry(userid: Userid, id: String) -> Result<methods::TypedTfaInfo, Er
},
)]
/// Delete a single TFA entry.
async fn delete_tfa(
pub async fn delete_tfa(
userid: Userid,
id: String,
password: Option<String>,

View File

@ -8,6 +8,7 @@ use std::collections::HashMap;
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_schema::api;
use proxmox_tfa::api::TfaConfig;
use pbs_api_types::{
ApiToken, Authid, Tokenname, User, UserUpdater, UserWithTokens, Userid, ENABLE_USER_SCHEMA,
@ -18,8 +19,17 @@ use pbs_config::token_shadow;
use pbs_config::CachedUserInfo;
fn new_user_with_tokens(user: User) -> UserWithTokens {
fn new_user_with_tokens(user: User, tfa: &TfaConfig) -> UserWithTokens {
UserWithTokens {
totp_locked: tfa
.users
.get(user.userid.as_str())
.map(|data| data.totp_locked)
.unwrap_or(false),
tfa_locked_until: tfa
.users
.get(user.userid.as_str())
.and_then(|data| data.tfa_locked_until),
userid: user.userid,
comment: user.comment,
enable: user.enable,
@ -32,6 +42,7 @@ fn new_user_with_tokens(user: User) -> UserWithTokens {
}
#[api(
protected: true,
input: {
properties: {
include_tokens: {
@ -78,6 +89,8 @@ pub fn list_users(
rpcenv["digest"] = hex::encode(digest).into();
let tfa_data = crate::config::tfa::read()?;
let iter = list.into_iter().filter(filter_by_privs);
let list = if include_tokens {
let tokens: Vec<ApiToken> = config.convert_to_typed_array("token")?;
@ -93,13 +106,14 @@ pub fn list_users(
},
);
iter.map(|user: User| {
let mut user = new_user_with_tokens(user);
let mut user = new_user_with_tokens(user, &tfa_data);
user.tokens = user_to_tokens.remove(&user.userid).unwrap_or_default();
user
})
.collect()
} else {
iter.map(new_user_with_tokens).collect()
iter.map(|user: User| new_user_with_tokens(user, &tfa_data))
.collect()
};
Ok(list)
@ -728,6 +742,40 @@ pub fn list_tokens(
Ok(res)
}
#[api(
protected: true,
input: {
properties: {
userid: {
type: Userid,
},
},
},
returns: {
type: bool,
description: "Whether the user was previously locked out of any 2nd factor.",
},
access: {
permission: &Permission::Privilege(&["access", "users"], PRIV_PERMISSIONS_MODIFY, false),
},
)]
/// Unlock a user's TFA authentication.
///
/// Takes the TFA write lock, resets the user's TFA lockout state via
/// `proxmox_tfa`, and persists the config only when something actually
/// changed. Returns whether the user had been locked out before.
pub fn unlock_tfa(userid: Userid) -> Result<bool, Error> {
    let _lock = crate::config::tfa::write_lock()?;
    let mut config = crate::config::tfa::read()?;

    let was_locked = proxmox_tfa::api::methods::unlock_and_reset_tfa(
        &mut config,
        &crate::config::tfa::UserAccess,
        userid.as_str(),
    )?;

    // Only write the config back if the unlock changed anything.
    if was_locked {
        crate::config::tfa::write(&config)?;
    }

    Ok(was_locked)
}
const TOKEN_ITEM_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_TOKEN)
.put(&API_METHOD_UPDATE_TOKEN)
@ -738,7 +786,9 @@ const TOKEN_ROUTER: Router = Router::new()
.get(&API_METHOD_LIST_TOKENS)
.match_all("token-name", &TOKEN_ITEM_ROUTER);
const USER_SUBDIRS: SubdirMap = &[("token", &TOKEN_ROUTER)];
const UNLOCK_TFA_ROUTER: Router = Router::new().put(&API_METHOD_UNLOCK_TFA);
const USER_SUBDIRS: SubdirMap = &[("token", &TOKEN_ROUTER), ("unlock-tfa", &UNLOCK_TFA_ROUTER)];
const USER_ROUTER: Router = Router::new()
.get(&API_METHOD_READ_USER)

View File

@ -237,7 +237,7 @@ pub fn list_groups(
.to_owned();
let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
let comment = file_read_firstline(&note_path).ok();
let comment = file_read_firstline(note_path).ok();
group_info.push(GroupListItem {
backup: group.into(),

View File

@ -1,8 +1,10 @@
use crate::auth::LdapAuthenticator;
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use anyhow::{format_err, Error};
use hex::FromHex;
use serde_json::Value;
use proxmox_ldap::Connection;
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
@ -70,6 +72,11 @@ pub fn create_ldap_realm(config: LdapRealmConfig, password: Option<String>) -> R
param_bail!("realm", "realm '{}' already exists.", config.realm);
}
let ldap_config =
LdapAuthenticator::api_type_to_config_with_password(&config, password.clone())?;
let conn = Connection::new(ldap_config);
proxmox_async::runtime::block_on(conn.check_connection()).map_err(|e| format_err!("{e:#}"))?;
if let Some(password) = password {
auth_helpers::store_ldap_bind_password(&config.realm, &password, &domain_config_lock)?;
}
@ -317,10 +324,6 @@ pub fn update_ldap_realm(
config.bind_dn = Some(bind_dn);
}
if let Some(password) = password {
auth_helpers::store_ldap_bind_password(&realm, &password, &domain_config_lock)?;
}
if let Some(filter) = update.filter {
config.filter = Some(filter);
}
@ -334,6 +337,19 @@ pub fn update_ldap_realm(
config.user_classes = Some(user_classes);
}
let ldap_config = if let Some(_) = password {
LdapAuthenticator::api_type_to_config_with_password(&config, password.clone())?
} else {
LdapAuthenticator::api_type_to_config(&config)?
};
let conn = Connection::new(ldap_config);
proxmox_async::runtime::block_on(conn.check_connection()).map_err(|e| format_err!("{e:#}"))?;
if let Some(password) = password {
auth_helpers::store_ldap_bind_password(&realm, &password, &domain_config_lock)?;
}
domains.set_data(&realm, "ldap", &config)?;
domains::save_config(&domains)?;

View File

@ -585,7 +585,7 @@ pub fn add_plugin(r#type: String, core: DnsPluginCore, data: String) -> Result<(
param_bail!("type", "invalid ACME plugin type: {:?}", r#type);
}
let data = String::from_utf8(base64::decode(&data)?)
let data = String::from_utf8(base64::decode(data)?)
.map_err(|_| format_err!("data must be valid UTF-8"))?;
let id = core.id.clone();
@ -695,7 +695,7 @@ pub fn update_plugin(
let (mut plugins, expected_digest) = plugin::config()?;
if let Some(digest) = digest {
let digest = <[u8; 32]>::from_hex(&digest)?;
let digest = <[u8; 32]>::from_hex(digest)?;
crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
}

View File

@ -352,7 +352,8 @@ pub fn get_versions() -> Result<Vec<APTUpdateInfo>, Error> {
}
}
let is_kernel = |name: &str| name.starts_with("pve-kernel-");
let is_kernel =
|name: &str| name.starts_with("pve-kernel-") || name.starts_with("proxmox-kernel");
let mut packages: Vec<APTUpdateInfo> = Vec::new();
let pbs_packages = apt::list_installed_apt_packages(

View File

@ -15,10 +15,10 @@ static SERVICE_NAME_LIST: [&str; 7] = [
"proxmox-backup",
"proxmox-backup-proxy",
"sshd",
"syslog",
"systemd-journald",
"cron",
"postfix",
"systemd-timesyncd",
"chrony",
];
pub fn real_service_name(service: &str) -> &str {
@ -78,7 +78,9 @@ fn json_service_state(service: &str, status: Value) -> Value {
let name = status["Name"].as_str().unwrap_or(service);
let state = if status["Type"] == "oneshot" && status["SubState"] == "dead" {
status["Result"].as_str().or(status["SubState"].as_str())
status["Result"]
.as_str()
.or_else(|| status["SubState"].as_str())
} else {
status["SubState"].as_str()
}

View File

@ -652,7 +652,7 @@ pub async fn read_label(drive: String, inventorize: Option<bool>) -> Result<Medi
let mut drive = open_drive(&config, &drive)?;
let (media_id, _key_config) = drive.read_label()?;
let media_id = media_id.ok_or(format_err!("Media is empty (no label)."))?;
let media_id = media_id.ok_or_else(|| format_err!("Media is empty (no label)."))?;
let label = if let Some(ref set) = media_id.media_set_label {
let key = &set.encryption_key_fingerprint;

View File

@ -7,6 +7,7 @@ use std::sync::Arc;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox_human_byte::HumanByte;
use proxmox_io::ReadExt;
use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
use proxmox_schema::{api, ApiType};
@ -17,10 +18,9 @@ use proxmox_uuid::Uuid;
use pbs_api_types::{
parse_ns_and_snapshot, print_ns_and_snapshot, Authid, BackupDir, BackupNamespace, CryptMode,
HumanByte, Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA,
DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA,
TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA,
DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::dynamic_index::DynamicIndexReader;
@ -949,7 +949,7 @@ fn restore_list_worker(
for (datastore, _) in store_map.used_datastores().values() {
let tmp_path = media_set_tmpdir(datastore, &media_set_uuid);
match std::fs::remove_dir_all(&tmp_path) {
match std::fs::remove_dir_all(tmp_path) {
Ok(()) => {}
Err(err) if err.kind() == std::io::ErrorKind::NotFound => {}
Err(err) => task_warn!(worker, "error cleaning up: {}", err),

View File

@ -170,6 +170,16 @@ impl Authenticator for LdapAuthenticator {
impl LdapAuthenticator {
pub fn api_type_to_config(config: &LdapRealmConfig) -> Result<Config, Error> {
Self::api_type_to_config_with_password(
config,
auth_helpers::get_ldap_bind_password(&config.realm)?,
)
}
pub fn api_type_to_config_with_password(
config: &LdapRealmConfig,
password: Option<String>,
) -> Result<Config, Error> {
let mut servers = vec![config.server1.clone()];
if let Some(server) = &config.server2 {
servers.push(server.clone());
@ -198,7 +208,7 @@ impl LdapAuthenticator {
user_attr: config.user_attr.clone(),
base_dn: config.base_dn.clone(),
bind_dn: config.bind_dn.clone(),
bind_password: auth_helpers::get_ldap_bind_password(&config.realm)?,
bind_password: password,
tls_mode,
verify_certificate: config.verify.unwrap_or_default(),
additional_trusted_certificates: trusted_cert,
@ -266,11 +276,11 @@ pub fn setup_auth_context(use_private_key: bool) {
}
pub(crate) fn private_auth_keyring() -> &'static Keyring {
&*PRIVATE_KEYRING
&PRIVATE_KEYRING
}
pub(crate) fn public_auth_keyring() -> &'static Keyring {
&*PUBLIC_KEYRING
&PUBLIC_KEYRING
}
struct PbsAuthContext {
@ -341,7 +351,7 @@ impl proxmox_auth_api::api::AuthContext for PbsAuthContext {
if let Ok(Empty) = Ticket::parse(password).and_then(|ticket| {
ticket.verify(
&self.keyring,
self.keyring,
TERM_PREFIX,
Some(&crate::tools::ticket::term_aad(userid, &path, port)),
)

View File

@ -131,7 +131,7 @@ impl Checker {
let (krunning, kinstalled) = if self.upgraded {
(
Regex::new(r"^6\.(?:2\.(?:[2-9]\d+|1[6-8]|1\d\d+)|5)[^~]*$")?,
"pve-kernel-6.2",
"proxmox-kernel-6.2",
)
} else {
(Regex::new(r"^(?:5\.(?:13|15)|6\.2)")?, "pve-kernel-5.15")

View File

@ -3,6 +3,7 @@ use std::collections::HashMap;
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use proxmox_human_byte::HumanByte;
use proxmox_io::ReadExt;
use proxmox_router::cli::*;
use proxmox_router::RpcEnvironment;
@ -18,10 +19,9 @@ use pbs_config::drive::complete_drive_name;
use pbs_config::media_pool::complete_pool_name;
use pbs_api_types::{
Authid, BackupNamespace, GroupListItem, HumanByte, Userid, DATASTORE_MAP_LIST_SCHEMA,
DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, GROUP_FILTER_LIST_SCHEMA, MEDIA_LABEL_SCHEMA,
MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_SCHEMA, TAPE_RESTORE_NAMESPACE_SCHEMA,
TAPE_RESTORE_SNAPSHOT_SCHEMA,
Authid, BackupNamespace, GroupListItem, Userid, DATASTORE_MAP_LIST_SCHEMA, DATASTORE_SCHEMA,
DRIVE_NAME_SCHEMA, GROUP_FILTER_LIST_SCHEMA, MEDIA_LABEL_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
NS_MAX_DEPTH_SCHEMA, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA,
};
use pbs_tape::{BlockReadError, MediaContentHeader, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0};

View File

@ -9,10 +9,11 @@ use anyhow::{bail, Context as AnyhowContext, Error};
use futures::future::BoxFuture;
use futures::FutureExt;
use proxmox_human_byte::HumanByte;
use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface};
use proxmox_schema::api;
use pbs_api_types::{BackupNamespace, BackupPart, HumanByte};
use pbs_api_types::{BackupNamespace, BackupPart};
use pbs_client::tools::key_source::{
crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA,
};

View File

@ -157,6 +157,40 @@ fn list_permissions(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Val
Ok(Value::Null)
}
#[api(
input: {
properties: {
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
},
userid: {
type: Userid,
}
},
}
)]
/// List all tfa methods for a user.
fn list_user_tfa(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let info = &api2::access::tfa::API_METHOD_LIST_USER_TFA;
let mut data = match info.handler {
ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
_ => unreachable!(),
};
let options = default_table_format_options()
.column(ColumnConfig::new("id"))
.column(ColumnConfig::new("type"))
.column(ColumnConfig::new("description"))
.column(ColumnConfig::new("created").renderer(pbs_tools::format::render_epoch));
format_and_print_result_full(&mut data, &info.returns, &output_format, &options);
Ok(Value::Null)
}
pub fn user_commands() -> CommandLineInterface {
let cmd_def = CliCommandMap::new()
.insert("list", CliCommand::new(&API_METHOD_LIST_USERS))
@ -196,6 +230,7 @@ pub fn user_commands() -> CommandLineInterface {
.completion_cb("userid", pbs_config::user::complete_userid)
.completion_cb("token-name", pbs_config::user::complete_token_name),
)
.insert("tfa", tfa_commands())
.insert(
"permissions",
CliCommand::new(&API_METHOD_LIST_PERMISSIONS)
@ -206,3 +241,27 @@ pub fn user_commands() -> CommandLineInterface {
cmd_def.into()
}
fn tfa_commands() -> CommandLineInterface {
CliCommandMap::new()
.insert(
"list",
CliCommand::new(&API_METHOD_LIST_USER_TFA)
.arg_param(&["userid"])
.completion_cb("userid", pbs_config::user::complete_userid),
)
.insert(
"delete",
CliCommand::new(&api2::access::tfa::API_METHOD_DELETE_TFA)
.arg_param(&["userid", "id"])
.completion_cb("userid", pbs_config::user::complete_userid)
.completion_cb("id", proxmox_backup::config::tfa::complete_tfa_id),
)
.insert(
"unlock",
CliCommand::new(&api2::access::user::API_METHOD_UNLOCK_TFA)
.arg_param(&["userid"])
.completion_cb("userid", pbs_config::user::complete_userid),
)
.into()
}

View File

@ -187,9 +187,9 @@ pub(crate) fn set_proxy_certificate(cert_pem: &[u8], key_pem: &[u8]) -> Result<(
let cert_path = PathBuf::from(configdir!("/proxy.pem"));
create_configdir()?;
pbs_config::replace_backup_config(&key_path, key_pem)
pbs_config::replace_backup_config(key_path, key_pem)
.map_err(|err| format_err!("error writing certificate private key - {}", err))?;
pbs_config::replace_backup_config(&cert_path, cert_pem)
pbs_config::replace_backup_config(cert_path, cert_pem)
.map_err(|err| format_err!("error writing certificate file - {}", err))?;
Ok(())

View File

@ -1,3 +1,4 @@
use std::collections::HashMap;
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;
@ -280,16 +281,15 @@ impl proxmox_tfa::api::OpenUserChallengeData for UserAccess {
/// `remove` user data if it exists.
fn remove(&self, userid: &str) -> Result<bool, Error> {
let path = challenge_data_path_str(userid);
match std::fs::remove_file(&path) {
match std::fs::remove_file(path) {
Ok(()) => Ok(true),
Err(err) if err.not_found() => Ok(false),
Err(err) => Err(err.into()),
}
}
// TODO: enable once we have admin ui stuff to unlock locked-out users
fn enable_lockout(&self) -> bool {
false
true
}
}
@ -302,3 +302,30 @@ impl proxmox_tfa::api::UserChallengeAccess for TfaUserChallengeData {
TfaUserChallengeData::save(self)
}
}
// shell completion helper
pub fn complete_tfa_id(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let mut results = Vec::new();
let data = match read() {
Ok(data) => data,
Err(_err) => return results,
};
let user = match param
.get("userid")
.and_then(|user_name| data.users.get(user_name))
{
Some(user) => user,
None => return results,
};
results.extend(user.totp.iter().map(|token| token.info.id.clone()));
results.extend(user.u2f.iter().map(|token| token.info.id.clone()));
results.extend(user.webauthn.iter().map(|token| token.info.id.clone()));
results.extend(user.yubico.iter().map(|token| token.info.id.clone()));
if user.recovery.is_some() {
results.push("recovery".to_string());
};
results
}

View File

@ -5,12 +5,13 @@ use handlebars::{
Context, Handlebars, Helper, HelperResult, Output, RenderContext, RenderError, TemplateError,
};
use proxmox_human_byte::HumanByte;
use proxmox_lang::try_block;
use proxmox_schema::ApiType;
use proxmox_sys::email::sendmail;
use pbs_api_types::{
APTUpdateInfo, DataStoreConfig, DatastoreNotify, GarbageCollectionStatus, HumanByte, Notify,
APTUpdateInfo, DataStoreConfig, DatastoreNotify, GarbageCollectionStatus, Notify,
SyncJobConfig, TapeBackupJobSetup, User, Userid, VerificationJobConfig,
};

View File

@ -229,7 +229,7 @@ impl Job {
pub fn new(jobtype: &str, jobname: &str) -> Result<Self, Error> {
let path = get_path(jobtype, jobname);
let _lock = get_lock(&path)?;
let _lock = get_lock(path)?;
Ok(Self {
jobtype: jobtype.to_string(),

View File

@ -90,7 +90,7 @@ impl VirtualTapeHandle {
fn load_tape_index(&self, tape_name: &str) -> Result<TapeIndex, Error> {
let path = self.tape_index_path(tape_name);
let raw = proxmox_sys::fs::file_get_contents(&path)?;
let raw = proxmox_sys::fs::file_get_contents(path)?;
if raw.is_empty() {
return Ok(TapeIndex { files: 0 });
}
@ -103,7 +103,7 @@ impl VirtualTapeHandle {
let raw = serde_json::to_string_pretty(&serde_json::to_value(index)?)?;
let options = CreateOptions::new();
replace_file(&path, raw.as_bytes(), options, false)?;
replace_file(path, raw.as_bytes(), options, false)?;
Ok(())
}
@ -131,7 +131,7 @@ impl VirtualTapeHandle {
let default = serde_json::to_value(VirtualDriveStatus { current_tape: None })?;
let data = proxmox_sys::fs::file_get_json(&path, Some(default))?;
let data = proxmox_sys::fs::file_get_json(path, Some(default))?;
let status: VirtualDriveStatus = serde_json::from_value(data)?;
Ok(status)
}
@ -141,7 +141,7 @@ impl VirtualTapeHandle {
let raw = serde_json::to_string_pretty(&serde_json::to_value(status)?)?;
let options = CreateOptions::new();
replace_file(&path, raw.as_bytes(), options, false)?;
replace_file(path, raw.as_bytes(), options, false)?;
Ok(())
}

View File

@ -38,7 +38,7 @@ mod hex_key {
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
<[u8; 32]>::from_hex(&s).map_err(serde::de::Error::custom)
<[u8; 32]>::from_hex(s).map_err(serde::de::Error::custom)
}
}

View File

@ -254,7 +254,7 @@ impl MediaCatalog {
.write(true)
.create(true)
.truncate(true)
.open(&tmp_path)?;
.open(tmp_path)?;
if cfg!(test) {
// We cannot use chown inside test environment (no permissions)

View File

@ -279,9 +279,7 @@ impl MediaPool {
.inventory
.media_set_start_time(self.current_media_set.uuid())
{
if let Ok(Some(alloc_time)) =
event.compute_next_event(set_start_time as i64)
{
if let Ok(Some(alloc_time)) = event.compute_next_event(set_start_time) {
if current_time >= alloc_time {
create_new_set =
Some(String::from("policy CreateAt event triggered"));

View File

@ -108,7 +108,7 @@ impl DiskManage {
/// Get a `Disk` for a name in `/sys/block/<name>`.
pub fn disk_by_name(self: Arc<Self>, name: &str) -> io::Result<Disk> {
let syspath = format!("/sys/block/{}", name);
self.disk_by_sys_path(&syspath)
self.disk_by_sys_path(syspath)
}
/// Gather information about mounted disks:

View File

@ -122,7 +122,7 @@ lazy_static::lazy_static! {
fn parse_objset_stat(pool: &str, objset_id: &str) -> Result<(String, BlockDevStat), Error> {
let path = PathBuf::from(format!("{}/{}/{}", ZFS_KSTAT_BASE_PATH, pool, objset_id));
let text = match proxmox_sys::fs::file_read_optional_string(&path)? {
let text = match proxmox_sys::fs::file_read_optional_string(path)? {
Some(text) => text,
None => bail!("could not parse '{}' stat file", objset_id),
};

View File

@ -1,5 +1,7 @@
include ../defines.mk
ESLINT ?= $(if $(shell command -v pve-eslint), pve-eslint, eslint)
IMAGES := \
images/icon-tape.svg \
images/icon-tape-drive.svg \
@ -133,11 +135,11 @@ js/proxmox-backup-gui.js: js OnlineHelpInfo.js ${JSSRC}
.PHONY: check
check:
eslint --strict ${JSSRC}
$(ESLINT) --strict ${JSSRC}
touch ".lint-incremental"
.lint-incremental: ${JSSRC}
eslint $?
$(ESLINT) $?
touch "$@"
.PHONY: clean

View File

@ -1,7 +1,7 @@
Ext.define('pmx-users', {
extend: 'Ext.data.Model',
fields: [
'userid', 'firstname', 'lastname', 'email', 'comment',
'userid', 'firstname', 'lastname', 'email', 'comment', 'totp-locked',
{ type: 'boolean', name: 'enable', defaultValue: true },
{ type: 'date', dateFormat: 'timestamp', name: 'expire' },
],
@ -100,6 +100,30 @@ Ext.define('PBS.config.UserView', {
init: function(view) {
Proxmox.Utils.monStoreErrors(view, view.getStore().rstore);
},
unlockTfa: function(btn, event, rec) {
let me = this;
let view = me.getView();
Ext.Msg.confirm(
Ext.String.format(gettext('Unlock TFA authentication for {0}'), rec.data.userid),
gettext("Locked 2nd factors can happen if the user's password was leaked. Are you sure you want to unlock the user?"),
function(btn_response) {
if (btn_response === 'yes') {
Proxmox.Utils.API2Request({
url: `/access/users/${rec.data.userid}/unlock-tfa`,
waitMsgTarget: view,
method: 'PUT',
failure: function(response, options) {
Ext.Msg.alert(gettext('Error'), response.htmlStatus);
},
success: function(response, options) {
me.reload();
},
});
}
},
);
},
},
listeners: {
@ -156,6 +180,14 @@ Ext.define('PBS.config.UserView', {
handler: 'showPermissions',
disabled: true,
},
'-',
{
xtype: 'proxmoxButton',
text: gettext('Unlock TFA'),
handler: 'unlockTfa',
enableFn: ({ data }) =>
data['totp-locked'] || (data['tfa-locked-until'] > (new Date().getTime() / 1000)),
},
],
viewConfig: {
@ -198,6 +230,27 @@ Ext.define('PBS.config.UserView', {
dataIndex: 'firstname',
renderer: 'renderName',
},
{
header: gettext('TFA Lock'),
width: 120,
sortable: true,
dataIndex: 'totp-locked',
renderer: function(v, metaData, record) {
let locked_until = record.data['tfa-locked-until'];
if (locked_until !== undefined) {
let now = new Date().getTime() / 1000;
if (locked_until > now) {
return gettext('Locked');
}
}
if (record.data['totp-locked']) {
return gettext('TOTP Locked');
}
return Proxmox.Utils.noText;
},
},
{
header: gettext('Comment'),
sortable: false,