Merge tag 'v3.1.2'

This commit is contained in:
Andrew A. Vasilyev 2023-12-04 16:45:48 +03:00
commit 9f635919d5
140 changed files with 3317 additions and 2095 deletions

5
.gitignore vendored
View File

@ -14,9 +14,14 @@
/*.deb
/*.dsc
/*.tar*
/Cargo.lock
/docs/output
/etc/proxmox-backup-proxy.service
/etc/proxmox-backup.service
/proxmox-backup-server-dpkg-contents.txt
/target
/www/.lint-incremental
/www/js/
__pycache__/
build/
local.mak

View File

@ -1,5 +1,5 @@
[workspace.package]
version = "3.0.2"
version = "3.1.2"
authors = [
"Dietmar Maurer <dietmar@proxmox.com>",
"Dominik Csapak <d.csapak@proxmox.com>",
@ -54,6 +54,7 @@ path = "src/lib.rs"
[workspace.dependencies]
# proxmox workspace
proxmox-apt = "0.10.5"
proxmox-async = "0.4"
proxmox-auth-api = "0.3"
proxmox-borrow = "1"
@ -63,27 +64,27 @@ proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "webs
proxmox-human-byte = "0.1"
proxmox-io = "1.0.1" # tools and client use "tokio" feature
proxmox-lang = "1.1"
proxmox-ldap = "0.2"
proxmox-ldap = "0.2.1"
proxmox-metrics = "0.3"
proxmox-rest-server = { version = "0.4.1", features = [ "templates" ] }
proxmox-openid = "0.10.0"
proxmox-rest-server = { version = "0.5.1", features = [ "templates" ] }
# some use "cli", some use "cli" and "server", pbs-config uses nothing
proxmox-router = { version = "2.0.0", default_features = false }
# everything but pbs-config and pbs-client ues "api-macro"
# everything but pbs-config and pbs-client use "api-macro"
proxmox-schema = "2.0.0"
proxmox-section-config = "2"
proxmox-serde = "0.1.1"
proxmox-shared-memory = "0.3.0"
proxmox-sortable-macro = "0.1.2"
proxmox-subscription = { version = "0.4", features = [ "api-types" ] }
proxmox-sys = "0.5.0"
proxmox-subscription = { version = "0.4.2", features = [ "api-types" ] }
proxmox-sys = "0.5.2"
proxmox-tfa = { version = "4.0.4", features = [ "api", "api-types" ] }
proxmox-time = "1.1.2"
proxmox-uuid = "1"
# other proxmox crates
pathpatterns = "0.1.2"
pathpatterns = "0.3"
proxmox-acme-rs = "0.4"
proxmox-openid = "0.10.0"
pxar = "0.10.2"
# PBS workspace
@ -101,6 +102,8 @@ proxmox-rrd = { path = "proxmox-rrd" }
# regular crates
anyhow = "1.0"
async-trait = "0.1.56"
apt-pkg-native = "0.3.2"
base64 = "0.13"
bitflags = "1.2.1"
bytes = "1.0"
@ -108,7 +111,7 @@ cidr = "0.2.1"
crc32fast = "1"
crossbeam-channel = "0.5"
endian_trait = { version = "0.6", features = ["arrays"] }
env_logger = "0.9"
env_logger = "0.10"
flate2 = "1.0"
foreign-types = "0.3"
futures = "0.3"
@ -151,6 +154,8 @@ zstd = { version = "0.12", features = [ "bindgen" ] }
[dependencies]
anyhow.workspace = true
async-trait.workspace = true
apt-pkg-native.workspace = true
base64.workspace = true
bitflags.workspace = true
bytes.workspace = true
@ -195,6 +200,8 @@ zstd.workspace = true
#valgrind_request = { git = "https://github.com/edef1c/libvalgrind_request", version = "1.1.0", optional = true }
# proxmox workspace
proxmox-apt.workspace = true
proxmox-async.workspace = true
proxmox-auth-api = { workspace = true, features = [ "api", "pam-authenticator" ] }
proxmox-compression.workspace = true
@ -204,6 +211,7 @@ proxmox-io.workspace = true
proxmox-lang.workspace = true
proxmox-ldap.workspace = true
proxmox-metrics.workspace = true
proxmox-openid.workspace = true
proxmox-rest-server = { workspace = true, features = [ "rate-limited-stream" ] }
proxmox-router = { workspace = true, features = [ "cli", "server"] }
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
@ -217,11 +225,12 @@ proxmox-tfa.workspace = true
proxmox-time.workspace = true
proxmox-uuid.workspace = true
# in their respective repo
pathpatterns.workspace = true
proxmox-acme-rs.workspace = true
proxmox-openid.workspace = true
pxar.workspace = true
# proxmox-backup workspace/internal crates
pbs-api-types.workspace = true
pbs-buildcfg.workspace = true
pbs-client.workspace = true
@ -235,30 +244,35 @@ proxmox-rrd.workspace = true
# Local path overrides
# NOTE: You must run `cargo update` after changing this for it to take effect!
[patch.crates-io]
#proxmox-acme-rs = { path = "../proxmox-acme-rs" }
#proxmox-apt = { path = "../proxmox/proxmox-apt" }
#proxmox-async = { path = "../proxmox/proxmox-async" }
#proxmox-auth-api = { path = "../proxmox/proxmox-auth-api" }
#proxmox-borrow = { path = "../proxmox/proxmox-borrow" }
#proxmox-compression = { path = "../proxmox/proxmox-compression" }
#proxmox-fuse = { path = "../proxmox-fuse" }
#proxmox-http = { path = "../proxmox/proxmox-http" }
#proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" }
#proxmox-io = { path = "../proxmox/proxmox-io" }
#proxmox-lang = { path = "../proxmox/proxmox-lang" }
#proxmox-ldap = { path = "../proxmox/proxmox-ldap" }
#proxmox-metrics = { path = "../proxmox/proxmox-metrics" }
#proxmox-openid = { path = "../proxmox/proxmox-openid" }
#proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" }
#proxmox-router = { path = "../proxmox/proxmox-router" }
#proxmox-schema = { path = "../proxmox/proxmox-schema" }
#proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
#proxmox-serde = { path = "../proxmox/proxmox-serde" }
#proxmox-shared-memory = { path = "../proxmox/proxmox-shared-memory" }
#proxmox-sortable-macro = { path = "../proxmox/proxmox-sortable-macro" }
#proxmox-subscription = { path = "../proxmox/proxmox-subscription" }
#proxmox-sys = { path = "../proxmox/proxmox-sys" }
#proxmox-tfa = { path = "../proxmox/proxmox-tfa" }
#proxmox-time = { path = "../proxmox/proxmox-time" }
#proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
#proxmox-apt = { path = "../proxmox-apt" }
#proxmox-openid = { path = "../proxmox-openid-rs" }
#proxmox-acme-rs = { path = "../proxmox-acme-rs" }
#pathpatterns = {path = "../pathpatterns" }
#pxar = { path = "../pxar" }
[features]

View File

@ -62,7 +62,7 @@ RESTORE_DBG_DEB=proxmox-backup-file-restore-dbgsym_$(DEB_VERSION)_$(ARCH).deb
DOC_DEB=$(PACKAGE)-docs_$(DEB_VERSION)_all.deb
DEBS=$(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
$(RESTORE_DEB) $(RESTORE_DBG_DEB) $(DEBUG_DEB) $(DEBUG_DBG_DEB)
$(RESTORE_DEB) $(RESTORE_DBG_DEB)
DSC = rust-$(PACKAGE)_$(DEB_VERSION).dsc
@ -145,8 +145,7 @@ clean-deb:
rm -f *.deb *.dsc *.tar.* *.buildinfo *.build *.changes
.PHONY: dinstall
dinstall: $(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
$(DEBUG_DEB) $(DEBUG_DBG_DEB)
dinstall: $(SERVER_DEB) $(SERVER_DBG_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB)
dpkg -i $^
# make sure we build binaries before docs
@ -215,11 +214,10 @@ install: $(COMPILED_BINS)
.PHONY: upload
upload: UPLOAD_DIST ?= $(DEB_DISTRIBUTION)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB) $(DEBUG_DEB)
upload: $(SERVER_DEB) $(CLIENT_DEB) $(RESTORE_DEB) $(DOC_DEB)
# check if working directory is clean
git diff --exit-code --stat && git diff --exit-code --stat --staged
tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) \
$(CLIENT_DBG_DEB) $(DEBUG_DEB) $(DEBUG_DBG_DEB) \
tar cf - $(SERVER_DEB) $(SERVER_DBG_DEB) $(DOC_DEB) $(CLIENT_DEB) $(CLIENT_DBG_DEB) \
| ssh -X repoman@repo.proxmox.com upload --product pbs --dist $(UPLOAD_DIST)
tar cf - $(CLIENT_DEB) $(CLIENT_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve,pmg,pbs-client" --dist $(UPLOAD_DIST)
tar cf - $(RESTORE_DEB) $(RESTORE_DBG_DEB) | ssh -X repoman@repo.proxmox.com upload --product "pve" --dist $(UPLOAD_DIST)

View File

@ -44,7 +44,7 @@ example for proxmox crate above).
Build
=====
on Debian 11 Bullseye
on Debian 12 Bookworm
Setup:
1. # echo 'deb http://download.proxmox.com/debian/devel/ bookworm main' | sudo tee /etc/apt/sources.list.d/proxmox-devel.list
@ -58,7 +58,7 @@ Note: 2. may be skipped if you already added the PVE or PBS package repository
You are now able to build using the Makefile or cargo itself, e.g.::
# make deb-all
# make deb
# # or for a non-package build
# cargo build --all --release

139
debian/changelog vendored
View File

@ -1,3 +1,142 @@
rust-proxmox-backup (3.1.2-1) bookworm; urgency=medium
* sync: fix recent regression with recursive remote sync
* sync: fix source store+namespace printing to refer to the actual namespace
-- Proxmox Support Team <support@proxmox.com> Thu, 30 Nov 2023 11:56:43 +0100
rust-proxmox-backup (3.1.1-1) bookworm; urgency=medium
* ui: fix deleting datastore without 'keep-job-configs'
-- Proxmox Support Team <support@proxmox.com> Thu, 30 Nov 2023 11:05:01 +0100
rust-proxmox-backup (3.1.0-1) bookworm; urgency=medium
* ui: fix some edge cases with editing local sync jobs
* ui: datastore content: add context menu to groups and snapshots
* fix #3690: support wiping disks
- api: add endpoint for wipe block devices
- manager cli: add wipe commands to disk group
- ui: enable wipe-disk feature in system disk view
* ui: add 'keep configuration' checkbox to datastore removal window
* docs: add further secure boot information
* ui: disable rate-limit for local sync jobs for now
* pbs2to3: add check for dkms modules
* pbs2to3: check for proper grub meta-package for boot-mode
* ui: dashboard: show the current boot-mode
* ui: dashboard: nicely display kernel version
* ui: add 'show connection information' button for datastores
-- Proxmox Support Team <support@proxmox.com> Wed, 29 Nov 2023 17:55:22 +0100
rust-proxmox-backup (3.0.5-1) bookworm; urgency=medium
* ui: tape restore: fix default namespace mapping
* fix #4260: ui: fallback to note of last snapshot for backup group comment
* fix #4971: client: Improve output on successful snapshot deletion
* fix #4779: client: add missing "Connection" header for HTTP2 upgrade as
specified by RFC 9110, fixing issues with some strict proxies.
* system report: switch to markdown-like output syntax to make it easier to
digest
* system report: add information about block devices, basic uptime, usage
and process info, all apt repo files, proxmox-boot-tool status output and
ldap and oidc realm list
* cli: add option to remove systemd mount unit
* ui: add Remove button for directory-based storage types
* debug CLI tool: show the size of chunks and if they are compressed when
using the inspect command
* fix #4977: ui tape: restore: rework snapshot selection logic to avoid some
confusing edge cases
* docs: faq: add entries for how to do minor and major upgrades
* api apt: use `apt changelog` for changelog fetching
* fix #4374: create a prune job upon datastore creation
* docs: add a link to the Proxmox Backup Server wiki to sidebar
* docs: various fixes for typos, wording and some layout issues
* pull: add support for pulling from local datastore
* tape: library status: don't fail if the library does not support DVCID
* manager: check if offline subscription is for the correct product
* proxy: redirect HTTP requests to HTTPS
* fix #3211: document the possible values to the "notify" parameter
* docs: sysadmin: add section about Secure Boot
-- Proxmox Support Team <support@proxmox.com> Tue, 28 Nov 2023 12:41:56 +0100
rust-proxmox-backup (3.0.4-1) bookworm; urgency=medium
* rebuild with env_logger 0.10, proxmox-api-macro 1.0.6 and syn 2
* update dependencies (tokio, libc, serde, openssl, futures, bindgen)
-- Proxmox Support Team <support@proxmox.com> Mon, 02 Oct 2023 10:19:32 +0200
rust-proxmox-backup (3.0.3-1) bookworm; urgency=medium
* fix #4380: client: check if file is excluded before running `stat()` to
get metadata
* improve error messages when parsing a chunk fails
* add support for LTO-9 tape density code
* pbs2to3: fix boot-mode detection, "/sys/firmware/efi" either doesn't exist
(legacy boot) or is a directory (EFI boot), but never a file.
* fix #4761: client: restore: unlink existing entries for hard/symlinks
when overwrite-existing flag is enabled
* client: restore: provide finer-grained controls for what (files,
sym-links, hard-links, or all) to overwrite on restore if the target
already exists.
* ui: don't show form-reset button on ZFS creation
* close #3777: backup: add client-ip information to worker task log
* fix #4343: client cli: exit with failure-code if a worker-task finished
with an unknown, or an error status.
* fix #4823: datastore: ignore vanished files when walking over namespace
and groups hierarchy. This avoids a rare chance of failing a running
garbage-collection when a snapshot is removed by the user or a prune job
during the same time.
* fix #4895: scheduled jobs: ignore task-log not found error to avoid a
stuck job after, e.g., the system was shut down uncleanly, for example, due
to a power loss.
-- Proxmox Support Team <support@proxmox.com> Wed, 27 Sep 2023 18:19:49 +0200
rust-proxmox-backup (3.0.2-1) bookworm; urgency=medium
* docs: update FAQ release support table, add PBS 2.x EOL date

29
debian/control vendored
View File

@ -16,6 +16,7 @@ Build-Depends: bash-completion,
libfuse3-dev,
librust-anyhow-1+default-dev,
librust-apt-pkg-native-0.3+default-dev (>= 0.3.2-~~),
librust-async-trait-0.1+default-dev (>= 0.1.56-~~),
librust-base64-0.13+default-dev,
librust-bitflags-1+default-dev (>= 1.2.1-~~),
librust-bytes-1+default-dev,
@ -24,7 +25,7 @@ Build-Depends: bash-completion,
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
librust-env-logger-0.9+default-dev,
librust-env-logger-0.10+default-dev,
librust-flate2-1+default-dev,
librust-foreign-types-0.3+default-dev,
librust-futures-0.3+default-dev,
@ -44,11 +45,11 @@ Build-Depends: bash-completion,
librust-num-traits-0.2+default-dev,
librust-once-cell-1+default-dev (>= 1.3.1-~~),
librust-openssl-0.10+default-dev (>= 0.10.40-~~),
librust-pathpatterns-0.1+default-dev (>= 0.1.2-~~),
librust-pathpatterns-0.3+default-dev,
librust-percent-encoding-2+default-dev (>= 2.1-~~),
librust-pin-project-lite-0.2+default-dev,
librust-proxmox-acme-rs-0.4+default-dev,
librust-proxmox-apt-0.10+default-dev (>= 0.10.2-~~),
librust-proxmox-apt-0.10+default-dev (>= 0.10.5-~~),
librust-proxmox-async-0.4+default-dev,
librust-proxmox-auth-api-0.3+api-dev,
librust-proxmox-auth-api-0.3+api-types-dev,
@ -69,12 +70,12 @@ Build-Depends: bash-completion,
librust-proxmox-io-1+default-dev (>= 1.0.1-~~),
librust-proxmox-io-1+tokio-dev (>= 1.0.1-~~),
librust-proxmox-lang-1+default-dev (>= 1.1-~~),
librust-proxmox-ldap-0.2+default-dev,
librust-proxmox-ldap-0.2+default-dev (>= 0.2.1-~~),
librust-proxmox-metrics-0.3+default-dev,
librust-proxmox-openid-0.10+default-dev,
librust-proxmox-rest-server-0.4+default-dev (>= 0.4.1-~~),
librust-proxmox-rest-server-0.4+rate-limited-stream-dev (>= 0.4.1-~~),
librust-proxmox-rest-server-0.4+templates-dev (>= 0.4.1-~~),
librust-proxmox-rest-server-0.5+default-dev (>= 0.5.1-~~),
librust-proxmox-rest-server-0.5+rate-limited-stream-dev (>= 0.5.1-~~),
librust-proxmox-rest-server-0.5+templates-dev (>= 0.5.1-~~),
librust-proxmox-router-2+cli-dev,
librust-proxmox-router-2+default-dev,
librust-proxmox-router-2+server-dev,
@ -85,13 +86,13 @@ Build-Depends: bash-completion,
librust-proxmox-serde-0.1+serde-json-dev (>= 0.1.1-~~),
librust-proxmox-shared-memory-0.3+default-dev,
librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
librust-proxmox-subscription-0.4+api-types-dev,
librust-proxmox-subscription-0.4+default-dev,
librust-proxmox-sys-0.5+acl-dev,
librust-proxmox-sys-0.5+crypt-dev,
librust-proxmox-sys-0.5+default-dev,
librust-proxmox-sys-0.5+logrotate-dev,
librust-proxmox-sys-0.5+timer-dev,
librust-proxmox-subscription-0.4+api-types-dev (>= 0.4.2-~~),
librust-proxmox-subscription-0.4+default-dev (>= 0.4.2-~~),
librust-proxmox-sys-0.5+acl-dev (>= 0.5.2-~~),
librust-proxmox-sys-0.5+crypt-dev (>= 0.5.2-~~),
librust-proxmox-sys-0.5+default-dev (>= 0.5.2-~~),
librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.2-~~),
librust-proxmox-sys-0.5+timer-dev (>= 0.5.2-~~),
librust-proxmox-tfa-4+api-dev (>= 4.0.4-~~),
librust-proxmox-tfa-4+api-types-dev (>= 4.0.4-~~),
librust-proxmox-tfa-4+default-dev (>= 4.0.4-~~),

View File

@ -1,7 +1,7 @@
Backup Client Usage
===================
The command-line client for Proxmox Backup Server is called
The command-line client for `Proxmox Backup`_ Server is called
:command:`proxmox-backup-client`.
.. _client_repository:
@ -26,6 +26,9 @@ brackets (for example, `[fe80::01]`).
You can pass the repository with the ``--repository`` command-line option, or
by setting the ``PBS_REPOSITORY`` environment variable.
The web interface provides copyable repository text in the datastore summary
with the `Show Connection Information` button.
Below are some examples of valid repositories and their corresponding real
values:

View File

@ -1,7 +1,7 @@
Backup Protocol
===============
Proxmox Backup Server uses a REST-based API. While the management
`Proxmox Backup`_ Server uses a REST-based API. While the management
interface uses normal HTTP, the actual backup and restore interface uses
HTTP/2 for improved performance. Both HTTP and HTTP/2 are well known
standards, so the following section assumes that you are familiar with

View File

@ -7,7 +7,7 @@ Introduction and Format
-----------------------
Certain tasks, for example pruning and garbage collection, need to be
performed on a regular basis. Proxmox Backup Server uses a format inspired
performed on a regular basis. `Proxmox Backup`_ Server uses a format inspired
by the systemd Time and Date Specification (see `systemd.time manpage`_)
called `calendar events` for its schedules.
@ -89,11 +89,11 @@ Not all features of systemd calendar events are implemented:
Notes on Scheduling
-------------------
In `Proxmox Backup`_, scheduling for most tasks is done in the
In Proxmox Backup, scheduling for most tasks is done in the
`proxmox-backup-proxy`. This daemon checks all job schedules
every minute, to see if any are due. This means that even though
`calendar events` can contain seconds, it will only be checked
once per minute.
Also, all schedules will be checked against the timezone set
in the `Proxmox Backup`_ server.
in the Proxmox Backup Server.

View File

@ -9,7 +9,7 @@ own (self-signed) certificate. This certificate is used for encrypted
communication with the hosts ``proxmox-backup-proxy`` service, for any API
call between a user or backup-client and the web-interface.
Certificate verification when sending backups to a `Proxmox Backup`_ server
Certificate verification when sending backups to a Proxmox Backup Server
is either done based on pinning the certificate fingerprints in the storage/remote
configuration, or by using certificates, signed by a trusted certificate authority.
@ -18,7 +18,7 @@ configuration, or by using certificates, signed by a trusted certificate authori
Certificates for the API and SMTP
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`Proxmox Backup`_ stores its certificate and key in:
Proxmox Backup stores its certificate and key in:
- ``/etc/proxmox-backup/proxy.pem``
@ -33,11 +33,11 @@ You have the following options for the certificate:
commercial Certificate Authority (CA)).
3. Use an ACME provider like Lets Encrypt to get a trusted certificate
with automatic renewal; this is also integrated in the `Proxmox Backup`_
with automatic renewal; this is also integrated in the Proxmox Backup
API and web interface.
Certificates are managed through the `Proxmox Backup`_
web-interface/API or using the the ``proxmox-backup-manager`` CLI tool.
Certificates are managed through the Proxmox Backup
web-interface/API or using the ``proxmox-backup-manager`` CLI tool.
.. _sysadmin_certs_upload_custom:
@ -61,9 +61,9 @@ Note that any certificate key files must not be password protected.
Trusted certificates via Lets Encrypt (ACME)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`Proxmox Backup`_ includes an implementation of the **A**\ utomatic
Proxmox Backup includes an implementation of the **A**\ utomatic
**C**\ ertificate **M**\ anagement **E**\ nvironment (**ACME**)
protocol, allowing `Proxmox Backup`_ admins to use an ACME provider
protocol, allowing Proxmox Backup admins to use an ACME provider
like Lets Encrypt for easy setup of TLS certificates, which are
accepted and trusted by modern operating systems and web browsers out of
the box.
@ -112,7 +112,7 @@ ACME Plugins
^^^^^^^^^^^^
The ACME plugins role is to provide automatic verification that you,
and thus the `Proxmox Backup`_ server under your operation, are the
and thus the Proxmox Backup Server under your operation, are the
real owner of a domain. This is the basic building block of automatic
certificate management.
@ -129,7 +129,7 @@ DNS record in the domains zone.
:align: right
:alt: Create ACME Account
`Proxmox Backup`_ supports both of those challenge types out of the
Proxmox Backup supports both of those challenge types out of the
box, you can configure plugins either over the web interface under
``Certificates -> ACME Challenges``, or using the
``proxmox-backup-manager acme plugin add`` command.
@ -180,7 +180,7 @@ with Lets Encrypts ACME.
- There **must** be no other listener on port 80.
- The requested (sub)domain needs to resolve to a public IP of the
`Proxmox Backup`_ host.
Proxmox Backup host.
.. _sysadmin_certs_acme_dns_challenge:
@ -197,7 +197,7 @@ allows provisioning of ``TXT`` records via an API.
Configuring ACME DNS APIs for validation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
`Proxmox Backup`_ re-uses the DNS plugins developed for the
Proxmox Backup re-uses the DNS plugins developed for the
``acme.sh`` [1]_ project. Please refer to its documentation for details
on configuration of specific APIs.
@ -215,7 +215,7 @@ and entering the credential data to access your account over their API.
your provider. Configuration values do not need to be quoted with
single or double quotes; for some plugins that is even an error.
As there are many DNS providers and API endpoints, `Proxmox Backup`_
As there are many DNS providers and API endpoints, Proxmox Backup
automatically generates the form for the credentials, but not all
providers are annotated yet. For those you will see a bigger text area,
into which you simply need to copy all the credentials
@ -231,7 +231,7 @@ domain/DNS server, in case your primary/real DNS does not support
provisioning via an API. Manually set up a permanent ``CNAME`` record
for ``_acme-challenge.domain1.example`` pointing to
``_acme-challenge.domain2.example``, and set the ``alias`` property in
the `Proxmox Backup`_ node configuration file ``/etc/proxmox-backup/node.cfg``
the Proxmox Backup node configuration file ``/etc/proxmox-backup/node.cfg``
to ``domain2.example`` to allow the DNS server of ``domain2.example`` to
validate all challenges for ``domain1.example``.
@ -279,12 +279,12 @@ expired or if it will expire in the next 30 days.
.. _manually_change_certificate_over_command_line:
Manually Change Certificate over the Command Line
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to get rid of certificate verification warnings, you have to
generate a valid certificate for your server.
Log in to your `Proxmox Backup`_ via ssh or use the console:
Log in to your Proxmox Backup via ssh or use the console:
::
@ -309,9 +309,9 @@ Follow the instructions on the screen, for example:
After you have finished the certificate request, you have to send the
file ``req.pem`` to your Certification Authority (CA). The CA will issue
the certificate (BASE64 encoded), based on your request save this file
as ``cert.pem`` to your `Proxmox Backup`_.
as ``cert.pem`` to your Proxmox Backup.
To activate the new certificate, do the following on your `Proxmox Backup`_
To activate the new certificate, do the following on your Proxmox Backup
::
@ -328,7 +328,7 @@ Test your new certificate, using your browser.
.. note::
To transfer files to and from your `Proxmox Backup`_, you can use
To transfer files to and from your Proxmox Backup, you can use
secure copy: If your desktop runs Linux, you can use the ``scp``
command-line tool. If your desktop PC runs windows, please use an scp
client like WinSCP (see https://winscp.net/).

View File

@ -71,7 +71,7 @@ master_doc = 'index'
# General information about the project.
project = 'Proxmox Backup'
copyright = '2019-2022, Proxmox Server Solutions GmbH'
copyright = '2019-2023, Proxmox Server Solutions GmbH'
author = 'Proxmox Support Team'
# The version info for the project you're documenting acts as a replacement for
@ -122,7 +122,7 @@ man_pages = [
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
@ -202,6 +202,7 @@ html_theme_options = {
'API Viewer' : 'api-viewer/index.html',
'Prune Simulator' : 'prune-simulator/index.html',
'LTO Barcode Generator' : 'lto-barcode/index.html',
'Proxmox Backup Server Wiki' : 'https://pbs.proxmox.com'
},
'sidebar_width': '320px',
@ -413,6 +414,8 @@ latex_logo = "images/proxmox-logo.png"
#
# latex_domain_indices = True
latex_table_style = ['booktabs', 'colorrows']
# -- Options for Epub output ----------------------------------------------

View File

@ -1,7 +1,7 @@
Configuration Files
===================
All Proxmox Backup Server configuration files reside in the directory
All `Proxmox Backup`_ Server configuration files reside in the directory
``/etc/proxmox-backup/``.

View File

@ -10,15 +10,15 @@
.. _Zstandard: https://en.wikipedia.org/wiki/Zstandard
.. _Proxmox: https://www.proxmox.com
.. _Proxmox Community Forum: https://forum.proxmox.com
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-ve
.. _Proxmox Backup: https://pbs.proxmox.com/wiki/index.php/Main_Page
.. _PBS Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
.. _Proxmox Virtual Environment: https://www.proxmox.com/proxmox-virtual-environment
.. _Proxmox Backup: https://www.proxmox.com/proxmox-backup-server
.. _Proxmox Backup Server Development List: https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
.. _reStructuredText: https://www.sphinx-doc.org/en/master/usage/restructuredtext/index.html
.. _Rust: https://www.rust-lang.org/
.. _SHA-256: https://en.wikipedia.org/wiki/SHA-2
.. _Sphinx: https://www.sphinx-doc.org
.. _Virtual machine: https://en.wikipedia.org/wiki/Virtual_machine
.. _APT: http://en.wikipedia.org/wiki/Advanced_Packaging_Tool
.. _APT: https://en.wikipedia.org/wiki/Advanced_Packaging_Tool
.. _QEMU: https://www.qemu.org/
.. _LXC: https://linuxcontainers.org/lxc/introduction/
@ -27,7 +27,7 @@
.. _GCM: https://en.wikipedia.org/wiki/Galois/Counter_Mode
.. _AGPL3: https://www.gnu.org/licenses/agpl-3.0.en.html
.. _Debian: https://www.debian.org/index.html
.. _Debian Administrator's Handbook: https://debian-handbook.info/download/stable/debian-handbook.pdf
.. _Debian Administrator's Handbook: https://debian-handbook.info/
.. _LVM: https://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux)
.. _ZFS: https://en.wikipedia.org/wiki/ZFS
@ -37,4 +37,4 @@
.. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time
.. _ISO Week date: https://en.wikipedia.org/wiki/ISO_week_date
.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
.. _systemd.time manpage: https://manpages.debian.org/stable/systemd/systemd.time.7.en.html

View File

@ -0,0 +1,4 @@
Proxmox Backup Version , Debian Version , First Release , Debian EOL , Proxmox Backup EOL
Proxmox Backup 3 , Debian 12 (Bookworm) , 2023-06 , TBA , TBA
Proxmox Backup 2 , Debian 11 (Bullseye) , 2021-07 , 2024-07 , 2024-07
Proxmox Backup 1 , Debian 10 (Buster) , 2020-11 , 2022-08 , 2022-07
1 Proxmox Backup Version Debian Version First Release Debian EOL Proxmox Backup EOL
2 Proxmox Backup 3 Debian 12 (Bookworm) 2023-06 TBA TBA
3 Proxmox Backup 2 Debian 11 (Bullseye) 2021-07 2024-07 2024-07
4 Proxmox Backup 1 Debian 10 (Buster) 2020-11 2022-08 2022-07

View File

@ -4,7 +4,7 @@ FAQ
What distribution is Proxmox Backup Server (PBS) based on?
----------------------------------------------------------
Proxmox Backup Server is based on `Debian GNU/Linux <https://www.debian.org/>`_.
`Proxmox Backup`_ Server is based on `Debian GNU/Linux <https://www.debian.org/>`_.
Which platforms are supported as a backup source (client)?
@ -21,19 +21,54 @@ Proxmox Backup Server only supports 64-bit CPUs (AMD or Intel). There are no
future plans to support 32-bit processors.
.. _faq-support-table:
How long will my Proxmox Backup Server version be supported?
------------------------------------------------------------
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup Version | Debian Version | First Release | Debian EOL | Proxmox Backup EOL |
+=======================+======================+===============+============+====================+
|Proxmox Backup 3.x | Debian 12 (Bookworm) | 2023-06 | tba | tba |
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup 2.x | Debian 11 (Bullseye) | 2021-07 | 2024-07 | 2024-07 |
+-----------------------+----------------------+---------------+------------+--------------------+
|Proxmox Backup 1.x | Debian 10 (Buster) | 2020-11 | 2022-08 | 2022-07 |
+-----------------------+----------------------+---------------+------------+--------------------+
.. csv-table::
:file: faq-release-support-table.csv
:widths: 30 26 13 13 18
:header-rows: 1
How can I upgrade Proxmox Backup Server to the next point release?
------------------------------------------------------------------
Minor version upgrades, for example upgrading from Proxmox Backup Server in
version 3.1 to 3.2 or 3.3, can be done just like any normal update.
But, you should still check the `release notes
<https://pbs.proxmox.com/wiki/index.php/Roadmap>`_ for any relevant notable
or breaking change.
For the update itself use either the Web UI *Node -> Updates* panel or
through the CLI with:
.. code-block:: console
apt update
apt full-upgrade
.. note:: Always ensure you correctly setup the
:ref:`package repositories <sysadmin_package_repositories>` and only
continue with the actual upgrade if `apt update` did not hit any error.
.. _faq-upgrade-major:
How can I upgrade Proxmox Backup Server to the next major release?
------------------------------------------------------------------
Major version upgrades, for example going from Proxmox Backup Server 2.4 to
3.1, are also supported.
They must be carefully planned and tested and should **never** be started
without having an off-site copy of the important backups, e.g., via remote sync
or tape, ready.
Although the specific upgrade steps depend on your respective setup, we provide
general instructions and advice on how an upgrade should be performed:
* `Upgrade from Proxmox Backup Server 2 to 3 <https://pbs.proxmox.com/wiki/index.php/Upgrade_from_2_to_3>`_
* `Upgrade from Proxmox Backup Server 1 to 2 <https://pbs.proxmox.com/wiki/index.php/Upgrade_from_1.1_to_2.x>`_
Can I copy or synchronize my datastore to another location?
-----------------------------------------------------------
@ -76,3 +111,7 @@ data is then deduplicated on the server. This minimizes both the storage
consumed and the impact on the network. Each backup still references all
data and such is a full backup. For details see the
:ref:`Technical Overview <tech_design_overview>`
.. todo:: document our stability guarantees, i.e., the separate one for, in
increasing duration of how long we'll support it: api compat, backup
protocol compat and backup format compat

View File

@ -1,7 +1,7 @@
Graphical User Interface
========================
Proxmox Backup Server offers an integrated, web-based interface to manage the
`Proxmox Backup`_ Server offers an integrated, web-based interface to manage the
server. This means that you can carry out all administration tasks through your
web browser, and that you don't have to worry about installing extra management
tools. The web interface also provides a built-in console, so if you prefer the

View File

@ -16,10 +16,10 @@ The backup server stores the actual backed up data and provides a web based GUI
for various management tasks such as disk management.
.. note:: You always need a backup server. It is not possible to use
`Proxmox Backup`_ without the server part.
Proxmox Backup without the server part.
The disk image (ISO file) provided by Proxmox includes a complete Debian system
as well as all necessary packages for the `Proxmox Backup`_ Server.
as well as all necessary packages for the Proxmox Backup Server.
The installer will guide you through the setup process and allow
you to partition the local disk(s), apply basic system configuration
@ -27,7 +27,7 @@ you to partition the local disk(s), apply basic system configuration
The provided ISO will get you started in just a few minutes, and is the
recommended method for new and existing users.
Alternatively, `Proxmox Backup`_ Server can be installed on top of an
Alternatively, Proxmox Backup Server can be installed on top of an
existing Debian system.
Install `Proxmox Backup`_ Server using the Installer
@ -36,7 +36,7 @@ Install `Proxmox Backup`_ Server using the Installer
Download the ISO from |DOWNLOADS|.
It includes the following:
* The `Proxmox Backup`_ Server installer, which partitions the local
* The Proxmox Backup Server installer, which partitions the local
disk(s) with ext4, xfs or ZFS, and installs the operating system
* Complete operating system (Debian Linux, 64-bit)
@ -60,8 +60,8 @@ standard Debian installation. After configuring the
.. code-block:: console
# apt-get update
# apt-get install proxmox-backup-server
# apt update
# apt install proxmox-backup-server
The above commands keep the current (Debian) kernel and install a minimal
set of required packages.
@ -71,13 +71,13 @@ does, please use the following:
.. code-block:: console
# apt-get update
# apt-get install proxmox-backup
# apt update
# apt install proxmox-backup
This will install all required packages, the Proxmox kernel with ZFS_
support, and a set of common and useful packages.
.. caution:: Installing `Proxmox Backup`_ on top of an existing Debian_
.. caution:: Installing Proxmox Backup on top of an existing Debian_
installation looks easy, but it assumes that the base system and local
storage have been set up correctly. In general this is not trivial, especially
when LVM_ or ZFS_ is used. The network configuration is completely up to you
@ -95,8 +95,8 @@ After configuring the
.. code-block:: console
# apt-get update
# apt-get install proxmox-backup-server
# apt update
# apt install proxmox-backup-server
.. caution:: Installing the backup server directly on the hypervisor
is not recommended. It is safer to use a separate physical
@ -110,7 +110,7 @@ After configuring the
Client Installation
-------------------
Install `Proxmox Backup`_ Client on Debian
Install Proxmox Backup Client on Debian
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Proxmox ships as a set of Debian packages to be installed on top of a standard
@ -119,8 +119,8 @@ you need to run:
.. code-block:: console
# apt-get update
# apt-get install proxmox-backup-client
# apt update
# apt install proxmox-backup-client
.. note:: The client-only repository should be usable by most recent Debian and

View File

@ -4,8 +4,8 @@ Introduction
What is Proxmox Backup Server?
------------------------------
Proxmox Backup Server is an enterprise-class, client-server backup solution that
is capable of backing up :term:`virtual machine<Virtual machine>`\ s,
`Proxmox Backup`_ Server is an enterprise-class, client-server backup solution
that is capable of backing up :term:`virtual machine<Virtual machine>`\ s,
:term:`container<Container>`\ s, and physical hosts. It is specially optimized
for the `Proxmox Virtual Environment`_ platform and allows you to back up your
data securely, even between remote sites, providing easy management through a
@ -178,7 +178,7 @@ Mailing Lists
Proxmox Backup Server is fully open-source and contributions are welcome! Here
is the primary communication channel for developers:
:Mailing list for developers: `Proxmox Backup Sever Development List`_
:Mailing list for developers: `Proxmox Backup Server Development List`_
Bug Tracker
~~~~~~~~~~~

View File

@ -172,7 +172,7 @@ Changing a failed device
Changing a failed bootable device
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Depending on how Proxmox Backup was installed, it is either using `grub` or
Depending on how `Proxmox Backup`_ was installed, it is either using `grub` or
`systemd-boot` as a bootloader.
In either case, the first steps of copying the partition table, reissuing GUIDs
@ -195,7 +195,7 @@ With `systemd-boot`:
# proxmox-boot-tool init <new ESP>
.. NOTE:: `ESP` stands for EFI System Partition, which is setup as partition #2 on
bootable disks setup by the `Proxmox Backup`_ installer. For details, see
bootable disks setup by the Proxmox Backup installer. For details, see
:ref:`Setting up a new partition for use as synced ESP <systembooting-proxmox-boot-setup>`.
With `grub`:
@ -214,17 +214,17 @@ Activate e-mail notification
ZFS comes with an event daemon, ``ZED``, which monitors events generated by the
ZFS kernel module. The daemon can also send emails upon ZFS events, such as pool
errors. Newer ZFS packages ship the daemon in a separate package ``zfs-zed``,
which should already be installed by default in `Proxmox Backup`_.
which should already be installed by default in Proxmox Backup.
You can configure the daemon via the file ``/etc/zfs/zed.d/zed.rc``, using your
preferred editor. The required setting for email notfication is
preferred editor. The required setting for email notification is
``ZED_EMAIL_ADDR``, which is set to ``root`` by default.
.. code-block:: console
ZED_EMAIL_ADDR="root"
Please note that `Proxmox Backup`_ forwards mails to `root` to the email address
Please note that Proxmox Backup forwards mails to `root` to the email address
configured for the root user.
@ -290,17 +290,17 @@ an editor of your choice and add the following line:
vm.swappiness = 10
.. table:: Linux kernel `swappiness` parameter values
:widths:auto
:widths: 1, 3
==================== ===============================================================
Value Strategy
==================== ===============================================================
vm.swappiness = 0 The kernel will swap only to avoid an 'out of memory' condition
vm.swappiness = 1 Minimum amount of swapping without disabling it entirely.
vm.swappiness = 10 Sometimes recommended to improve performance when sufficient memory exists in a system.
vm.swappiness = 60 The default value.
vm.swappiness = 100 The kernel will swap aggressively.
==================== ===============================================================
=================== ===============================================================
Value Strategy
=================== ===============================================================
vm.swappiness = 0 The kernel will swap only to avoid an 'out of memory' condition
vm.swappiness = 1 Minimum amount of swapping without disabling it entirely.
vm.swappiness = 10 Sometimes recommended to improve performance when sufficient memory exists in a system.
vm.swappiness = 60 The default value.
vm.swappiness = 100 The kernel will swap aggressively.
=================== ===============================================================
ZFS compression
^^^^^^^^^^^^^^^

View File

@ -171,7 +171,7 @@ start.
GC Background
^^^^^^^^^^^^^
In Proxmox Backup Server, backup data is not saved directly, but rather as
In `Proxmox Backup`_ Server, backup data is not saved directly, but rather as
chunks that are referred to by the indexes of each backup snapshot. This
approach enables reuse of chunks through deduplication, among other benefits
that are detailed in the :ref:`tech_design_overview`.

View File

@ -6,11 +6,11 @@ Managing Remotes & Sync
:term:`Remote`
--------------
A remote refers to a separate Proxmox Backup Server installation and a user on that
installation, from which you can `sync` datastores to a local datastore with a
`Sync Job`. You can configure remotes in the web interface, under **Configuration
-> Remotes**. Alternatively, you can use the ``remote`` subcommand. The
configuration information for remotes is stored in the file
A remote refers to a separate `Proxmox Backup`_ Server installation and a user
on that installation, from which you can `sync` datastores to a local datastore
with a `Sync Job`. You can configure remotes in the web interface, under
**Configuration -> Remotes**. Alternatively, you can use the ``remote``
subcommand. The configuration information for remotes is stored in the file
``/etc/proxmox-backup/remote.cfg``.
.. image:: images/screenshots/pbs-gui-remote-add.png

View File

@ -5,12 +5,12 @@ Markdown Primer
"Markdown is a text-to-HTML conversion tool for web writers. Markdown allows
you to write using an easy-to-read, easy-to-write plain text format, then
convertit to structurally valid XHTML (or HTML)."
convert it to structurally valid XHTML (or HTML)."
-- John Gruber, https://daringfireball.net/projects/markdown/
The "Notes" panel of the Proxmox Backup Server web-interface supports
The "Notes" panel of the `Proxmox Backup`_ Server web-interface supports
rendering Markdown text.
Proxmox Backup Server supports CommonMark with most extensions of GFM (GitHub

View File

@ -8,8 +8,8 @@ Network Management
:align: right
:alt: System and Network Configuration Overview
Proxmox Backup Server provides both a web interface and a command-line tool for
network configuration. You can find the configuration options in the web
`Proxmox Backup`_ Server provides both a web interface and a command-line tool
for network configuration. You can find the configuration options in the web
interface under the **Network Interfaces** section of the **Configuration** menu
tree item. The command-line tool is accessed via the ``network`` subcommand.
These interfaces allow you to carry out some basic network management tasks,

View File

@ -70,7 +70,7 @@ and the md5sum, with the expected output below:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is the stable, recommended repository. It is available for
all `Proxmox Backup`_ subscription users. It contains the most stable packages,
all Proxmox Backup subscription users. It contains the most stable packages,
and is suitable for production use. The ``pbs-enterprise`` repository is
enabled by default:
@ -140,7 +140,7 @@ You can access this repository by adding the following line to
Proxmox Backup Client-only Repository
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you want to :ref:`use the the Proxmox Backup Client <client_creating_backups>`
If you want to :ref:`use the Proxmox Backup Client <client_creating_backups>`
on systems using a Linux distribution not based on Proxmox projects, you can
use the client-only repository.

View File

@ -6,9 +6,9 @@ pbs2to3
Description
===========
This tool will help you to detect common pitfalls and misconfguration before,
and during the upgrade of a Proxmox VE system Any failure must be addressed
before the upgrade, and any waring must be addressed, or at least carefully
evaluated, if a false-positive is suspected
This tool will help you to detect common pitfalls and misconfiguration before,
and during the upgrade of a Proxmox Backup Server system. Any failures or
warnings must be addressed prior to the upgrade. If you suspect that a message
is a false positive, you have to make carefully sure that it really is.
.. include:: ../pbs-copyright.rst

View File

@ -749,7 +749,7 @@ Ext.onReady(function() {
fieldLabel: 'End Time',
allowBlank: false,
format: 'H:i',
// cant bind value because ExtJS sets the year to 2008 to
// can't bind value because ExtJS sets the year to 2008 to
// protect against DST issues and date picker zeroes hour/minute
value: vm.get('now'),
listeners: {

View File

@ -3,8 +3,8 @@
`Proxmox VE`_ Integration
-------------------------
Proxmox Backup Server can be integrated into a Proxmox VE standalone or cluster
setup, by adding it as a storage in Proxmox VE.
`Proxmox Backup`_ Server can be integrated into a Proxmox VE standalone or
cluster setup, by adding it as a storage in Proxmox VE.
See also the `Proxmox VE Storage - Proxmox Backup Server
<https://pve.proxmox.com/pve-docs/pve-admin-guide.html#storage_pbs>`_ section

View File

@ -3,8 +3,8 @@
It is inspired by `casync file archive format
<http://0pointer.net/blog/casync-a-tool-for-distributing-file-system-images.html>`_,
which caters to a similar use-case.
The ``.pxar`` format is adapted to fulfill the specific needs of the Proxmox
Backup Server, for example, efficient storage of hard links.
The ``.pxar`` format is adapted to fulfill the specific needs of the
`Proxmox Backup`_ Server, for example, efficient storage of hard links.
The format is designed to reduce the required storage on the server by
achieving a high level of deduplication.

View File

@ -11,7 +11,7 @@ Disk Management
:align: right
:alt: List of disks
Proxmox Backup Server comes with a set of disk utilities, which are
`Proxmox Backup`_ Server comes with a set of disk utilities, which are
accessed using the ``disk`` subcommand or the web interface. This subcommand
allows you to initialize disks, create various filesystems, and get information
about the disks.
@ -263,7 +263,7 @@ categorized by checksum, after a backup operation has been executed.
Once you've uploaded some backups or created namespaces, you may see the backup
type (`ct`, `vm`, `host`) and the start of the namespace hierachy (`ns`).
type (`ct`, `vm`, `host`) and the start of the namespace hierarchy (`ns`).
.. _storage_namespaces:
@ -335,11 +335,11 @@ There are some tuning related options for the datastore that are more advanced:
index file (.fidx/.didx). While this might slow down iterating on many slow
storages, on very fast ones (for example: NVMEs) the collecting and sorting
can take more time than gained through the sorted iterating.
This option can be set with:
This option can be set with:
.. code-block:: console
.. code-block:: console
# proxmox-backup-manager datastore update <storename> --tuning 'chunk-order=none'
# proxmox-backup-manager datastore update <storename> --tuning 'chunk-order=none'
* ``sync-level``: Datastore fsync level:

View File

@ -9,7 +9,7 @@ Debian packages, and that the base system is well documented. The `Debian
Administrator's Handbook`_ is available online, and provides a
comprehensive introduction to the Debian operating system.
A standard `Proxmox Backup`_ installation uses the default
A standard Proxmox Backup installation uses the default
repositories from Debian, so you get bug fixes and security updates
through that channel. In addition, we provide our own package
repository to roll out all Proxmox related packages. This includes
@ -19,8 +19,8 @@ We also deliver a specially optimized Linux kernel, based on the Ubuntu
kernel. This kernel includes drivers for ZFS_.
The following sections will concentrate on backup related topics. They
will explain things which are different on `Proxmox Backup`_, or
tasks which are commonly used on `Proxmox Backup`_. For other topics,
will explain things which are different on Proxmox Backup, or
tasks which are commonly used on Proxmox Backup. For other topics,
please refer to the standard Debian documentation.

View File

@ -8,8 +8,9 @@ Host Bootloader
selected in the installer.
For EFI Systems installed with ZFS as the root filesystem ``systemd-boot`` is
used. All other deployments use the standard ``grub`` bootloader (this usually
also applies to systems which are installed on top of Debian).
used, unless Secure Boot is enabled. All other deployments use the standard
``grub`` bootloader (this usually also applies to systems which are installed
on top of Debian).
.. _systembooting-installer-part-scheme:
@ -17,7 +18,7 @@ also applies to systems which are installed on top of Debian).
Partitioning Scheme Used by the Installer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The `Proxmox Backup`_ installer creates 3 partitions on all disks selected for
The Proxmox Backup installer creates 3 partitions on all disks selected for
installation.
The created partitions are:
@ -30,9 +31,10 @@ The created partitions are:
remaining space available for the chosen storage type
Systems using ZFS as a root filesystem are booted with a kernel and initrd image
stored on the 512 MB EFI System Partition. For legacy BIOS systems, ``grub`` is
used, for EFI systems ``systemd-boot`` is used. Both are installed and configured
to point to the ESPs.
stored on the 512 MB EFI System Partition. For legacy BIOS systems, and EFI
systems with Secure Boot enabled, ``grub`` is used, for EFI systems without
Secure Boot, ``systemd-boot`` is used. Both are installed and configured to
point to the ESPs.
``grub`` in BIOS mode (``--target i386-pc``) is installed onto the BIOS Boot
Partition of all selected disks on all systems booted with ``grub`` (that is,
@ -86,8 +88,8 @@ Setting up a New Partition for use as Synced ESP
To format and initialize a partition as synced ESP, for example, after replacing a
failed vdev in an rpool, ``proxmox-boot-tool`` from ``proxmox-kernel-helper`` can be used.
WARNING: the ``format`` command will format the ``<partition>``. Make sure to pass
in the right device/partition!
.. WARNING:: the ``format`` command will format the ``<partition>``. Make sure
to pass in the right device/partition!
For example, to format an empty partition ``/dev/sda2`` as ESP, run the following:
@ -96,12 +98,21 @@ For example, to format an empty partition ``/dev/sda2`` as ESP, run the followin
# proxmox-boot-tool format /dev/sda2
To setup an existing, unmounted ESP located on ``/dev/sda2`` for inclusion in
`Proxmox Backup`_'s kernel update synchronization mechanism, use the following:
Proxmox Backup's kernel update synchronization mechanism, use the following:
.. code-block:: console
# proxmox-boot-tool init /dev/sda2
or
.. code-block:: console
# proxmox-boot-tool init /dev/sda2 grub
to force initialization with Grub instead of systemd-boot, for example for
Secure Boot support.
Following this, ``/etc/kernel/proxmox-boot-uuids`` should contain a new line with the
UUID of the newly added partition. The ``init`` command will also automatically
trigger a refresh of all configured ESPs.
@ -187,7 +198,7 @@ Determine which Bootloader is Used
:alt: Grub boot screen
The simplest and most reliable way to determine which bootloader is used, is to
watch the boot process of the `Proxmox Backup`_ node.
watch the boot process of the Proxmox Backup node.
You will either see the blue box of ``grub`` or the simple black on white
@ -243,6 +254,8 @@ and is quite well documented
(see the `Grub Manual
<https://www.gnu.org/software/grub/manual/grub/grub.html>`_).
.. _systembooting-grub-config:
Configuration
^^^^^^^^^^^^^
@ -265,8 +278,8 @@ Systemd-boot
``systemd-boot`` is a lightweight EFI bootloader. It reads the kernel and initrd
images directly from the EFI Service Partition (ESP) where it is installed.
The main advantage of directly loading the kernel from the ESP is that it does
not need to reimplement the drivers for accessing the storage. In `Proxmox
Backup`_, :ref:`proxmox-boot-tool <systembooting-proxmox-boot-tool>` is used to
not need to reimplement the drivers for accessing the storage. In Proxmox
Backup, :ref:`proxmox-boot-tool <systembooting-proxmox-boot-tool>` is used to
keep the configuration on the ESPs synchronized.
.. _systembooting-systemd-boot-config:
@ -300,6 +313,8 @@ Editing the Kernel Commandline
You can modify the kernel commandline in the following places, depending on the
bootloader used:
.. _systembooting-kernel-cmdline-grub:
Grub
^^^^
@ -308,6 +323,8 @@ The kernel commandline needs to be placed in the variable
``update-grub`` appends its content to all ``linux`` entries in
``/boot/grub/grub.cfg``.
.. _systembooting-kernel-cmdline-systemd-boot:
systemd-boot
^^^^^^^^^^^^
@ -342,7 +359,7 @@ would run:
# proxmox-boot-tool kernel pin 5.15.30-1-pve
.. TIP:: The pinning functionality works for all `Proxmox Backup`_ systems, not only those using
.. TIP:: The pinning functionality works for all Proxmox Backup systems, not only those using
``proxmox-boot-tool`` to synchronize the contents of the ESPs, if your system
does not use ``proxmox-boot-tool`` for synchronizing, you can also skip the
``proxmox-boot-tool refresh`` call in the end.
@ -375,3 +392,188 @@ content and configuration on the ESPs by running the ``refresh`` subcommand.
.. code-block:: console
# proxmox-boot-tool refresh
.. _systembooting-secure-boot:
Secure Boot
~~~~~~~~~~~
Since Proxmox Backup 3.1, Secure Boot is supported out of the box via signed
packages and integration in ``proxmox-boot-tool``.
The following packages need to be installed for Secure Boot to be enabled:
* ``shim-signed`` (shim bootloader signed by Microsoft)
* ``shim-helpers-amd64-signed`` (fallback bootloader and MOKManager, signed by Proxmox)
* ``grub-efi-amd64-signed`` (Grub EFI bootloader, signed by Proxmox)
* ``proxmox-kernel-6.X.Y-Z-pve-signed`` (Kernel image, signed by Proxmox)
Only Grub as bootloader is supported out of the box, since there are no other
pre-signed bootloader packages available. Any new installation of Proxmox Backup
will automatically have all of the above packages included.
More details about how Secure Boot works, and how to customize the setup, are
available in `our wiki <https://pve.proxmox.com/wiki/Secure_Boot_Setup>`_.
.. _systembooting-secure-boot-existing-installation:
Switching an Existing Installation to Secure Boot
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. WARNING:: This can lead to an unbootable installation in some cases if not
done correctly. Reinstalling the host will setup Secure Boot automatically if
available, without any extra interactions. **Make sure you have a working and
well-tested backup of your Proxmox Backup host!**
An existing UEFI installation can be switched over to Secure Boot if desired,
without having to reinstall Proxmox Backup from scratch.
First, ensure your system is fully up-to-date. Next, install all the required
pre-signed packages as listed above. Grub automatically creates the needed EFI
boot entry for booting via the default shim.
.. _systembooting-secure-boot-existing-systemd-boot:
**systemd-boot**
""""""""""""""""
If ``systemd-boot`` is used as a bootloader (see
:ref:`Determine which Bootloader is used <systembooting-determine-bootloader>`),
some additional setup is needed. This is only the case if Proxmox Backup was
installed with ZFS-on-root.
To check the latter, run:
.. code-block:: console
# findmnt /
If the host is indeed using ZFS as root filesystem, the ``FSTYPE`` column should
contain ``zfs``:
.. code-block:: console
TARGET SOURCE FSTYPE OPTIONS
/ rpool/ROOT/pbs-1 zfs rw,relatime,xattr,noacl
Next, a suitable potential ESP (EFI system partition) must be found. This can be
done using the ``lsblk`` command as following:
.. code-block:: console
# lsblk -o +FSTYPE
The output should look something like this:
.. code-block:: console
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS FSTYPE
sda 8:0 0 32G 0 disk
├─sda1 8:1 0 1007K 0 part
├─sda2 8:2 0 512M 0 part vfat
└─sda3 8:3 0 31.5G 0 part zfs_member
sdb 8:16 0 32G 0 disk
├─sdb1 8:17 0 1007K 0 part
├─sdb2 8:18 0 512M 0 part vfat
└─sdb3 8:19 0 31.5G 0 part zfs_member
In this case, the partitions ``sda2`` and ``sdb2`` are the targets. They can be
identified by their size of 512M and their ``FSTYPE`` being ``vfat``, in
this case on a ZFS RAID-1 installation.
These partitions must be properly set up for booting through Grub using
``proxmox-boot-tool``. This command (using ``sda2`` as an example) must be run
separately for each individual ESP:
.. code-block:: console
# proxmox-boot-tool init /dev/sda2 grub
Afterwards, you can sanity-check the setup by running the following command:
.. code-block:: console
# efibootmgr -v
This list should contain an entry looking similar to this:
.. code-block:: console
[..]
Boot0009* proxmox HD(2,GPT,..,0x800,0x100000)/File(\EFI\proxmox\shimx64.efi)
[..]
.. NOTE:: The old ``systemd-boot`` bootloader will be kept, but Grub will be
preferred. This way, if booting using Grub in Secure Boot mode does not work
for any reason, the system can still be booted using ``systemd-boot`` with
Secure Boot turned off.
Now the host can be rebooted and Secure Boot enabled in the UEFI firmware setup
utility.
On reboot, a new entry named ``proxmox`` should be selectable in the UEFI
firmware boot menu, which boots using the pre-signed EFI shim.
If, for any reason, no ``proxmox`` entry can be found in the UEFI boot menu, you
can try adding it manually (if supported by the firmware), by adding the file
``\EFI\proxmox\shimx64.efi`` as a custom boot entry.
.. NOTE:: Some UEFI firmwares are known to drop the ``proxmox`` boot option on
reboot. This can happen if the ``proxmox`` boot entry is pointing to a Grub
installation on a disk, where the disk itself is not a boot option. If
possible, try adding the disk as a boot option in the UEFI firmware setup
utility and run ``proxmox-boot-tool`` again.
.. TIP:: To enroll custom keys, see the accompanying `Secure Boot wiki page
<https://pve.proxmox.com/wiki/Secure_Boot_Setup#Setup_instructions_for_db_key_variant>`_.
.. _systembooting-secure-boot-other-modules:
Using DKMS/Third Party Modules With Secure Boot
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
On systems with Secure Boot enabled, the kernel will refuse to load modules
which are not signed by a trusted key. The default set of modules shipped with
the kernel packages is signed with an ephemeral key embedded in the kernel
image which is trusted by that specific version of the kernel image.
In order to load other modules, such as those built with DKMS or manually, they
need to be signed with a key trusted by the Secure Boot stack. The easiest way
to achieve this is to enroll them as Machine Owner Key (``MOK``) with
``mokutil``.
The ``dkms`` tool will automatically generate a keypair and certificate in
``/var/lib/dkms/mok.key`` and ``/var/lib/dkms/mok.pub`` and use it for signing
the kernel modules it builds and installs.
You can view the certificate contents with
.. code-block:: console
# openssl x509 -in /var/lib/dkms/mok.pub -noout -text
and enroll it on your system using the following command:
.. code-block:: console
# mokutil --import /var/lib/dkms/mok.pub
input password:
input password again:
The ``mokutil`` command will ask for a (temporary) password twice, this password
needs to be entered one more time in the next step of the process! Rebooting
the system should automatically boot into the ``MOKManager`` EFI binary, which
allows you to verify the key/certificate and confirm the enrollment using the
password selected when starting the enrollment using ``mokutil``. Afterwards,
the kernel should allow loading modules built with DKMS (which are signed with
the enrolled ``MOK``). The ``MOK`` can also be used to sign custom EFI binaries
and kernel images if desired.
The same procedure can also be used for custom/third-party modules not managed
with DKMS, but the key/certificate generation and signing steps need to be done
manually in that case.

View File

@ -65,7 +65,7 @@ tape compression feature has no advantage.
Supported Hardware
------------------
Proxmox Backup Server supports `Linear Tape-Open`_ generation 5 (LTO-5)
`Proxmox Backup`_ Server supports `Linear Tape-Open`_ generation 5 (LTO-5)
or later and has best-effort support for generation 4 (LTO-4). While
many LTO-4 systems are known to work, some might need firmware updates or
do not implement necessary features to work with Proxmox Backup Server.
@ -567,7 +567,7 @@ a single media pool, so a job only uses tapes from that pool.
will be double encrypted.
The password protected key is stored on each medium, so that it is
possbible to `restore the key <tape_restore_encryption_key_>`_ using
possible to `restore the key <tape_restore_encryption_key_>`_ using
the password. Please make sure to remember the password, in case
you need to restore the key.
@ -998,7 +998,7 @@ into the drive. Then run:
.. code-block:: console
# proxmox-tape key restore
Tepe Encryption Key Password: ***********
Tape Encryption Key Password: ***********
If the password is correct, the key will get imported to the
database. Further restore jobs automatically use any available key.

View File

@ -234,8 +234,8 @@ Restore without a Running Proxmox Backup Server
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
It's possible to restore specific files from a snapshot, without a running
Proxmox Backup Server instance, using the ``recover`` subcommand, provided you
have access to the intact index and chunk files. Note that you also need the
`Proxmox Backup`_ Server instance, using the ``recover`` subcommand, provided
you have access to the intact index and chunk files. Note that you also need the
corresponding key file if the backup was encrypted.
.. code-block:: console

View File

@ -17,7 +17,7 @@ backup virtual machine images.
Variable sized chunking needs more CPU power, but is essential to get
good deduplication rates for file archives.
The Proxmox Backup Server supports both strategies.
The `Proxmox Backup`_ Server supports both strategies.
Image Archives: ``<name>.img``

View File

@ -12,7 +12,7 @@ User Configuration
:align: right
:alt: User management
Proxmox Backup Server supports several authentication realms, and you need to
`Proxmox Backup`_ Server supports several authentication realms, and you need to
choose the realm when you add a new user. Possible realms are:
:pam: Linux PAM standard authentication. Use this if you want to
@ -329,16 +329,19 @@ references are specified in curly brackets.
Some examples are:
* `/datastore`: Access to *all* datastores on a Proxmox Backup server
* `/datastore/{store}`: Access to a specific datastore on a Proxmox Backup
server
* `/datastore/{store}/{ns}`: Access to a specific namespace on a specific
datastore
* `/remote`: Access to all remote entries
* `/system/network`: Access to configure the host network
* `/tape/`: Access to tape devices, pools and jobs
* `/access/users`: User administration
* `/access/openid/{id}`: Administrative access to a specific OpenID Connect realm
.. table::
:align: left
=========================== =========================================================
``/datastore`` Access to *all* datastores on a Proxmox Backup server
``/datastore/{store}`` Access to a specific datastore on a Proxmox Backup server
``/datastore/{store}/{ns}`` Access to a specific namespace on a specific datastore
``/remote`` Access to all remote entries
``/system/network`` Access to configure the host network
``/tape/`` Access to tape devices, pools and jobs
``/access/users`` User administration
``/access/openid/{id}`` Administrative access to a specific OpenID Connect realm
=========================== =========================================================
Inheritance
^^^^^^^^^^^

View File

@ -34,7 +34,7 @@ async fn run() -> Result<(), Error> {
let backup_time = proxmox_time::parse_rfc3339("2019-06-28T10:49:48Z")?;
let client = BackupReader::start(
client,
&client,
None,
"store2",
&BackupNamespace::root(),

View File

@ -280,7 +280,7 @@ pub const ACL_UGID_TYPE_SCHEMA: Schema = StringSchema::new("Type of 'ugid' prope
}
}
)]
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// ACL list entry.
pub struct AclListItem {
pub path: String,

View File

@ -195,7 +195,7 @@ pub enum DatastoreFSyncLevel {
/// while reducing the impact on many file systems in contrast to the file level sync.
/// Depending on the setup, it might have a negative impact on unrelated write operations
/// of the underlying filesystem, but it is generally a good compromise between performance
/// and consitency.
/// and consistency.
#[default]
Filesystem,
}
@ -213,7 +213,9 @@ pub enum DatastoreFSyncLevel {
/// Datastore tuning options
pub struct DatastoreTuning {
    /// Iterate chunks in this order
    // `None` means the property is omitted from serialized output, leaving the
    // server-side default chunk iteration order in effect.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub chunk_order: Option<ChunkOrder>,
    /// Datastore fsync level; `None` omits the property on serialization so the
    /// default level applies (see the `DatastoreFSyncLevel` enum's `#[default]`
    /// variant, `Filesystem`).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub sync_level: Option<DatastoreFSyncLevel>,
}
@ -376,7 +378,7 @@ pub struct DataStoreListItem {
},
},
)]
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about archive files inside a backup snapshot.
pub struct BackupContent {
@ -410,7 +412,7 @@ pub enum VerifyState {
},
},
)]
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Task properties.
pub struct SnapshotVerifyState {
/// UPID of the verify task
@ -1074,7 +1076,7 @@ impl std::str::FromStr for BackupPart {
},
},
)]
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about backup snapshot.
pub struct SnapshotListItem {
@ -1120,7 +1122,7 @@ pub struct SnapshotListItem {
},
},
)]
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a backup group.
pub struct GroupListItem {

View File

@ -17,8 +17,8 @@ const_regex! {
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
/// Regex for sync jobs 'REMOTE:REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:");
/// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:");
}
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
@ -138,21 +138,26 @@ pub enum Notify {
/// Datastore notify settings
pub struct DatastoreNotify {
/// Garbage collection settings
#[serde(skip_serializing_if = "Option::is_none")]
pub gc: Option<Notify>,
/// Verify job setting
#[serde(skip_serializing_if = "Option::is_none")]
pub verify: Option<Notify>,
/// Sync job setting
#[serde(skip_serializing_if = "Option::is_none")]
pub sync: Option<Notify>,
/// Prune job setting
#[serde(skip_serializing_if = "Option::is_none")]
pub prune: Option<Notify>,
}
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema =
StringSchema::new("Datastore notification setting")
.format(&ApiStringFormat::PropertyString(
&DatastoreNotify::API_SCHEMA,
))
.schema();
pub const DATASTORE_NOTIFY_STRING_SCHEMA: Schema = StringSchema::new(
"Datastore notification setting, enum can be one of 'always', 'never', or 'error'.",
)
.format(&ApiStringFormat::PropertyString(
&DatastoreNotify::API_SCHEMA,
))
.schema();
pub const IGNORE_VERIFIED_BACKUPS_SCHEMA: Schema = BooleanSchema::new(
"Do not verify backups that are already verified if their verification is not outdated.",
@ -199,7 +204,7 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
},
}
)]
#[derive(Serialize, Deserialize, Updater)]
#[derive(Serialize, Deserialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Verification Job
pub struct VerificationJobConfig {
@ -248,7 +253,7 @@ impl VerificationJobConfig {
},
},
)]
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Verification Job
pub struct VerificationJobStatus {
@ -302,7 +307,7 @@ pub struct VerificationJobStatus {
},
}
)]
#[derive(Serialize, Deserialize, Clone, Updater)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job Setup
pub struct TapeBackupJobSetup {
@ -344,7 +349,7 @@ pub struct TapeBackupJobSetup {
},
}
)]
#[derive(Serialize, Deserialize, Clone, Updater)]
#[derive(Serialize, Deserialize, Clone, Updater, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Tape Backup Job
pub struct TapeBackupJobConfig {
@ -368,7 +373,7 @@ pub struct TapeBackupJobConfig {
},
},
)]
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of Tape Backup Job
pub struct TapeBackupJobStatus {
@ -467,6 +472,7 @@ pub const TRANSFER_LAST_SCHEMA: Schema =
},
remote: {
schema: REMOTE_ID_SCHEMA,
optional: true,
},
"remote-store": {
schema: DATASTORE_SCHEMA,
@ -515,7 +521,9 @@ pub struct SyncJobConfig {
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none")]
pub owner: Option<Authid>,
pub remote: String,
#[serde(skip_serializing_if = "Option::is_none")]
/// None implies local sync.
pub remote: Option<String>,
pub remote_store: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub remote_ns: Option<BackupNamespace>,
@ -639,7 +647,7 @@ impl KeepOptions {
},
}
)]
#[derive(Serialize, Deserialize, Default, Updater)]
#[derive(Serialize, Deserialize, Default, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct PruneJobOptions {
@ -682,7 +690,6 @@ impl PruneJobOptions {
},
schedule: {
schema: PRUNE_SCHEDULE_SCHEMA,
optional: true,
},
comment: {
optional: true,
@ -693,7 +700,7 @@ impl PruneJobOptions {
},
},
)]
#[derive(Deserialize, Serialize, Updater)]
#[derive(Deserialize, Serialize, Updater, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Prune configuration.
pub struct PruneJobConfig {
@ -737,7 +744,7 @@ fn is_false(b: &bool) -> bool {
},
},
)]
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Status of prune job
pub struct PruneJobStatus {

View File

@ -84,6 +84,9 @@ pub use maintenance::*;
mod network;
pub use network::*;
mod node;
pub use node::*;
pub use proxmox_auth_api::types as userid;
pub use proxmox_auth_api::types::{Authid, Userid};
pub use proxmox_auth_api::types::{Realm, RealmRef};
@ -188,6 +191,7 @@ const_regex! {
);
pub BLOCKDEVICE_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+)|(?:nvme\d+n\d+)$";
pub BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+\d*)|(?:nvme\d+n\d+(p\d+)?)$";
pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
}
@ -202,6 +206,8 @@ pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_
pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX);
pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
pub const BLOCKDEVICE_DISK_AND_PARTITION_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX);
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
@ -282,6 +288,13 @@ pub const BLOCKDEVICE_NAME_SCHEMA: Schema =
.max_length(64)
.schema();
pub const BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA: Schema =
StringSchema::new("(Partition) block device name (/sys/class/block/<name>).")
.format(&BLOCKDEVICE_DISK_AND_PARTITION_NAME_FORMAT)
.min_length(3)
.max_length(64)
.schema();
pub const DISK_ARRAY_SCHEMA: Schema =
ArraySchema::new("Disk name list.", &BLOCKDEVICE_NAME_SCHEMA).schema();
@ -395,8 +408,6 @@ pub struct APTUpdateInfo {
pub priority: String,
/// Package section
pub section: String,
/// URL under which the package's changelog can be retrieved
pub change_log_url: String,
/// Custom extra field for additional package information
#[serde(skip_serializing_if = "Option::is_none")]
pub extra_info: Option<String>,
@ -432,7 +443,7 @@ pub enum TaskStateType {
upid: { schema: UPID::API_SCHEMA },
},
)]
#[derive(Serialize, Deserialize)]
#[derive(Serialize, Deserialize, Clone, PartialEq)]
/// Task properties.
pub struct TaskListItem {
pub upid: String,

View File

@ -146,3 +146,45 @@ pub struct InfluxDbHttp {
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
#[api]
#[derive(Copy, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
/// Type of the metric server
pub enum MetricServerType {
/// InfluxDB HTTP
#[serde(rename = "influxdb-http")]
InfluxDbHttp,
/// InfluxDB UDP
#[serde(rename = "influxdb-udp")]
InfluxDbUdp,
}
#[api(
properties: {
name: {
schema: METRIC_SERVER_ID_SCHEMA,
},
"type": {
type: MetricServerType,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a metric server that's available for all types
pub struct MetricServerInfo {
pub name: String,
#[serde(rename = "type")]
pub ty: MetricServerType,
/// Enables or disables the metrics server
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
/// The target server
pub server: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}

View File

@ -238,7 +238,7 @@ pub const NETWORK_INTERFACE_LIST_SCHEMA: Schema =
},
}
)]
#[derive(Debug, Serialize, Deserialize)]
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
/// Network Interface configuration
pub struct Interface {
/// Autostart interface
@ -268,9 +268,9 @@ pub struct Interface {
/// IPv6 gateway
pub gateway6: Option<String>,
#[serde(skip_serializing_if = "Vec::is_empty")]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub options: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty")]
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub options6: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
@ -295,6 +295,7 @@ pub struct Interface {
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "bond-primary")]
pub bond_primary: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub bond_xmit_hash_policy: Option<BondXmitHashPolicy>,
}

162
pbs-api-types/src/node.rs Normal file
View File

@ -0,0 +1,162 @@
use std::ffi::OsStr;
use proxmox_schema::*;
use serde::{Deserialize, Serialize};
use crate::StorageStatus;
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node memory usage counters
// NOTE(review): the unit of these counters is not stated here; presumably
// bytes — confirm against the producer filling this struct.
pub struct NodeMemoryCounters {
    /// Total memory
    pub total: u64,
    /// Used memory
    pub used: u64,
    /// Free memory
    pub free: u64,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node swap usage counters
// NOTE(review): unit not stated; presumably bytes — confirm (see also
// NodeMemoryCounters, which mirrors this layout).
pub struct NodeSwapCounters {
    /// Total swap
    pub total: u64,
    /// Used swap
    pub used: u64,
    /// Free swap
    pub free: u64,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint
pub struct NodeInformation {
    /// The SSL Fingerprint
    pub fingerprint: String,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
/// The current kernel version (output of `uname`)
// The field names mirror the members of the `utsname` structure returned by
// `uname(2)`; see `from_uname_parts` below for how they are filled.
pub struct KernelVersionInformation {
    /// The systemname/nodename
    pub sysname: String,
    /// The kernel release number
    pub release: String,
    /// The kernel version
    pub version: String,
    /// The machine architecture
    pub machine: String,
}
impl KernelVersionInformation {
    /// Assemble the struct from the individual `uname(2)` fields.
    ///
    /// Any field that is not valid UTF-8 is replaced by an empty string.
    pub fn from_uname_parts(
        sysname: &OsStr,
        release: &OsStr,
        version: &OsStr,
        machine: &OsStr,
    ) -> Self {
        // Convert an OS string, falling back to "" on invalid UTF-8.
        let utf8_or_empty = |part: &OsStr| part.to_str().map(String::from).unwrap_or_default();

        KernelVersionInformation {
            sysname: utf8_or_empty(sysname),
            release: utf8_or_empty(release),
            version: utf8_or_empty(version),
            machine: utf8_or_empty(machine),
        }
    }

    /// The legacy single-string form: `"<sysname> <release> <version>"`.
    pub fn get_legacy(&self) -> String {
        [
            self.sysname.as_str(),
            self.release.as_str(),
            self.version.as_str(),
        ]
        .join(" ")
    }
}
#[api]
#[derive(Serialize, Deserialize, Copy, Clone)]
#[serde(rename_all = "kebab-case")]
/// The possible BootModes
// Serialized as "efi" / "legacy-bios" due to the kebab-case rename above.
pub enum BootMode {
    /// The BootMode is EFI/UEFI
    Efi,
    /// The BootMode is Legacy BIOS
    LegacyBios,
}
#[api]
#[derive(Serialize, Deserialize, Clone)]
#[serde(rename_all = "lowercase")]
/// Holds the Bootmodes
pub struct BootModeInformation {
    /// The BootMode, either Efi or Bios
    pub mode: BootMode,
    /// SecureBoot status
    // NOTE(review): semantics of `true` (enabled vs. merely supported) are
    // not visible here — confirm against the code populating this field.
    pub secureboot: bool,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Information about the CPU
pub struct NodeCpuInformation {
    /// The CPU model
    pub model: String,
    /// The number of CPU sockets
    pub sockets: usize,
    /// The number of CPU cores (incl. threads)
    pub cpus: usize,
}
#[api(
    properties: {
        memory: {
            type: NodeMemoryCounters,
        },
        root: {
            type: StorageStatus,
        },
        swap: {
            type: NodeSwapCounters,
        },
        loadavg: {
            type: Array,
            items: {
                type: Number,
                description: "the load",
            }
        },
        cpuinfo: {
            type: NodeCpuInformation,
        },
        info: {
            type: NodeInformation,
        }
    },
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// The Node status
// Fields without a `///` doc comment are described via the `properties`
// block in the `#[api]` attribute above instead.
pub struct NodeStatus {
    pub memory: NodeMemoryCounters,
    // NOTE(review): "root" presumably refers to the root filesystem's
    // storage status — confirm against the producer of this struct.
    pub root: StorageStatus,
    pub swap: NodeSwapCounters,
    /// The current uptime of the server.
    // NOTE(review): unit not stated; presumably seconds — confirm.
    pub uptime: u64,
    /// Load for 1, 5 and 15 minutes.
    pub loadavg: [f64; 3],
    /// The current kernel version (NEW struct type).
    pub current_kernel: KernelVersionInformation,
    /// The current kernel version (LEGACY string type).
    pub kversion: String,
    /// Total CPU usage since last query.
    pub cpu: f64,
    /// Total IO wait since last query.
    pub wait: f64,
    pub cpuinfo: NodeCpuInformation,
    pub info: NodeInformation,
    /// Current boot mode
    pub boot_info: BootModeInformation,
}

View File

@ -130,6 +130,8 @@ pub enum TapeDensity {
LTO7M8,
/// LTO8
LTO8,
/// LTO9
LTO9,
}
impl TryFrom<u8> for TapeDensity {
@ -147,6 +149,7 @@ impl TryFrom<u8> for TapeDensity {
0x5c => TapeDensity::LTO7,
0x5d => TapeDensity::LTO7M8,
0x5e => TapeDensity::LTO8,
0x60 => TapeDensity::LTO9,
_ => bail!("unknown tape density code 0x{:02x}", value),
};
Ok(density)

View File

@ -8,7 +8,7 @@ use crate::{
};
pub const TRAFFIC_CONTROL_TIMEFRAME_SCHEMA: Schema =
StringSchema::new("Timeframe to specify when the rule is actice.")
StringSchema::new("Timeframe to specify when the rule is active.")
.format(&DAILY_DURATION_FORMAT)
.schema();
@ -100,7 +100,7 @@ impl RateLimitConfig {
},
},
)]
#[derive(Serialize, Deserialize, Updater)]
#[derive(Clone, Serialize, Deserialize, PartialEq, Updater)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule
pub struct TrafficControlRule {
@ -120,3 +120,22 @@ pub struct TrafficControlRule {
#[serde(skip_serializing_if = "Option::is_none")]
pub timeframe: Option<Vec<String>>,
}
#[api(
properties: {
config: {
type: TrafficControlRule,
},
},
)]
#[derive(Clone, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule config with current rates
pub struct TrafficControlCurrentRate {
#[serde(flatten)]
pub config: TrafficControlRule,
/// Current ingress rate in bytes/second
pub cur_rate_in: u64,
/// Current egress rate in bytes/second
pub cur_rate_out: u64,
}

View File

@ -1,716 +0,0 @@
//! Types for user handling.
//!
//! We have [`Username`]s, [`Realm`]s and [`Tokenname`]s. To uniquely identify a user/API token, they
//! must be combined into a [`Userid`] or [`Authid`].
//!
//! Since they're all string types, they're organized as follows:
//!
//! * [`Username`]: an owned user name. Internally a `String`.
//! * [`UsernameRef`]: a borrowed user name. Pairs with a `Username` the same way a `str` pairs
//! with `String`, meaning you can only make references to it.
//! * [`Realm`]: an owned realm (`String` equivalent).
//! * [`RealmRef`]: a borrowed realm (`str` equivalent).
//! * [`Tokenname`]: an owned API token name (`String` equivalent)
//! * [`TokennameRef`]: a borrowed `Tokenname` (`str` equivalent).
//! * [`Userid`]: an owned user id (`"user@realm"`).
//! * [`Authid`]: an owned Authentication ID (a `Userid` with an optional `Tokenname`).
//! Note that `Userid` and `Authid` do not have a separate borrowed type.
//!
//! Note that `Username`s are not unique, therefore they do not implement `Eq` and cannot be
//! compared directly. If a direct comparison is really required, they can be compared as strings
//! via the `as_str()` method. [`Realm`]s, [`Userid`]s and [`Authid`]s on the other hand can be
//! compared with each other, as in those cases the comparison has meaning.
use std::borrow::Borrow;
use std::fmt;
use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use proxmox_schema::{
api, const_regex, ApiStringFormat, ApiType, Schema, StringSchema, UpdaterType,
};
// we only allow a limited set of characters
// colon is not allowed, because we store usernames in
// colon separated lists)!
// slash is not allowed because it is used as pve API delimiter
// also see "man useradd"
#[macro_export]
macro_rules! USER_NAME_REGEX_STR {
() => {
r"(?:[^\s:/[:cntrl:]]+)"
};
}
#[macro_export]
macro_rules! GROUP_NAME_REGEX_STR {
() => {
USER_NAME_REGEX_STR!()
};
}
#[macro_export]
macro_rules! TOKEN_NAME_REGEX_STR {
() => {
PROXMOX_SAFE_ID_REGEX_STR!()
};
}
#[macro_export]
macro_rules! USER_ID_REGEX_STR {
() => {
concat!(USER_NAME_REGEX_STR!(), r"@", PROXMOX_SAFE_ID_REGEX_STR!())
};
}
#[macro_export]
macro_rules! APITOKEN_ID_REGEX_STR {
() => {
concat!(USER_ID_REGEX_STR!(), r"!", TOKEN_NAME_REGEX_STR!())
};
}
const_regex! {
pub PROXMOX_USER_NAME_REGEX = concat!(r"^", USER_NAME_REGEX_STR!(), r"$");
pub PROXMOX_TOKEN_NAME_REGEX = concat!(r"^", TOKEN_NAME_REGEX_STR!(), r"$");
pub PROXMOX_USER_ID_REGEX = concat!(r"^", USER_ID_REGEX_STR!(), r"$");
pub PROXMOX_APITOKEN_ID_REGEX = concat!(r"^", APITOKEN_ID_REGEX_STR!(), r"$");
pub PROXMOX_AUTH_ID_REGEX = concat!(r"^", r"(?:", USER_ID_REGEX_STR!(), r"|", APITOKEN_ID_REGEX_STR!(), r")$");
pub PROXMOX_GROUP_ID_REGEX = concat!(r"^", GROUP_NAME_REGEX_STR!(), r"$");
}
pub const PROXMOX_USER_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_USER_NAME_REGEX);
pub const PROXMOX_TOKEN_NAME_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_TOKEN_NAME_REGEX);
pub const PROXMOX_USER_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_USER_ID_REGEX);
pub const PROXMOX_TOKEN_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_APITOKEN_ID_REGEX);
pub const PROXMOX_AUTH_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_AUTH_ID_REGEX);
pub const PROXMOX_TOKEN_ID_SCHEMA: Schema = StringSchema::new("API Token ID")
.format(&PROXMOX_TOKEN_ID_FORMAT)
.min_length(3)
.max_length(64)
.schema();
pub const PROXMOX_TOKEN_NAME_SCHEMA: Schema = StringSchema::new("API Token name")
.format(&PROXMOX_TOKEN_NAME_FORMAT)
.min_length(3)
.max_length(64)
.schema();
pub const PROXMOX_GROUP_ID_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&PROXMOX_GROUP_ID_REGEX);
pub const PROXMOX_GROUP_ID_SCHEMA: Schema = StringSchema::new("Group ID")
.format(&PROXMOX_GROUP_ID_FORMAT)
.min_length(3)
.max_length(64)
.schema();
pub const PROXMOX_AUTH_REALM_STRING_SCHEMA: StringSchema =
StringSchema::new("Authentication domain ID")
.format(&super::PROXMOX_SAFE_ID_FORMAT)
.min_length(3)
.max_length(32);
pub const PROXMOX_AUTH_REALM_SCHEMA: Schema = PROXMOX_AUTH_REALM_STRING_SCHEMA.schema();
#[api(
type: String,
format: &PROXMOX_USER_NAME_FORMAT,
min_length: 1,
)]
/// The user name part of a user id.
///
/// This alone does NOT uniquely identify the user and therefore does not implement `Eq`. In order
/// to compare user names directly, they need to be explicitly compared as strings by calling
/// `.as_str()`.
///
/// ```compile_fail
/// fn test(a: Username, b: Username) -> bool {
/// a == b // illegal and does not compile
/// }
/// ```
#[derive(Clone, Debug, Hash, Deserialize, Serialize)]
pub struct Username(String);
/// A reference to a user name part of a user id. This alone does NOT uniquely identify the user.
///
/// This is like a `str` to the `String` of a [`Username`].
#[derive(Debug, Hash)]
pub struct UsernameRef(str);
impl UsernameRef {
    /// Wrap a `&str` as a `&UsernameRef` without copying.
    fn new(s: &str) -> &Self {
        // SAFETY: casts `&str` to `&UsernameRef`, relying on `UsernameRef`
        // being a wrapper around `str` with identical layout.
        // NOTE(review): the struct is not marked `#[repr(transparent)]`,
        // so this layout guarantee is not formally stated — confirm/add it.
        unsafe { &*(s as *const str as *const UsernameRef) }
    }

    /// The user name as a plain string slice.
    pub fn as_str(&self) -> &str {
        &self.0
    }
}
impl std::ops::Deref for Username {
type Target = UsernameRef;
fn deref(&self) -> &UsernameRef {
self.borrow()
}
}
impl Borrow<UsernameRef> for Username {
fn borrow(&self) -> &UsernameRef {
UsernameRef::new(self.0.as_str())
}
}
impl AsRef<UsernameRef> for Username {
fn as_ref(&self) -> &UsernameRef {
self.borrow()
}
}
impl ToOwned for UsernameRef {
type Owned = Username;
fn to_owned(&self) -> Self::Owned {
Username(self.0.to_owned())
}
}
impl TryFrom<String> for Username {
type Error = Error;
fn try_from(s: String) -> Result<Self, Error> {
if !PROXMOX_USER_NAME_REGEX.is_match(&s) {
bail!("invalid user name");
}
Ok(Self(s))
}
}
impl<'a> TryFrom<&'a str> for &'a UsernameRef {
type Error = Error;
fn try_from(s: &'a str) -> Result<&'a UsernameRef, Error> {
if !PROXMOX_USER_NAME_REGEX.is_match(s) {
bail!("invalid name in user id");
}
Ok(UsernameRef::new(s))
}
}
#[api(schema: PROXMOX_AUTH_REALM_SCHEMA)]
/// An authentication realm.
#[derive(Clone, Debug, Eq, PartialEq, Hash, Deserialize, Serialize)]
pub struct Realm(String);
/// A reference to an authentication realm.
///
/// This is like a `str` to the `String` of a `Realm`.
#[derive(Debug, Hash, Eq, PartialEq)]
pub struct RealmRef(str);
impl RealmRef {
    /// Wrap a `&str` as a `&RealmRef` without copying.
    fn new(s: &str) -> &Self {
        // SAFETY: casts `&str` to `&RealmRef`, relying on `RealmRef` being
        // a wrapper around `str` with identical layout.
        // NOTE(review): no `#[repr(transparent)]` on the struct — the
        // layout guarantee is implicit; confirm/add it.
        unsafe { &*(s as *const str as *const RealmRef) }
    }

    /// The realm as a plain string slice.
    pub fn as_str(&self) -> &str {
        &self.0
    }
}
impl std::ops::Deref for Realm {
type Target = RealmRef;
fn deref(&self) -> &RealmRef {
self.borrow()
}
}
impl Borrow<RealmRef> for Realm {
fn borrow(&self) -> &RealmRef {
RealmRef::new(self.0.as_str())
}
}
impl AsRef<RealmRef> for Realm {
fn as_ref(&self) -> &RealmRef {
self.borrow()
}
}
impl ToOwned for RealmRef {
type Owned = Realm;
fn to_owned(&self) -> Self::Owned {
Realm(self.0.to_owned())
}
}
impl TryFrom<String> for Realm {
type Error = Error;
fn try_from(s: String) -> Result<Self, Error> {
PROXMOX_AUTH_REALM_STRING_SCHEMA
.check_constraints(&s)
.map_err(|_| format_err!("invalid realm"))?;
Ok(Self(s))
}
}
impl<'a> TryFrom<&'a str> for &'a RealmRef {
type Error = Error;
fn try_from(s: &'a str) -> Result<&'a RealmRef, Error> {
PROXMOX_AUTH_REALM_STRING_SCHEMA
.check_constraints(s)
.map_err(|_| format_err!("invalid realm"))?;
Ok(RealmRef::new(s))
}
}
impl PartialEq<str> for Realm {
fn eq(&self, rhs: &str) -> bool {
self.0 == rhs
}
}
impl PartialEq<&str> for Realm {
fn eq(&self, rhs: &&str) -> bool {
self.0 == *rhs
}
}
impl PartialEq<str> for RealmRef {
fn eq(&self, rhs: &str) -> bool {
self.0 == *rhs
}
}
impl PartialEq<&str> for RealmRef {
fn eq(&self, rhs: &&str) -> bool {
self.0 == **rhs
}
}
impl PartialEq<RealmRef> for Realm {
fn eq(&self, rhs: &RealmRef) -> bool {
self.0 == rhs.0
}
}
impl PartialEq<Realm> for RealmRef {
fn eq(&self, rhs: &Realm) -> bool {
self.0 == rhs.0
}
}
impl PartialEq<Realm> for &RealmRef {
fn eq(&self, rhs: &Realm) -> bool {
self.0 == rhs.0
}
}
#[api(
type: String,
format: &PROXMOX_TOKEN_NAME_FORMAT,
)]
/// The token ID part of an API token authentication id.
///
/// This alone does NOT uniquely identify the API token - use a full `Authid` for such use cases.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialOrd, PartialEq, Deserialize, Serialize)]
pub struct Tokenname(String);
/// A reference to a token name part of an authentication id. This alone does NOT uniquely identify
/// the user.
///
/// This is like a `str` to the `String` of a [`Tokenname`].
#[derive(Debug, Hash)]
pub struct TokennameRef(str);
#[doc(hidden)]
/// ```compile_fail
/// let a: Username = unsafe { std::mem::zeroed() };
/// let b: Username = unsafe { std::mem::zeroed() };
/// let _ = <Username as PartialEq>::eq(&a, &b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(a, b);
/// ```
///
/// ```compile_fail
/// let a: &UsernameRef = unsafe { std::mem::zeroed() };
/// let b: &UsernameRef = unsafe { std::mem::zeroed() };
/// let _ = <&UsernameRef as PartialEq>::eq(&a, &b);
/// ```
struct _AssertNoEqImpl;
impl TokennameRef {
    /// Wrap a `&str` as a `&TokennameRef` without copying.
    fn new(s: &str) -> &Self {
        // SAFETY: casts `&str` to `&TokennameRef`, relying on
        // `TokennameRef` being a wrapper around `str` with identical
        // layout.
        // NOTE(review): no `#[repr(transparent)]` on the struct —
        // confirm/add it.
        unsafe { &*(s as *const str as *const TokennameRef) }
    }

    /// The token name as a plain string slice.
    pub fn as_str(&self) -> &str {
        &self.0
    }
}
impl std::ops::Deref for Tokenname {
type Target = TokennameRef;
fn deref(&self) -> &TokennameRef {
self.borrow()
}
}
impl Borrow<TokennameRef> for Tokenname {
fn borrow(&self) -> &TokennameRef {
TokennameRef::new(self.0.as_str())
}
}
impl AsRef<TokennameRef> for Tokenname {
fn as_ref(&self) -> &TokennameRef {
self.borrow()
}
}
impl ToOwned for TokennameRef {
type Owned = Tokenname;
fn to_owned(&self) -> Self::Owned {
Tokenname(self.0.to_owned())
}
}
impl TryFrom<String> for Tokenname {
type Error = Error;
fn try_from(s: String) -> Result<Self, Error> {
if !PROXMOX_TOKEN_NAME_REGEX.is_match(&s) {
bail!("invalid token name");
}
Ok(Self(s))
}
}
impl<'a> TryFrom<&'a str> for &'a TokennameRef {
type Error = Error;
fn try_from(s: &'a str) -> Result<&'a TokennameRef, Error> {
if !PROXMOX_TOKEN_NAME_REGEX.is_match(s) {
bail!("invalid token name in user id");
}
Ok(TokennameRef::new(s))
}
}
/// A complete user id consisting of a user name and a realm
#[derive(Clone, Debug, PartialEq, Eq, Hash, Ord, PartialOrd, UpdaterType)]
pub struct Userid {
data: String,
name_len: usize,
}
impl ApiType for Userid {
const API_SCHEMA: Schema = StringSchema::new("User ID")
.format(&PROXMOX_USER_ID_FORMAT)
.min_length(3)
.max_length(64)
.schema();
}
impl Userid {
    /// Construct from pre-validated parts; `name_len` must be the byte
    /// offset of the `'@'` separator inside `data` (callers in this file
    /// only pass validated values).
    const fn new(data: String, name_len: usize) -> Self {
        Self { data, name_len }
    }

    /// The user-name part (everything before the separating `'@'`).
    pub fn name(&self) -> &UsernameRef {
        UsernameRef::new(&self.data[..self.name_len])
    }

    /// The realm part (everything after the separating `'@'`).
    pub fn realm(&self) -> &RealmRef {
        RealmRef::new(&self.data[(self.name_len + 1)..])
    }

    /// The full `"name@realm"` string.
    pub fn as_str(&self) -> &str {
        &self.data
    }

    /// Get the "root@pam" user id.
    pub fn root_userid() -> &'static Self {
        &ROOT_USERID
    }
}
lazy_static! {
pub static ref ROOT_USERID: Userid = Userid::new("root@pam".to_string(), 4);
}
impl From<Authid> for Userid {
fn from(authid: Authid) -> Self {
authid.user
}
}
impl From<(Username, Realm)> for Userid {
fn from(parts: (Username, Realm)) -> Self {
Self::from((parts.0.as_ref(), parts.1.as_ref()))
}
}
impl From<(&UsernameRef, &RealmRef)> for Userid {
fn from(parts: (&UsernameRef, &RealmRef)) -> Self {
let data = format!("{}@{}", parts.0.as_str(), parts.1.as_str());
let name_len = parts.0.as_str().len();
Self { data, name_len }
}
}
impl fmt::Display for Userid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.data.fmt(f)
}
}
impl std::str::FromStr for Userid {
    type Err = Error;

    /// Parse a `"name@realm"` string.
    ///
    /// The split point is the *rightmost* `'@'`: the user-name regex
    /// (`USER_NAME_REGEX_STR`) does not forbid `'@'`, so everything after
    /// the last one is taken as the realm.
    fn from_str(id: &str) -> Result<Self, Error> {
        // Byte offset of the separator == length of the name part.
        let name_len = id
            .as_bytes()
            .iter()
            .rposition(|&b| b == b'@')
            .ok_or_else(|| format_err!("not a valid user id"))?;

        let name = &id[..name_len];
        let realm = &id[(name_len + 1)..];

        if !PROXMOX_USER_NAME_REGEX.is_match(name) {
            bail!("invalid user name in user id");
        }

        PROXMOX_AUTH_REALM_STRING_SCHEMA
            .check_constraints(realm)
            .map_err(|_| format_err!("invalid realm in user id"))?;

        Ok(Self::from((UsernameRef::new(name), RealmRef::new(realm))))
    }
}
impl TryFrom<String> for Userid {
    type Error = Error;

    /// Like the `FromStr` impl, but reuses the owned `String` instead of
    /// re-allocating (the validated input becomes `self.data` directly).
    fn try_from(data: String) -> Result<Self, Error> {
        // Rightmost '@' separates name from realm (the name regex allows
        // '@', the realm format does not).
        let name_len = data
            .as_bytes()
            .iter()
            .rposition(|&b| b == b'@')
            .ok_or_else(|| format_err!("not a valid user id"))?;

        if !PROXMOX_USER_NAME_REGEX.is_match(&data[..name_len]) {
            bail!("invalid user name in user id");
        }

        PROXMOX_AUTH_REALM_STRING_SCHEMA
            .check_constraints(&data[(name_len + 1)..])
            .map_err(|_| format_err!("invalid realm in user id"))?;

        Ok(Self { data, name_len })
    }
}
impl PartialEq<str> for Userid {
fn eq(&self, rhs: &str) -> bool {
self.data == *rhs
}
}
impl PartialEq<&str> for Userid {
fn eq(&self, rhs: &&str) -> bool {
*self == **rhs
}
}
impl PartialEq<String> for Userid {
fn eq(&self, rhs: &String) -> bool {
self == rhs.as_str()
}
}
/// A complete authentication id consisting of a user id and an optional token name.
#[derive(Clone, Debug, Eq, PartialEq, Hash, UpdaterType, Ord, PartialOrd)]
pub struct Authid {
user: Userid,
tokenname: Option<Tokenname>,
}
impl ApiType for Authid {
const API_SCHEMA: Schema = StringSchema::new("Authentication ID")
.format(&PROXMOX_AUTH_ID_FORMAT)
.min_length(3)
.max_length(64)
.schema();
}
impl Authid {
    /// Construct from a user id and an optional token name.
    const fn new(user: Userid, tokenname: Option<Tokenname>) -> Self {
        Self { user, tokenname }
    }

    /// The user id part (without any token name).
    pub fn user(&self) -> &Userid {
        &self.user
    }

    /// Whether this id refers to an API token rather than a plain user.
    pub fn is_token(&self) -> bool {
        self.tokenname.is_some()
    }

    /// The token name, if this is an API token id.
    pub fn tokenname(&self) -> Option<&TokennameRef> {
        self.tokenname.as_deref()
    }

    /// Get the "root@pam" auth id.
    pub fn root_auth_id() -> &'static Self {
        &ROOT_AUTHID
    }
}
lazy_static! {
pub static ref ROOT_AUTHID: Authid = Authid::from(Userid::new("root@pam".to_string(), 4));
}
impl From<Userid> for Authid {
fn from(parts: Userid) -> Self {
Self::new(parts, None)
}
}
impl From<(Userid, Option<Tokenname>)> for Authid {
fn from(parts: (Userid, Option<Tokenname>)) -> Self {
Self::new(parts.0, parts.1)
}
}
impl fmt::Display for Authid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match &self.tokenname {
Some(token) => write!(f, "{}!{}", self.user, token.as_str()),
None => self.user.fmt(f),
}
}
}
impl std::str::FromStr for Authid {
    type Err = Error;

    /// Parse `"user@realm"` or `"user@realm!tokenname"`.
    fn from_str(id: &str) -> Result<Self, Error> {
        // Rightmost '@' separates the user name from the realm.
        let name_len = id
            .as_bytes()
            .iter()
            .rposition(|&b| b == b'@')
            .ok_or_else(|| format_err!("not a valid user id"))?;

        // Rightmost '!' *after* the '@' starts the token name. A '!'
        // before the '@' belongs to the user name, in which case
        // `realm_end` is set to the full length ("no token"), same as
        // when no '!' exists at all.
        let realm_end = id
            .as_bytes()
            .iter()
            .rposition(|&b| b == b'!')
            .map(|pos| if pos < name_len { id.len() } else { pos })
            .unwrap_or_else(|| id.len());

        // Trailing '!' with nothing after it: separator without a name.
        if realm_end == id.len() - 1 {
            bail!("empty token name in userid");
        }

        let user = Userid::from_str(&id[..realm_end])?;

        if id.len() > realm_end {
            let token = Tokenname::try_from(id[(realm_end + 1)..].to_string())?;
            Ok(Self::new(user, Some(token)))
        } else {
            Ok(Self::new(user, None))
        }
    }
}
impl TryFrom<String> for Authid {
    type Error = Error;

    /// Like the `FromStr` impl, but takes ownership of the `String`:
    /// the token part is split off first, then the buffer is truncated
    /// and parsed as a `Userid`.
    fn try_from(mut data: String) -> Result<Self, Error> {
        // Rightmost '@' separates the user name from the realm.
        let name_len = data
            .as_bytes()
            .iter()
            .rposition(|&b| b == b'@')
            .ok_or_else(|| format_err!("not a valid user id"))?;

        // Rightmost '!' after the '@' starts the token name; a '!'
        // before the '@' means "no token" (see the FromStr impl).
        let realm_end = data
            .as_bytes()
            .iter()
            .rposition(|&b| b == b'!')
            .map(|pos| if pos < name_len { data.len() } else { pos })
            .unwrap_or_else(|| data.len());

        // Trailing '!' with nothing after it: separator without a name.
        if realm_end == data.len() - 1 {
            bail!("empty token name in userid");
        }

        let tokenname = if data.len() > realm_end {
            Some(Tokenname::try_from(data[(realm_end + 1)..].to_string())?)
        } else {
            None
        };

        // Drop the token suffix so the remainder is a plain "user@realm".
        data.truncate(realm_end);

        let user: Userid = data.parse()?;

        Ok(Self { user, tokenname })
    }
}
#[test]
fn test_token_id() {
let userid: Userid = "test@pam".parse().expect("parsing Userid failed");
assert_eq!(userid.name().as_str(), "test");
assert_eq!(userid.realm(), "pam");
assert_eq!(userid, "test@pam");
let auth_id: Authid = "test@pam".parse().expect("parsing user Authid failed");
assert_eq!(auth_id.to_string(), "test@pam".to_string());
assert!(!auth_id.is_token());
assert_eq!(auth_id.user(), &userid);
let user_auth_id = Authid::from(userid.clone());
assert_eq!(user_auth_id, auth_id);
assert!(!user_auth_id.is_token());
let auth_id: Authid = "test@pam!bar".parse().expect("parsing token Authid failed");
let token_userid = auth_id.user();
assert_eq!(&userid, token_userid);
assert!(auth_id.is_token());
assert_eq!(
auth_id.tokenname().expect("Token has tokenname").as_str(),
TokennameRef::new("bar").as_str()
);
assert_eq!(auth_id.to_string(), "test@pam!bar".to_string());
}
proxmox_serde::forward_deserialize_to_from_str!(Userid);
proxmox_serde::forward_serialize_to_display!(Userid);
proxmox_serde::forward_deserialize_to_from_str!(Authid);
proxmox_serde::forward_serialize_to_display!(Authid);

View File

@ -44,7 +44,7 @@ impl BackupReader {
/// Create a new instance by upgrading the connection at '/api2/json/reader'
pub async fn start(
client: HttpClient,
client: &HttpClient,
crypt_config: Option<Arc<CryptConfig>>,
datastore: &str,
ns: &BackupNamespace,

View File

@ -374,7 +374,7 @@ pub struct Shell {
/// Interactive prompt.
prompt: String,
/// Calalog reader instance to navigate
/// Catalog reader instance to navigate
catalog: CatalogReader,
/// List of selected paths for restore
@ -633,7 +633,7 @@ impl Shell {
/// This assumes that there are no more symlinks in the path stack.
async fn walk_pxar_archive(
accessor: &Accessor,
mut stack: &mut [PathStackEntry],
stack: &mut [PathStackEntry],
) -> Result<FileEntry, Error> {
if stack[0].pxar.is_none() {
stack[0].pxar = Some(accessor.open_root().await?.lookup_self().await?);
@ -987,8 +987,13 @@ impl Shell {
.metadata()
.clone();
let extractor =
crate::pxar::extract::Extractor::new(rootdir, root_meta, true, false, Flags::DEFAULT);
let extractor = crate::pxar::extract::Extractor::new(
rootdir,
root_meta,
true,
crate::pxar::extract::OverwriteFlags::empty(),
Flags::DEFAULT,
);
let mut extractor = ExtractorState::new(
&mut self.catalog,
@ -1133,14 +1138,14 @@ impl<'a> ExtractorState<'a> {
pub async fn handle_entry(&mut self, entry: catalog::DirEntry) -> Result<(), Error> {
let match_result = self.match_list.matches(&self.path, entry.get_file_mode());
let did_match = match match_result {
Some(MatchType::Include) => true,
Some(MatchType::Exclude) => false,
None => self.matches,
Ok(Some(MatchType::Include)) => true,
Ok(Some(MatchType::Exclude)) => false,
_ => self.matches,
};
match (did_match, &entry.attr) {
(_, DirEntryAttribute::Directory { .. }) => {
self.handle_new_directory(entry, match_result).await?;
self.handle_new_directory(entry, match_result?).await?;
}
(true, DirEntryAttribute::File { .. }) => {
self.dir_stack.push(PathStackEntry::new(entry));

View File

@ -1,4 +1,4 @@
use std::io::Write;
use std::io::{IsTerminal, Write};
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
@ -458,7 +458,7 @@ impl HttpClient {
&auth.ticket,
&auth.token,
) {
if tty::stdout_isatty() {
if std::io::stdout().is_terminal() {
log::error!("storing login ticket failed: {}", err);
}
}
@ -496,7 +496,7 @@ impl HttpClient {
&auth.ticket,
&auth.token,
) {
if tty::stdout_isatty() {
if std::io::stdout().is_terminal() {
log::error!("storing login ticket failed: {}", err);
}
}
@ -548,7 +548,7 @@ impl HttpClient {
fn get_password(username: &Userid, interactive: bool) -> Result<String, Error> {
// If we're on a TTY, query the user for a password
if interactive && tty::stdin_isatty() {
if interactive && std::io::stdin().is_terminal() {
let msg = format!("Password for \"{}\": ", username);
return Ok(String::from_utf8(tty::read_password(&msg)?)?);
}
@ -599,7 +599,7 @@ impl HttpClient {
}
// If we're on a TTY, query the user
if interactive && tty::stdin_isatty() {
if interactive && std::io::stdin().is_terminal() {
log::info!("fingerprint: {}", fp_string);
loop {
eprint!("Are you sure you want to continue connecting? (y/n): ");
@ -764,6 +764,8 @@ impl HttpClient {
);
}
req.headers_mut()
.insert("Connection", HeaderValue::from_str("upgrade").unwrap());
req.headers_mut()
.insert("UPGRADE", HeaderValue::from_str(&protocol_name).unwrap());

View File

@ -16,12 +16,12 @@ use nix::fcntl::OFlag;
use nix::sys::stat::{FileStat, Mode};
use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
use proxmox_sys::error::SysError;
use pxar::encoder::{LinkOffset, SeqWrite};
use pxar::Metadata;
use proxmox_io::vec;
use proxmox_lang::c_str;
use proxmox_sys::error::SysError;
use proxmox_sys::fs::{self, acl, xattr};
use pbs_datastore::catalog::BackupCatalogWriter;
@ -420,7 +420,7 @@ impl Archiver {
for file in dir.iter() {
let file = file?;
let file_name = file.file_name().to_owned();
let file_name = file.file_name();
let file_name_bytes = file_name.to_bytes();
if file_name_bytes == b"." || file_name_bytes == b".." {
continue;
@ -434,25 +434,37 @@ impl Archiver {
assert_single_path_component(os_file_name)?;
let full_path = self.path.join(os_file_name);
let stat = match nix::sys::stat::fstatat(
dir_fd,
file_name.as_c_str(),
nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW,
) {
Ok(stat) => stat,
Err(ref err) if err.not_found() => continue,
Err(err) => return Err(err).context(format!("stat failed on {:?}", full_path)),
let match_path = PathBuf::from("/").join(full_path.clone());
let mut stat_results: Option<FileStat> = None;
let get_file_mode = || {
nix::sys::stat::fstatat(dir_fd, file_name, nix::fcntl::AtFlags::AT_SYMLINK_NOFOLLOW)
};
let match_path = PathBuf::from("/").join(full_path.clone());
if self
let match_result = self
.patterns
.matches(match_path.as_os_str().as_bytes(), Some(stat.st_mode))
== Some(MatchType::Exclude)
{
continue;
.matches(match_path.as_os_str().as_bytes(), || {
Ok::<_, Errno>(match &stat_results {
Some(result) => result.st_mode,
None => stat_results.insert(get_file_mode()?).st_mode,
})
});
match match_result {
Ok(Some(MatchType::Exclude)) => continue,
Ok(_) => (),
Err(err) if err.not_found() => continue,
Err(err) => {
return Err(err).with_context(|| format!("stat failed on {full_path:?}"))
}
}
let stat = stat_results
.map(Ok)
.unwrap_or_else(get_file_mode)
.with_context(|| format!("stat failed on {full_path:?}"))?;
self.entry_counter += 1;
if self.entry_counter > self.entry_limit {
bail!(
@ -462,7 +474,7 @@ impl Archiver {
}
file_list.push(FileListEntry {
name: file_name,
name: file_name.to_owned(),
path: full_path,
stat,
});
@ -533,7 +545,7 @@ impl Archiver {
let match_path = PathBuf::from("/").join(self.path.clone());
if self
.patterns
.matches(match_path.as_os_str().as_bytes(), Some(stat.st_mode))
.matches(match_path.as_os_str().as_bytes(), stat.st_mode)?
== Some(MatchType::Exclude)
{
return Ok(());

View File

@ -9,6 +9,7 @@ use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Context, Error};
use bitflags::bitflags;
use nix::dir::Dir;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
@ -33,10 +34,22 @@ pub struct PxarExtractOptions<'a> {
pub match_list: &'a [MatchEntry],
pub extract_match_default: bool,
pub allow_existing_dirs: bool,
pub overwrite: bool,
pub overwrite_flags: OverwriteFlags,
pub on_error: Option<ErrorHandler>,
}
bitflags! {
#[derive(Default)]
pub struct OverwriteFlags: u8 {
/// Overwrite existing entries file content
const FILE = 0x1;
/// Overwrite existing entry with symlink
const SYMLINK = 0x2;
/// Overwrite existing entry with hardlink
const HARDLINK = 0x4;
}
}
pub type ErrorHandler = Box<dyn FnMut(Error) -> Result<(), Error> + Send>;
pub fn extract_archive<T, F>(
@ -141,7 +154,7 @@ where
dir,
root.metadata().clone(),
options.allow_existing_dirs,
options.overwrite,
options.overwrite_flags,
feature_flags,
);
@ -238,10 +251,14 @@ where
self.extractor.set_path(entry.path().as_os_str().to_owned());
let match_result = self.match_list.matches(
entry.path().as_os_str().as_bytes(),
Some(metadata.file_type() as u32),
);
// We can `unwrap()` safely here because we get a `Result<_, std::convert::Infallible>`
let match_result = self
.match_list
.matches(
entry.path().as_os_str().as_bytes(),
metadata.file_type() as u32,
)
.unwrap();
let did_match = match match_result {
Some(MatchType::Include) => true,
@ -345,7 +362,9 @@ where
metadata,
*size,
&mut contents,
self.extractor.overwrite,
self.extractor
.overwrite_flags
.contains(OverwriteFlags::FILE),
)
} else {
Err(format_err!(
@ -438,7 +457,7 @@ impl std::fmt::Display for PxarExtractContext {
pub struct Extractor {
feature_flags: Flags,
allow_existing_dirs: bool,
overwrite: bool,
overwrite_flags: OverwriteFlags,
dir_stack: PxarDirStack,
/// For better error output we need to track the current path in the Extractor state.
@ -455,13 +474,13 @@ impl Extractor {
root_dir: Dir,
metadata: Metadata,
allow_existing_dirs: bool,
overwrite: bool,
overwrite_flags: OverwriteFlags,
feature_flags: Flags,
) -> Self {
Self {
dir_stack: PxarDirStack::new(root_dir, metadata),
allow_existing_dirs,
overwrite,
overwrite_flags,
feature_flags,
current_path: Arc::new(Mutex::new(OsString::new())),
on_error: Box::new(Err),
@ -547,7 +566,20 @@ impl Extractor {
link: &OsStr,
) -> Result<(), Error> {
let parent = self.parent_fd()?;
nix::unistd::symlinkat(link, Some(parent), file_name)?;
match nix::unistd::symlinkat(link, Some(parent), file_name) {
Ok(()) => {}
Err(nix::errno::Errno::EEXIST)
if self.overwrite_flags.contains(OverwriteFlags::SYMLINK) =>
{
// Never unlink directories
let flag = nix::unistd::UnlinkatFlags::NoRemoveDir;
nix::unistd::unlinkat(Some(parent), file_name, flag)?;
nix::unistd::symlinkat(link, Some(parent), file_name)?;
}
Err(err) => return Err(err.into()),
}
metadata::apply_at(
self.feature_flags,
metadata,
@ -564,13 +596,28 @@ impl Extractor {
let parent = self.parent_fd()?;
let root = self.dir_stack.root_dir_fd()?;
let target = CString::new(link.as_bytes())?;
nix::unistd::linkat(
Some(root.as_raw_fd()),
target.as_c_str(),
Some(parent),
file_name,
nix::unistd::LinkatFlags::NoSymlinkFollow,
)?;
let dolink = || {
nix::unistd::linkat(
Some(root.as_raw_fd()),
target.as_c_str(),
Some(parent),
file_name,
nix::unistd::LinkatFlags::NoSymlinkFollow,
)
};
match dolink() {
Ok(()) => {}
Err(nix::errno::Errno::EEXIST)
if self.overwrite_flags.contains(OverwriteFlags::HARDLINK) =>
{
// Never unlink directories
let flag = nix::unistd::UnlinkatFlags::NoRemoveDir;
nix::unistd::unlinkat(Some(parent), file_name, flag)?;
dolink()?;
}
Err(err) => return Err(err.into()),
}
Ok(())
}
@ -874,7 +921,7 @@ where
tarencoder
.add_entry(&mut header, path, tokio::io::empty())
.await
.context("coult not send fifo entry")?;
.context("could not send fifo entry")?;
}
EntryKind::Directory => {
log::debug!("adding '{}' to tar", path.display());
@ -1032,7 +1079,13 @@ where
)
.with_context(|| format!("unable to open target directory {:?}", destination.as_ref()))?;
Ok(Extractor::new(dir, metadata, false, false, Flags::DEFAULT))
Ok(Extractor::new(
dir,
metadata,
false,
OverwriteFlags::empty(),
Flags::DEFAULT,
))
}
pub async fn extract_sub_dir<T, DEST, PATH>(
@ -1166,7 +1219,7 @@ where
.contents()
.await
.context("found regular file entry without contents in archive")?,
extractor.overwrite,
extractor.overwrite_flags.contains(OverwriteFlags::FILE),
)
.await?
}
@ -1214,7 +1267,7 @@ where
&mut decoder
.contents()
.context("found regular file entry without contents in archive")?,
extractor.overwrite,
extractor.overwrite_flags.contains(OverwriteFlags::FILE),
)
.await?
}

View File

@ -59,7 +59,7 @@ pub use flags::Flags;
pub use create::{create_archive, PxarCreateOptions};
pub use extract::{
create_tar, create_zip, extract_archive, extract_sub_dir, extract_sub_dir_seq, ErrorHandler,
PxarExtractContext, PxarExtractOptions,
OverwriteFlags, PxarExtractContext, PxarExtractOptions,
};
/// The format requires to build sorted directory lookup tables in

View File

@ -3,7 +3,7 @@ use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use anyhow::{bail, Error};
use anyhow::{bail, format_err, Error};
use proxmox_async::runtime::block_on;
@ -51,7 +51,8 @@ impl RemoteChunkReader {
self.client.download_chunk(digest, &mut chunk_data).await?;
let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])?;
let chunk = DataBlob::load_from_reader(&mut &chunk_data[..])
.map_err(|err| format_err!("Failed to parse chunk {} - {err}", hex::encode(digest)))?;
match self.crypt_mode {
CryptMode::Encrypt => match chunk.crypt_mode()? {

View File

@ -96,6 +96,16 @@ pub async fn display_task_log(
}
}
let status_path = format!("api2/json/nodes/localhost/tasks/{upid_encoded}/status");
let task_result = &client.get(&status_path, None).await?["data"];
if task_result["status"].as_str() == Some("stopped") {
match task_result["exitstatus"].as_str() {
None => bail!("task stopped with unknown status"),
Some(status) if status == "OK" || status.starts_with("WARNINGS") => (),
Some(status) => bail!("task failed (status {status})"),
}
}
Ok(())
};

View File

@ -1,4 +1,4 @@
use std::io::Read;
use std::io::{IsTerminal, Read};
use std::os::unix::io::{FromRawFd, RawFd};
use std::path::PathBuf;
@ -351,7 +351,7 @@ pub fn get_encryption_key_password() -> Result<Vec<u8>, Error> {
}
// If we're on a TTY, query the user for a password
if tty::stdin_isatty() {
if std::io::stdin().is_terminal() {
return tty::read_password("Encryption Key Password: ");
}

View File

@ -661,11 +661,11 @@ impl<R: Read + Seek> CatalogReader<R> {
/// Finds all entries matching the given match patterns and calls the
/// provided callback on them.
pub fn find(
pub fn find<'a>(
&mut self,
parent: &DirEntry,
file_path: &mut Vec<u8>,
match_list: &impl MatchList, //&[MatchEntry],
match_list: &'a impl MatchList<'a>, //&[MatchEntry],
callback: &mut dyn FnMut(&[u8]) -> Result<(), Error>,
) -> Result<(), Error> {
let file_len = file_path.len();
@ -678,9 +678,9 @@ impl<R: Read + Seek> CatalogReader<R> {
}
file_path.extend(&e.name);
match match_list.matches(&file_path, e.get_file_mode()) {
Some(MatchType::Exclude) => continue,
Some(MatchType::Include) => callback(file_path)?,
None => (),
Ok(Some(MatchType::Exclude)) => continue,
Ok(Some(MatchType::Include)) => callback(file_path)?,
_ => (),
}
if is_dir {
self.find(&e, file_path, match_list, callback)?;

View File

@ -867,26 +867,26 @@ impl DataStore {
.unwrap_or(false)
}
let handle_entry_err = |err: walkdir::Error| {
if let Some(inner) = err.io_error() {
if let Some(path) = err.path() {
if inner.kind() == io::ErrorKind::PermissionDenied {
// only allow to skip ext4 fsck directory, avoid GC if, for example,
// a user got file permissions wrong on datastore rsync to new server
if err.depth() > 1 || !path.ends_with("lost+found") {
bail!("cannot continue garbage-collection safely, permission denied on: {:?}", path)
}
} else {
bail!(
"unexpected error on datastore traversal: {} - {:?}",
inner,
path
)
}
} else {
bail!("unexpected error on datastore traversal: {}", inner)
// first, extract the actual IO error and the affected path
let (inner, path) = match (err.io_error(), err.path()) {
(None, _) => return Ok(()), // not an IO-error
(Some(inner), Some(path)) => (inner, path),
(Some(inner), None) => bail!("unexpected error on datastore traversal: {inner}"),
};
if inner.kind() == io::ErrorKind::PermissionDenied {
if err.depth() <= 1 && path.ends_with("lost+found") {
// allow skipping of (root-only) ext4 fsck-directory on EPERM ..
return Ok(());
}
// .. but do not ignore EPERM in general, otherwise we might prune too many chunks.
// E.g., if users messed up with owner/perms on a rsync
bail!("cannot continue garbage-collection safely, permission denied on: {path:?}");
} else if inner.kind() == io::ErrorKind::NotFound {
log::info!("ignoring vanished file: {path:?}");
return Ok(());
} else {
bail!("unexpected error on datastore traversal: {inner} - {path:?}");
}
Ok(())
};
for entry in walker.filter_entry(|e| !is_hidden(e)) {
let path = match entry {

View File

@ -35,8 +35,8 @@ pub const DYNAMIC_SIZED_CHUNK_INDEX_1_0: [u8; 8] = [28, 145, 78, 165, 25, 186, 1
///
/// (MAGIC || CRC32 || Data)
///
/// This is basically the same format we use for chunks, but
/// with other magic numbers so that we can distinguish them.
/// This format is used for blobs (stored in a BackupDir and accessed directly) and chunks (stored
/// in a chunk store and accessed via a ChunkReader / index file).
#[derive(Endian)]
#[repr(C, packed)]
pub struct DataBlobHeader {

View File

@ -21,7 +21,7 @@
//! chunks (VMs, whole block devices).
//!
//! A chunk is defined as a binary blob, which is stored inside a
//! [ChunkStore](struct.ChunkStore.html) instead of the backup directory
//! [ChunkStore] instead of the backup directory
//! directly, and can be addressed by its SHA256 digest.
//!
//!
@ -98,7 +98,7 @@
//! still referenced.
//!
//! To do this we use the
//! [ProcessLocker](../tools/struct.ProcessLocker.html).
//! [ProcessLocker](proxmox_sys::process_locker::ProcessLocker).
//!
//! ### ChunkStore-wide
//!

View File

@ -14,7 +14,7 @@ pub trait ReadChunk {
fn read_chunk(&self, digest: &[u8; 32]) -> Result<Vec<u8>, Error>;
}
pub trait AsyncReadChunk: Send {
pub trait AsyncReadChunk: Send + Sync {
/// Returns the encoded chunk data
fn read_raw_chunk<'a>(
&'a self,

View File

@ -398,20 +398,22 @@ pub fn read_element_status<F: AsRawFd>(file: &mut F) -> Result<MtxStatus, Error>
// get the serial + vendor + model,
// some changer require this to be an extra scsi command
let page = get_element(
// some changers don't support this
if let Ok(page) = get_element(
&mut sg_raw,
ElementType::DataTransferWithDVCID,
allocation_len,
false,
)?;
// should be in same order and same count, but be on the safe side.
// there should not be too many drives normally
for drive in drives.iter_mut() {
for drive2 in &page.drives {
if drive2.element_address == drive.element_address {
drive.vendor = drive2.vendor.clone();
drive.model = drive2.model.clone();
drive.drive_serial_number = drive2.drive_serial_number.clone();
) {
// should be in same order and same count, but be on the safe side.
// there should not be too many drives normally
for drive in drives.iter_mut() {
for drive2 in &page.drives {
if drive2.element_address == drive.element_address {
drive.vendor = drive2.vendor.clone();
drive.model = drive2.model.clone();
drive.drive_serial_number = drive2.drive_serial_number.clone();
}
}
}
}

View File

@ -56,7 +56,7 @@ fn decode_volume_statistics(data: &[u8]) -> Result<Lp17VolumeStatistics, Error>
let read_be_counter = |reader: &mut &[u8], len: u8| {
let len = len as usize;
if len == 0 || len > 8 {
bail!("invalid conter size '{}'", len);
bail!("invalid counter size '{}'", len);
}
let mut buffer = [0u8; 8];
reader.read_exact(&mut buffer[..len])?;

View File

@ -893,7 +893,7 @@ fn scsi_cmd_mode_sense(
/// True if the given sense info is INVALID COMMAND OPERATION CODE
/// means that the device does not know/support the command
/// https://www.t10.org/lists/asc-num.htm#ASC_20
/// <https://www.t10.org/lists/asc-num.htm#ASC_20>
pub fn sense_err_is_invalid_command(err: &SenseInfo) -> bool {
err.sense_key == SENSE_KEY_ILLEGAL_REQUEST && err.asc == 0x20 && err.ascq == 0x00
}

View File

@ -75,7 +75,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
let client = connect(&repo)?;
let client = BackupReader::start(
client,
&client,
crypt_config.clone(),
repo.store(),
&backup_ns,
@ -187,7 +187,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
};
let client = BackupReader::start(
client,
&client,
crypt_config.clone(),
repo.store(),
&backup_ns,

View File

@ -1,3 +1,4 @@
use std::io::IsTerminal;
use std::path::PathBuf;
use anyhow::{bail, format_err, Error};
@ -100,7 +101,7 @@ fn create(kdf: Option<Kdf>, path: Option<String>, hint: Option<String>) -> Resul
}
Kdf::Scrypt | Kdf::PBKDF2 => {
// always read passphrase from tty
if !tty::stdin_isatty() {
if !std::io::stdin().is_terminal() {
bail!("unable to read passphrase - no tty");
}
@ -236,7 +237,7 @@ fn change_passphrase(
let kdf = kdf.unwrap_or_default();
if !tty::stdin_isatty() {
if !std::io::stdin().is_terminal() {
bail!("unable to change passphrase - no tty");
}
@ -359,7 +360,7 @@ fn import_master_pubkey(path: String) -> Result<(), Error> {
/// encryption key onto the backup server along with each backup.
fn create_master_key() -> Result<(), Error> {
// we need a TTY to query the new password
if !tty::stdin_isatty() {
if !std::io::stdin().is_terminal() {
bail!("unable to create master key - no tty");
}

View File

@ -1034,7 +1034,7 @@ async fn create_backup(
}
if dry_run {
log::info!("dry-run: no upload happend");
log::info!("dry-run: no upload happened");
return Ok(Value::Null);
}
@ -1234,6 +1234,21 @@ We do not extract '.pxar' archives when writing to standard output.
optional: true,
default: false,
},
"overwrite-files": {
description: "overwrite already existing files",
optional: true,
default: false,
},
"overwrite-symlinks": {
description: "overwrite already existing entries by archives symlink",
optional: true,
default: false,
},
"overwrite-hardlinks": {
description: "overwrite already existing entries by archives hardlink",
optional: true,
default: false,
},
"ignore-extract-device-errors": {
type: Boolean,
description: "ignore errors that occur during device node extraction",
@ -1252,6 +1267,9 @@ async fn restore(
ignore_ownership: bool,
ignore_permissions: bool,
overwrite: bool,
overwrite_files: bool,
overwrite_symlinks: bool,
overwrite_hardlinks: bool,
ignore_extract_device_errors: bool,
) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
@ -1295,7 +1313,7 @@ async fn restore(
};
let client = BackupReader::start(
client,
&client,
crypt_config.clone(),
repo.store(),
&ns,
@ -1388,11 +1406,25 @@ async fn restore(
None
};
let mut overwrite_flags = pbs_client::pxar::OverwriteFlags::empty();
overwrite_flags.set(pbs_client::pxar::OverwriteFlags::FILE, overwrite_files);
overwrite_flags.set(
pbs_client::pxar::OverwriteFlags::SYMLINK,
overwrite_symlinks,
);
overwrite_flags.set(
pbs_client::pxar::OverwriteFlags::HARDLINK,
overwrite_hardlinks,
);
if overwrite {
overwrite_flags.insert(pbs_client::pxar::OverwriteFlags::all());
}
let options = pbs_client::pxar::PxarExtractOptions {
match_list: &[],
extract_match_default: true,
allow_existing_dirs,
overwrite,
overwrite_flags,
on_error,
};
@ -1421,7 +1453,7 @@ async fn restore(
},
options,
)
.map_err(|err| format_err!("error extracting archive - {}", err))?;
.map_err(|err| format_err!("error extracting archive - {:#}", err))?;
} else {
let mut writer = std::fs::OpenOptions::new()
.write(true)

View File

@ -234,7 +234,7 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> {
};
let client = BackupReader::start(
client,
&client,
crypt_config.clone(),
repo.store(),
&backup_ns,

View File

@ -177,7 +177,7 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
}
)]
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {
async fn forget_snapshots(param: Value) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?;
let backup_ns = optional_ns_param(&param)?;
@ -188,13 +188,13 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
let result = client
client
.delete(&path, Some(snapshot_args(&backup_ns, &snapshot)?))
.await?;
record_repository(&repo);
Ok(result)
Ok(())
}
#[api(

View File

@ -107,7 +107,7 @@ async fn list_files(
) -> Result<Vec<ArchiveEntry>, Error> {
let client = connect(&repo)?;
let client = BackupReader::start(
client,
&client,
crypt_config.clone(),
repo.store(),
&namespace,
@ -430,7 +430,7 @@ async fn extract(
let client = connect(&repo)?;
let client = BackupReader::start(
client,
&client,
crypt_config.clone(),
repo.store(),
&namespace,

View File

@ -3,7 +3,6 @@ mod api;
pub use api::*;
pub mod auth;
pub use auth::*;
mod watchdog;
pub use watchdog::*;

View File

@ -124,10 +124,10 @@ pub fn update_rrd(path: String, time: Option<u64>, value: f64) -> Result<(), Err
type: CF,
},
resolution: {
description: "Time resulution",
description: "Time resolution",
},
start: {
description: "Start time. If not sepecified, we simply extract 10 data points.",
description: "Start time. If not specified, we simply extract 10 data points.",
optional: true,
},
end: {
@ -292,11 +292,11 @@ pub fn resize_rrd(path: String, rra_index: usize, slots: i64) -> Result<(), Erro
let new_slots = (rra.data.len() as i64) + slots;
if new_slots < 1 {
bail!("numer of new slots is too small ('{}' < 1)", new_slots);
bail!("number of new slots is too small ('{}' < 1)", new_slots);
}
if new_slots > 1024 * 1024 {
bail!("numer of new slots is too big ('{}' > 1M)", new_slots);
bail!("number of new slots is too big ('{}' > 1M)", new_slots);
}
let rra_end = rra.slot_end_time(rrd.source.last_update as u64);

View File

@ -102,7 +102,7 @@ impl RRDCache {
/// * cf=average,r=7*86400,n=570 => 10years
/// * cf=maximum,r=7*86400,n=570 => 10year
///
/// The resultion data file size is about 80KB.
/// The resulting data file size is about 80KB.
pub fn create_proxmox_backup_default_rrd(dst: DST) -> RRD {
let rra_list = vec![
// 1 min * 1440 => 1 day
@ -207,7 +207,7 @@ impl RRDCache {
/// Extract data from cached RRD
///
/// `start`: Start time. If not sepecified, we simply extract 10 data points.
/// `start`: Start time. If not specified, we simply extract 10 data points.
///
/// `end`: End time. Default is to use the current time.
pub fn extract_cached_data(

View File

@ -147,7 +147,7 @@ impl DataSource {
// we update last_value anyways, so that we can compute the diff
// next time.
self.last_value = value;
bail!("conter overflow/reset detected");
bail!("counter overflow/reset detected");
} else {
value - self.last_value
};

View File

@ -13,7 +13,7 @@ pub const PROXMOX_RRD_MAGIC_1_0: [u8; 8] = [206, 46, 26, 212, 172, 158, 5, 186];
use crate::rrd::{DataSource, CF, DST, RRA, RRD};
bitflags! {
/// Flags to specify the data soure type and consolidation function
/// Flags to specify the data source type and consolidation function
pub struct RRAFlags: u64 {
// Data Source Types
const DST_GAUGE = 1;
@ -34,9 +34,9 @@ bitflags! {
/// RRD files.
#[repr(C)]
pub struct RRAv1 {
/// Defined the data soure type and consolidation function
/// Defined the data source type and consolidation function
pub flags: RRAFlags,
/// Resulution (seconds)
/// Resolution (seconds)
pub resolution: u64,
/// Last update time (epoch)
pub last_update: f64,
@ -213,7 +213,7 @@ impl RRDv1 {
let (start, reso, data) = self.hour_max.extract_data();
day_max.insert_data(start, reso, data)?;
// compute montly average (merge old self.month_avg,
// compute monthly average (merge old self.month_avg,
// self.week_avg and self.day_avg)
let mut month_avg = RRA::new(CF::Average, 30 * 60, 1440);
@ -228,7 +228,7 @@ impl RRDv1 {
let (start, reso, data) = self.day_avg.extract_data();
month_avg.insert_data(start, reso, data)?;
// compute montly maximum (merge old self.month_max,
// compute monthly maximum (merge old self.month_max,
// self.week_max and self.day_max)
let mut month_max = RRA::new(CF::Maximum, 30 * 60, 1440);

View File

@ -12,7 +12,9 @@ use futures::select;
use tokio::signal::unix::{signal, SignalKind};
use pathpatterns::{MatchEntry, MatchType, PatternFlag};
use pbs_client::pxar::{format_single_line_entry, Flags, PxarExtractOptions, ENCODER_MAX_ENTRIES};
use pbs_client::pxar::{
format_single_line_entry, Flags, OverwriteFlags, PxarExtractOptions, ENCODER_MAX_ENTRIES,
};
use proxmox_router::cli::*;
use proxmox_schema::api;
@ -74,10 +76,25 @@ fn extract_archive_from_reader<R: std::io::Read>(
default: false,
},
"overwrite": {
description: "overwrite already existing files, symlinks and hardlinks",
optional: true,
default: false,
},
"overwrite-files": {
description: "overwrite already existing files",
optional: true,
default: false,
},
"overwrite-symlinks": {
description: "overwrite already existing entries by archives symlink",
optional: true,
default: false,
},
"overwrite-hardlinks": {
description: "overwrite already existing entries by archives hardlink",
optional: true,
default: false,
},
"files-from": {
description: "File containing match pattern for files to restore.",
optional: true,
@ -116,6 +133,9 @@ fn extract_archive(
no_acls: bool,
allow_existing_dirs: bool,
overwrite: bool,
overwrite_files: bool,
overwrite_symlinks: bool,
overwrite_hardlinks: bool,
files_from: Option<String>,
no_device_nodes: bool,
no_fifos: bool,
@ -142,6 +162,14 @@ fn extract_archive(
feature_flags.remove(Flags::WITH_SOCKETS);
}
let mut overwrite_flags = OverwriteFlags::empty();
overwrite_flags.set(OverwriteFlags::FILE, overwrite_files);
overwrite_flags.set(OverwriteFlags::SYMLINK, overwrite_symlinks);
overwrite_flags.set(OverwriteFlags::HARDLINK, overwrite_hardlinks);
if overwrite {
overwrite_flags.insert(OverwriteFlags::all());
}
let pattern = pattern.unwrap_or_default();
let target = target.as_ref().map_or_else(|| ".", String::as_str);
@ -183,7 +211,7 @@ fn extract_archive(
let options = PxarExtractOptions {
match_list: &match_list,
allow_existing_dirs,
overwrite,
overwrite_flags,
extract_match_default,
on_error,
};

View File

@ -73,7 +73,7 @@ fn pxar_create_and_extract() {
.unwrap_or_else(|err| panic!("Failed to invoke 'rm': {}", err));
// If source and destination folder contain the same content,
// the output of the rsync invokation should yield no lines.
// the output of the rsync invocation should yield no lines.
if linecount != 0 {
panic!("pxar create and extract did not yield the same contents");
}

View File

@ -1,55 +1,12 @@
use anyhow::Error;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{METRIC_SERVER_ID_SCHEMA, PRIV_SYS_AUDIT, SINGLE_LINE_COMMENT_SCHEMA};
use pbs_api_types::{MetricServerInfo, PRIV_SYS_AUDIT};
use pbs_config::metrics;
#[api]
#[derive(Deserialize, Serialize, PartialEq, Eq)]
/// Type of the metric server
pub enum MetricServerType {
/// InfluxDB HTTP
#[serde(rename = "influxdb-http")]
InfluxDbHttp,
/// InfluxDB UDP
#[serde(rename = "influxdb-udp")]
InfluxDbUdp,
}
#[api(
properties: {
name: {
schema: METRIC_SERVER_ID_SCHEMA,
},
"type": {
type: MetricServerType,
},
comment: {
optional: true,
schema: SINGLE_LINE_COMMENT_SCHEMA,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Basic information about a metric server thats available for all types
pub struct MetricServerInfo {
pub name: String,
#[serde(rename = "type")]
pub ty: MetricServerType,
/// Enables or disables the metrics server
#[serde(skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
/// The target server
pub server: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>,
}
#[api(
input: {
properties: {},

View File

@ -34,13 +34,13 @@ use crate::backup::{check_ns_modification_privs, check_ns_privs, NS_PRIVS_OK};
},
},
},
returns: pbs_api_types::ADMIN_DATASTORE_LIST_NAMESPACE_RETURN_TYPE,
returns: { type: BackupNamespace },
access: {
permission: &Permission::Anybody,
description: "Requires on /datastore/{store}[/{parent}] DATASTORE_MODIFY"
},
)]
/// List the namespaces of a datastore.
/// Create a new datastore namespace.
pub fn create_namespace(
store: String,
name: String,

View File

@ -1,32 +1,12 @@
use anyhow::Error;
use serde::{Deserialize, Serialize};
use proxmox_router::{Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{TrafficControlRule, PRIV_SYS_AUDIT};
use pbs_api_types::{TrafficControlCurrentRate, TrafficControlRule, PRIV_SYS_AUDIT};
use crate::traffic_control_cache::TRAFFIC_CONTROL_CACHE;
#[api(
properties: {
config: {
type: TrafficControlRule,
},
},
)]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Traffic control rule config with current rates
pub struct TrafficControlCurrentRate {
#[serde(flatten)]
config: TrafficControlRule,
/// Current ingress rate in bytes/second
cur_rate_in: u64,
/// Current egress rate in bytes/second
cur_rate_out: u64,
}
#[api(
input: {
properties: {},

View File

@ -179,7 +179,7 @@ impl BackupEnvironment {
state.ensure_unfinished()?;
let mut data = match state.fixed_writers.get_mut(&wid) {
let data = match state.fixed_writers.get_mut(&wid) {
Some(data) => data,
None => bail!("fixed writer '{}' not registered", wid),
};
@ -233,7 +233,7 @@ impl BackupEnvironment {
state.ensure_unfinished()?;
let mut data = match state.dynamic_writers.get_mut(&wid) {
let data = match state.dynamic_writers.get_mut(&wid) {
Some(data) => data,
None => bail!("dynamic writer '{}' not registered", wid),
};
@ -328,7 +328,7 @@ impl BackupEnvironment {
state.ensure_unfinished()?;
let mut data = match state.dynamic_writers.get_mut(&wid) {
let data = match state.dynamic_writers.get_mut(&wid) {
Some(data) => data,
None => bail!("dynamic writer '{}' not registered", wid),
};
@ -362,7 +362,7 @@ impl BackupEnvironment {
state.ensure_unfinished()?;
let mut data = match state.fixed_writers.get_mut(&wid) {
let data = match state.fixed_writers.get_mut(&wid) {
Some(data) => data,
None => bail!("fixed writer '{}' not registered", wid),
};

View File

@ -214,9 +214,12 @@ fn upgrade_to_backup_protocol(
env.debug = debug;
env.last_backup = last_backup;
let origin = match rpcenv.get_client_ip().map(|addr| addr.ip()) {
Some(ip) => format!(" from {ip}"),
None => "".into(),
};
env.log(format!(
"starting new {} on datastore '{}': {:?}",
worker_type, store, path
"starting new {worker_type} on datastore '{store}'{origin}: {path:?}",
));
let service =

View File

@ -9,11 +9,12 @@ use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironme
use proxmox_schema::{api, param_bail, ApiType};
use proxmox_section_config::SectionConfigData;
use proxmox_sys::{task_warn, WorkerTaskContext};
use proxmox_uuid::Uuid;
use pbs_api_types::{
Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning,
DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
};
use pbs_config::BackupLockGuard;
use pbs_datastore::chunk_store::ChunkStore;
@ -21,7 +22,7 @@ use pbs_datastore::chunk_store::ChunkStore;
use crate::api2::admin::{
prune::list_prune_jobs, sync::list_sync_jobs, verify::list_verification_jobs,
};
use crate::api2::config::prune::delete_prune_job;
use crate::api2::config::prune::{delete_prune_job, do_create_prune_job};
use crate::api2::config::sync::delete_sync_job;
use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs};
use crate::api2::config::verify::delete_verification_job;
@ -91,10 +92,7 @@ pub(crate) fn do_create_datastore(
pbs_config::datastore::save_config(&config)?;
jobstate::create_state_file("prune", &datastore.name)?;
jobstate::create_state_file("garbage_collection", &datastore.name)?;
Ok(())
jobstate::create_state_file("garbage_collection", &datastore.name)
}
#[api(
@ -127,12 +125,45 @@ pub fn create_datastore(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
let mut id = format!("default-{}-{}", config.name, Uuid::generate());
id.truncate(32);
PruneJobConfig {
id,
store: config.name.clone(),
comment: None,
disable: false,
schedule: schedule.clone(),
options: PruneJobOptions {
keep: config.keep.clone(),
max_depth: None,
ns: None,
},
}
});
// clearing prune settings in the datastore config, as they are now handled by prune jobs
let config = DataStoreConfig {
prune_schedule: None,
keep: KeepOptions::default(),
..config
};
WorkerTask::new_thread(
"create-datastore",
Some(config.name.to_string()),
auth_id.to_string(),
to_stdout,
move |worker| do_create_datastore(lock, section_config, config, Some(&worker)),
move |worker| {
do_create_datastore(lock, section_config, config, Some(&worker))?;
if let Some(prune_job_config) = prune_job_config {
do_create_prune_job(prune_job_config, Some(&worker))
} else {
Ok(())
}
},
)
}

View File

@ -17,7 +17,7 @@ use pbs_config::metrics;
async fn test_server(address: &str) -> Result<(), Error> {
test_influxdb_udp(address)
.await
.map_err(|err| format_err!("cannot conect to {}: {}", address, err))
.map_err(|err| format_err!("cannot connect to {}: {}", address, err))
}
#[api(

View File

@ -1,5 +1,7 @@
use anyhow::Error;
use hex::FromHex;
use proxmox_sys::task_log;
use proxmox_sys::WorkerTaskContext;
use serde::{Deserialize, Serialize};
use serde_json::Value;
@ -56,6 +58,31 @@ pub fn list_prune_jobs(
Ok(list)
}
pub fn do_create_prune_job(
config: PruneJobConfig,
worker: Option<&dyn WorkerTaskContext>,
) -> Result<(), Error> {
let _lock = prune::lock_config()?;
let (mut section_config, _digest) = prune::config()?;
if section_config.sections.get(&config.id).is_some() {
param_bail!("id", "job '{}' already exists.", config.id);
}
section_config.set_data(&config.id, "prune", &config)?;
prune::save_config(&section_config)?;
crate::server::jobstate::create_state_file("prunejob", &config.id)?;
if let Some(worker) = worker {
task_log!(worker, "Prune job created: {}", config.id);
}
Ok(())
}
#[api(
protected: true,
input: {
@ -81,21 +108,7 @@ pub fn create_prune_job(
user_info.check_privs(&auth_id, &config.acl_path(), PRIV_DATASTORE_MODIFY, true)?;
let _lock = prune::lock_config()?;
let (mut section_config, _digest) = prune::config()?;
if section_config.sections.get(&config.id).is_some() {
param_bail!("id", "job '{}' already exists.", config.id);
}
section_config.set_data(&config.id, "prune", &config)?;
prune::save_config(&section_config)?;
crate::server::jobstate::create_state_file("prunejob", &config.id)?;
Ok(())
do_create_prune_job(config, None)
}
#[api(

View File

@ -268,7 +268,7 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
for job in job_list {
if job.remote == name {
if job.remote.map_or(false, |id| id == name) {
param_bail!(
"name",
"remote '{}' is used by sync job '{}' (datastore '{}')",
@ -300,8 +300,8 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
Ok(())
}
/// Helper to get client for remote.cfg entry
pub async fn remote_client(
/// Helper to get client for remote.cfg entry without login, just config
pub fn remote_client_config(
remote: &Remote,
limit: Option<RateLimitConfig>,
) -> Result<HttpClient, Error> {
@ -320,6 +320,16 @@ pub async fn remote_client(
&remote.config.auth_id,
options,
)?;
Ok(client)
}
/// Helper to get client for remote.cfg entry
pub async fn remote_client(
remote: &Remote,
limit: Option<RateLimitConfig>,
) -> Result<HttpClient, Error> {
let client = remote_client_config(remote, limit)?;
let _auth_info = client
.login() // make sure we can auth
.await

View File

@ -25,8 +25,13 @@ pub fn check_sync_job_read_access(
return false;
}
let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
remote_privs & PRIV_REMOTE_AUDIT != 0
if let Some(remote) = &job.remote {
let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]);
remote_privs & PRIV_REMOTE_AUDIT != 0
} else {
let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]);
source_ds_privs & PRIV_DATASTORE_AUDIT != 0
}
}
/// checks whether user can run the corresponding pull job
@ -63,8 +68,11 @@ pub fn check_sync_job_modify_access(
return false;
}
let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote, &job.remote_store]);
remote_privs & PRIV_REMOTE_READ != 0
if let Some(remote) = &job.remote {
let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]);
return remote_privs & PRIV_REMOTE_READ != 0;
}
true
}
#[api(
@ -131,6 +139,10 @@ pub fn create_sync_job(
bail!("permission check failed");
}
if config.remote.is_none() && config.store.eq(&config.remote_store) {
bail!("source and target datastore can't be the same");
}
if let Some(max_depth) = config.max_depth {
if let Some(ref ns) = config.ns {
ns.check_max_depth(max_depth)?;
@ -191,6 +203,8 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Sync
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
/// Delete the remote property(-> meaning local).
Remote,
/// Delete the owner property.
Owner,
/// Delete the comment property.
@ -275,6 +289,9 @@ pub fn update_sync_job(
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::Remote => {
data.remote = None;
}
DeletableProperty::Owner => {
data.owner = None;
}
@ -334,7 +351,7 @@ pub fn update_sync_job(
data.ns = Some(ns);
}
if let Some(remote) = update.remote {
data.remote = remote;
data.remote = Some(remote);
}
if let Some(remote_store) = update.remote_store {
data.remote_store = remote_store;
@ -503,7 +520,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
let mut job = SyncJobConfig {
id: "regular".to_string(),
remote: "remote0".to_string(),
remote: Some("remote0".to_string()),
remote_store: "remotestore1".to_string(),
remote_ns: None,
store: "localstore0".to_string(),
@ -538,11 +555,11 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
// reading without proper read permissions on local end must fail
job.remote = "remote1".to_string();
job.remote = Some("remote1".to_string());
assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
// reading without proper read permissions on remote end must fail
job.remote = "remote0".to_string();
job.remote = Some("remote0".to_string());
job.store = "localstore1".to_string();
assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
@ -555,10 +572,10 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
));
// writing without proper write permissions on local end must fail
job.remote = "remote1".to_string();
job.remote = Some("remote1".to_string());
// writing without proper write permissions on remote end must fail
job.remote = "remote0".to_string();
job.remote = Some("remote0".to_string());
job.store = "localstore1".to_string();
assert!(!check_sync_job_modify_access(
&user_info,
@ -567,7 +584,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
));
// reset remote to one where users have access
job.remote = "remote1".to_string();
job.remote = Some("remote1".to_string());
// user with read permission can only read, but not modify/run
assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job));

View File

@ -1,6 +1,5 @@
use anyhow::{bail, format_err, Error};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::os::unix::prelude::OsStrExt;
use proxmox_router::{
@ -19,10 +18,9 @@ use pbs_api_types::{
APTUpdateInfo, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
UPID_SCHEMA,
};
use pbs_buildcfg::PROXMOX_BACKUP_SUBSCRIPTION_FN;
use crate::config::node;
use crate::tools::{apt, pbs_simple_http};
use crate::tools::apt;
use proxmox_rest_server::WorkerTask;
#[api(
@ -224,81 +222,17 @@ pub fn apt_update_database(
},
)]
/// Retrieve the changelog of the specified package.
fn apt_get_changelog(param: Value) -> Result<Value, Error> {
let name = pbs_tools::json::required_string_param(&param, "name")?.to_owned();
let version = param["version"].as_str();
let pkg_info = apt::list_installed_apt_packages(
|data| match version {
Some(version) => version == data.active_version,
None => data.active_version == data.candidate_version,
},
Some(&name),
);
if pkg_info.is_empty() {
bail!("Package '{}' not found", name);
}
let proxy_config = read_and_update_proxy_config()?;
let client = pbs_simple_http(proxy_config);
let changelog_url = &pkg_info[0].change_log_url;
// FIXME: use 'apt-get changelog' for proxmox packages as well, once repo supports it
if changelog_url.starts_with("http://download.proxmox.com/") {
let changelog = proxmox_async::runtime::block_on(client.get_string(changelog_url, None))
.map_err(|err| {
format_err!(
"Error downloading changelog from '{}': {}",
changelog_url,
err
)
})?;
Ok(json!(changelog))
} else if changelog_url.starts_with("https://enterprise.proxmox.com/") {
let sub = match proxmox_subscription::files::read_subscription(
PROXMOX_BACKUP_SUBSCRIPTION_FN,
&[proxmox_subscription::files::DEFAULT_SIGNING_KEY],
)? {
Some(sub) => sub,
None => {
bail!("cannot retrieve changelog from enterprise repo: no subscription info found")
}
};
let (key, id) = match sub.key {
Some(key) => match sub.serverid {
Some(id) => (key, id),
None => bail!("cannot retrieve changelog from enterprise repo: no server id found"),
},
None => {
bail!("cannot retrieve changelog from enterprise repo: no subscription key found")
}
};
let mut auth_header = HashMap::new();
auth_header.insert(
"Authorization".to_owned(),
format!("Basic {}", base64::encode(format!("{}:{}", key, id))),
);
let changelog =
proxmox_async::runtime::block_on(client.get_string(changelog_url, Some(&auth_header)))
.map_err(|err| {
format_err!(
"Error downloading changelog from '{}': {}",
changelog_url,
err
)
})?;
Ok(json!(changelog))
fn apt_get_changelog(name: String, version: Option<String>) -> Result<Value, Error> {
let mut command = std::process::Command::new("apt-get");
command.arg("changelog");
command.arg("-qq"); // don't display download progress
if let Some(ver) = version {
command.arg(format!("{name}={ver}"));
} else {
let mut command = std::process::Command::new("apt-get");
command.arg("changelog");
command.arg("-qq"); // don't display download progress
command.arg(name);
let output = proxmox_sys::command::run_command(command, None)?;
Ok(json!(output))
}
let output = proxmox_sys::command::run_command(command, None)?;
Ok(json!(output))
}
#[api(
@ -347,7 +281,6 @@ pub fn get_versions() -> Result<Vec<APTUpdateInfo>, Error> {
origin: "unknown".into(),
priority: "unknown".into(),
section: "unknown".into(),
change_log_url: "unknown".into(),
extra_info,
}
}

View File

@ -135,7 +135,7 @@ pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
},
)]
/// Create a Filesystem on an unused disk. Will be mounted under '/mnt/datastore/<name>'.".
/// Create a Filesystem on an unused disk. Will be mounted under `/mnt/datastore/<name>`.
pub fn create_datastore_disk(
name: String,
disk: String,
@ -235,7 +235,7 @@ pub fn create_datastore_disk(
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
},
)]
/// Remove a Filesystem mounted under '/mnt/datastore/<name>'.".
/// Remove a Filesystem mounted under `/mnt/datastore/<name>`.
pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
let path = format!("{}{}", BASE_MOUNT_DIR, name);
// path of datastore cannot be changed

View File

@ -9,12 +9,13 @@ use proxmox_sortable_macro::sortable;
use proxmox_sys::task_log;
use pbs_api_types::{
BLOCKDEVICE_NAME_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA,
BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA, NODE_SCHEMA,
PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA,
};
use crate::tools::disks::{
get_smart_data, inititialize_gpt_disk, DiskManage, DiskUsageInfo, DiskUsageQuery,
DiskUsageType, SmartData,
get_smart_data, inititialize_gpt_disk, wipe_blockdev, DiskManage, DiskUsageInfo,
DiskUsageQuery, DiskUsageType, SmartData,
};
use proxmox_rest_server::WorkerTask;
@ -178,6 +179,51 @@ pub fn initialize_disk(
Ok(json!(upid_str))
}
#[api(
protected: true,
input: {
properties: {
node: {
schema: NODE_SCHEMA,
},
disk: {
schema: BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA,
},
},
},
returns: {
schema: UPID_SCHEMA,
},
access: {
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
},
)]
/// wipe disk
pub fn wipe_disk(disk: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
let auth_id = rpcenv.get_auth_id().unwrap();
let upid_str = WorkerTask::new_thread(
"wipedisk",
Some(disk.clone()),
auth_id,
to_stdout,
move |worker| {
task_log!(worker, "wipe disk {}", disk);
let disk_manager = DiskManage::new();
let disk_info = disk_manager.partition_by_name(&disk)?;
wipe_blockdev(&disk_info, worker)?;
Ok(())
},
)?;
Ok(json!(upid_str))
}
#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
// ("lvm", &lvm::ROUTER),
@ -186,6 +232,7 @@ const SUBDIRS: SubdirMap = &sorted!([
("initgpt", &Router::new().post(&API_METHOD_INITIALIZE_DISK)),
("list", &Router::new().get(&API_METHOD_LIST_DISKS)),
("smart", &Router::new().get(&API_METHOD_SMART_STATUS)),
("wipedisk", &Router::new().put(&API_METHOD_WIPE_DISK)),
]);
pub const ROUTER: Router = Router::new()

View File

@ -147,7 +147,7 @@ pub fn zpool_details(name: String) -> Result<Value, Error> {
permission: &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false),
},
)]
/// Create a new ZFS pool. Will be mounted under '/mnt/datastore/<name>'.
/// Create a new ZFS pool. Will be mounted under `/mnt/datastore/<name>`.
pub fn create_zpool(
name: String,
devices: String,

View File

@ -1,29 +1,48 @@
use std::os::unix::prelude::OsStrExt;
use std::process::Command;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox_sys::boot_mode;
use proxmox_sys::linux::procfs;
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use pbs_api_types::{
NodePowerCommand, StorageStatus, NODE_SCHEMA, PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT,
BootModeInformation, KernelVersionInformation, NodePowerCommand, StorageStatus, NODE_SCHEMA,
PRIV_SYS_AUDIT, PRIV_SYS_POWER_MANAGEMENT,
};
use crate::api2::types::{
use pbs_api_types::{
NodeCpuInformation, NodeInformation, NodeMemoryCounters, NodeStatus, NodeSwapCounters,
};
impl std::convert::From<procfs::ProcFsCPUInfo> for NodeCpuInformation {
fn from(info: procfs::ProcFsCPUInfo) -> Self {
Self {
model: info.model,
sockets: info.sockets,
cpus: info.cpus,
}
fn procfs_to_node_cpu_info(info: procfs::ProcFsCPUInfo) -> NodeCpuInformation {
NodeCpuInformation {
model: info.model,
sockets: info.sockets,
cpus: info.cpus,
}
}
fn boot_mode_to_info(bm: boot_mode::BootMode, sb: boot_mode::SecureBoot) -> BootModeInformation {
use boot_mode::BootMode;
use boot_mode::SecureBoot;
match (bm, sb) {
(BootMode::Efi, SecureBoot::Enabled) => BootModeInformation {
mode: pbs_api_types::BootMode::Efi,
secureboot: true,
},
(BootMode::Efi, SecureBoot::Disabled) => BootModeInformation {
mode: pbs_api_types::BootMode::Efi,
secureboot: false,
},
(BootMode::Bios, _) => BootModeInformation {
mode: pbs_api_types::BootMode::LegacyBios,
secureboot: false,
},
}
}
@ -69,18 +88,20 @@ async fn get_status(
let loadavg = [loadavg.one(), loadavg.five(), loadavg.fifteen()];
let cpuinfo = procfs::read_cpuinfo()?;
let cpuinfo = cpuinfo.into();
let cpuinfo = procfs_to_node_cpu_info(cpuinfo);
let uname = nix::sys::utsname::uname()?;
let kversion = format!(
"{} {} {}",
std::str::from_utf8(uname.sysname().as_bytes())?,
std::str::from_utf8(uname.release().as_bytes())?,
std::str::from_utf8(uname.version().as_bytes())?
let kernel_version = KernelVersionInformation::from_uname_parts(
uname.sysname(),
uname.release(),
uname.version(),
uname.machine(),
);
let disk = crate::tools::fs::fs_info_static(proxmox_lang::c_str!("/")).await?;
let boot_info = boot_mode_to_info(boot_mode::BootMode::query(), boot_mode::SecureBoot::query());
Ok(NodeStatus {
memory,
swap,
@ -91,13 +112,15 @@ async fn get_status(
},
uptime: procfs::read_proc_uptime()?.0 as u64,
loadavg,
kversion,
kversion: kernel_version.get_legacy(),
current_kernel: kernel_version,
cpuinfo,
cpu,
wait,
info: NodeInformation {
fingerprint: crate::cert_info()?.fingerprint()?,
},
boot_info,
})
}

View File

@ -78,11 +78,12 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
if let (Some(remote), Some(remote_store), Some(local_store)) =
(remote, remote_store, local_store)
{
let remote_str = remote.as_str();
return check_pull_privs(
auth_id,
local_store.as_str(),
local_ns,
remote.as_str(),
(remote_str != "-").then_some(remote_str),
remote_store.as_str(),
false,
);

View File

@ -1,5 +1,5 @@
//! Sync datastore from remote server
use anyhow::{format_err, Error};
use anyhow::{bail, format_err, Error};
use futures::{future::FutureExt, select};
use proxmox_router::{Permission, Router, RpcEnvironment};
@ -22,7 +22,7 @@ pub fn check_pull_privs(
auth_id: &Authid,
store: &str,
ns: Option<&str>,
remote: &str,
remote: Option<&str>,
remote_store: &str,
delete: bool,
) -> Result<(), Error> {
@ -39,12 +39,22 @@ pub fn check_pull_privs(
PRIV_DATASTORE_BACKUP,
false,
)?;
user_info.check_privs(
auth_id,
&["remote", remote, remote_store],
PRIV_REMOTE_READ,
false,
)?;
if let Some(remote) = remote {
user_info.check_privs(
auth_id,
&["remote", remote, remote_store],
PRIV_REMOTE_READ,
false,
)?;
} else {
user_info.check_privs(
auth_id,
&["datastore", remote_store],
PRIV_DATASTORE_BACKUP,
false,
)?;
}
if delete {
user_info.check_privs(
@ -65,7 +75,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
PullParameters::new(
&sync_job.store,
sync_job.ns.clone().unwrap_or_default(),
&sync_job.remote,
sync_job.remote.as_deref(),
&sync_job.remote_store,
sync_job.remote_ns.clone().unwrap_or_default(),
sync_job
@ -91,7 +101,7 @@ pub fn do_sync_job(
) -> Result<String, Error> {
let job_id = format!(
"{}:{}:{}:{}:{}",
sync_job.remote,
sync_job.remote.as_deref().unwrap_or("-"),
sync_job.remote_store,
sync_job.store,
sync_job.ns.clone().unwrap_or_default(),
@ -99,6 +109,10 @@ pub fn do_sync_job(
);
let worker_type = job.jobtype().to_string();
if sync_job.remote.is_none() && sync_job.store == sync_job.remote_store {
bail!("can't sync to same datastore");
}
let (email, notify) = crate::server::lookup_datastore_notify_settings(&sync_job.store);
let upid_str = WorkerTask::spawn(
@ -114,7 +128,6 @@ pub fn do_sync_job(
let worker_future = async move {
let pull_params = PullParameters::try_from(&sync_job)?;
let client = pull_params.client().await?;
task_log!(worker, "Starting datastore sync job '{}'", job_id);
if let Some(event_str) = schedule {
@ -122,13 +135,16 @@ pub fn do_sync_job(
}
task_log!(
worker,
"sync datastore '{}' from '{}/{}'",
"sync datastore '{}' from '{}{}'",
sync_job.store,
sync_job.remote,
sync_job
.remote
.as_deref()
.map_or(String::new(), |remote| format!("{remote}/")),
sync_job.remote_store,
);
pull_store(&worker, &client, pull_params).await?;
pull_store(&worker, pull_params).await?;
task_log!(worker, "sync job '{}' end", &job_id);
@ -180,6 +196,7 @@ pub fn do_sync_job(
},
remote: {
schema: REMOTE_ID_SCHEMA,
optional: true,
},
"remote-store": {
schema: DATASTORE_SCHEMA,
@ -224,7 +241,7 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datasto
async fn pull(
store: String,
ns: Option<BackupNamespace>,
remote: String,
remote: Option<String>,
remote_store: String,
remote_ns: Option<BackupNamespace>,
remove_vanished: Option<bool>,
@ -237,6 +254,10 @@ async fn pull(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let delete = remove_vanished.unwrap_or(false);
if remote.is_none() && store == remote_store {
bail!("can't sync to same datastore");
}
let ns = ns.unwrap_or_default();
let ns_str = if ns.is_root() {
None
@ -248,7 +269,7 @@ async fn pull(
&auth_id,
&store,
ns_str.as_deref(),
&remote,
remote.as_deref(),
&remote_store,
delete,
)?;
@ -256,7 +277,7 @@ async fn pull(
let pull_params = PullParameters::new(
&store,
ns,
&remote,
remote.as_deref(),
&remote_store,
remote_ns.unwrap_or_default(),
auth_id.clone(),
@ -266,7 +287,6 @@ async fn pull(
limit,
transfer_last,
)?;
let client = pull_params.client().await?;
// fixme: set to_stdout to false?
// FIXME: add namespace to worker id?
@ -280,11 +300,11 @@ async fn pull(
worker,
"pull datastore '{}' from '{}/{}'",
store,
remote,
remote.as_deref().unwrap_or("-"),
remote_store,
);
let pull_future = pull_store(&worker, &client, pull_params);
let pull_future = pull_store(&worker, pull_params);
(select! {
success = pull_future.fuse() => success,
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,

View File

@ -1,12 +1,9 @@
//! API Type Definitions
use anyhow::bail;
use serde::{Deserialize, Serialize};
use proxmox_schema::*;
use pbs_api_types::StorageStatus;
mod acme;
pub use acme::*;
@ -123,101 +120,6 @@ fn test_proxmox_user_id_schema() -> Result<(), anyhow::Error> {
Ok(())
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node memory usage counters
pub struct NodeMemoryCounters {
/// Total memory
pub total: u64,
/// Used memory
pub used: u64,
/// Free memory
pub free: u64,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Node swap usage counters
pub struct NodeSwapCounters {
/// Total swap
pub total: u64,
/// Used swap
pub used: u64,
/// Free swap
pub free: u64,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Contains general node information such as the fingerprint`
pub struct NodeInformation {
/// The SSL Fingerprint
pub fingerprint: String,
}
#[api]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// Information about the CPU
pub struct NodeCpuInformation {
/// The CPU model
pub model: String,
/// The number of CPU sockets
pub sockets: usize,
/// The number of CPU cores (incl. threads)
pub cpus: usize,
}
#[api(
properties: {
memory: {
type: NodeMemoryCounters,
},
root: {
type: StorageStatus,
},
swap: {
type: NodeSwapCounters,
},
loadavg: {
type: Array,
items: {
type: Number,
description: "the load",
}
},
cpuinfo: {
type: NodeCpuInformation,
},
info: {
type: NodeInformation,
}
},
)]
#[derive(Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
/// The Node status
pub struct NodeStatus {
pub memory: NodeMemoryCounters,
pub root: StorageStatus,
pub swap: NodeSwapCounters,
/// The current uptime of the server.
pub uptime: u64,
/// Load for 1, 5 and 15 minutes.
pub loadavg: [f64; 3],
/// The current kernel version.
pub kversion: String,
/// Total CPU usage since last query.
pub cpu: f64,
/// Total IO wait since last query.
pub wait: f64,
pub cpuinfo: NodeCpuInformation,
pub info: NodeInformation,
}
pub const HTTP_PROXY_SCHEMA: Schema =
StringSchema::new("HTTP proxy configuration [http://]<host>[:port]")
.format(&ApiStringFormat::VerifyFn(|s| {

View File

@ -217,7 +217,7 @@ impl LdapAuthenticator {
}
}
/// Lookup the autenticator for the specified realm
/// Lookup the authenticator for the specified realm
pub(crate) fn lookup_authenticator(
realm: &RealmRef,
) -> Result<Box<dyn Authenticator + Send + Sync>, Error> {

View File

@ -125,7 +125,7 @@ pub fn dump_schema(schema: &Schema) -> Value {
data["format"] = dump_schema(subschema);
data["typetext"] = get_property_string_type_text(subschema).into();
}
_ => { /* do nothing - shouldnot happen */ }
_ => { /* do nothing - should not happen */ }
};
}
}

View File

@ -191,34 +191,42 @@ impl Checker {
self.output
.log_info("Checking bootloader configuration...")?;
// PBS packages version check needs to be run before
if !self.upgraded {
self.output
.log_skip("not yet upgraded, no need to check the presence of systemd-boot")?;
}
if !Path::new("/etc/kernel/proxmox-boot-uuids").is_file() {
self.output
.log_skip("proxmox-boot-tool not used for bootloader configuration")?;
return Ok(());
}
if !Path::new("/sys/firmware/efi").is_file() {
if !Path::new("/sys/firmware/efi").is_dir() {
self.output
.log_skip("System booted in legacy-mode - no need for systemd-boot")?;
return Ok(());
}
if Path::new("/usr/share/doc/systemd-boot/changelog.Debian.gz").is_file() {
self.output.log_pass("systemd-boot is installed")?;
} else {
if Path::new("/etc/kernel/proxmox-boot-uuids").is_file() {
// PBS packages version check needs to be run before
if !self.upgraded {
self.output
.log_skip("not yet upgraded, no need to check the presence of systemd-boot")?;
return Ok(());
}
if Path::new("/usr/share/doc/systemd-boot/changelog.Debian.gz").is_file() {
self.output
.log_pass("bootloader packages installed correctly")?;
return Ok(());
}
self.output.log_warn(
"proxmox-boot-tool is used for bootloader configuration in uefi mode \
but the separate systemd-boot package, existing in Debian Bookworm \
is not installed.\n\
initializing new ESPs will not work unitl the package is installed.",
but the separate systemd-boot package, is not installed.\n\
initializing new ESPs will not work until the package is installed.",
)?;
return Ok(());
} else if !Path::new("/usr/share/doc/grub-efi-amd64/changelog.Debian.gz").is_file() {
self.output.log_warn(
"System booted in uefi mode but grub-efi-amd64 meta-package not installed, \
new grub versions will not be installed to /boot/efi!
Install grub-efi-amd64.",
)?;
return Ok(());
} else {
self.output
.log_pass("bootloader packages installed correctly")?;
}
Ok(())
}
@ -263,12 +271,39 @@ impl Checker {
Ok(())
}
fn check_dkms_modules(&mut self) -> Result<(), Error> {
let kver = std::process::Command::new("uname")
.arg("-r")
.output()
.map_err(|err| format_err!("failed to retrieve running kernel version - {err}"))?;
let output = std::process::Command::new("dkms")
.arg("status")
.arg("-k")
.arg(std::str::from_utf8(&kver.stdout)?)
.output();
match output {
Err(_err) => self.output.log_skip("could not get dkms status")?,
Ok(ret) => {
let num_dkms_modules = std::str::from_utf8(&ret.stdout)?.lines().count();
if num_dkms_modules == 0 {
self.output.log_pass("no dkms modules found")?;
} else {
self.output
.log_warn("dkms modules found, this might cause issues during upgrade.")?;
}
}
}
Ok(())
}
pub fn check_misc(&mut self) -> Result<(), Error> {
self.output.print_header("MISCELLANEOUS CHECKS")?;
self.check_pbs_services()?;
self.check_time_sync()?;
self.check_apt_repos()?;
self.check_bootloader()?;
self.check_dkms_modules()?;
Ok(())
}

View File

@ -74,7 +74,7 @@ async fn run() -> Result<(), Error> {
proxmox_backup::auth_helpers::setup_auth_context(true);
let backup_user = pbs_config::backup_user()?;
let mut commando_sock = proxmox_rest_server::CommandSocket::new(
let mut command_sock = proxmox_rest_server::CommandSocket::new(
proxmox_rest_server::our_ctrl_sock(),
backup_user.gid,
);
@ -94,13 +94,13 @@ async fn run() -> Result<(), Error> {
pbs_buildcfg::API_ACCESS_LOG_FN,
Some(dir_opts.clone()),
Some(file_opts.clone()),
&mut commando_sock,
&mut command_sock,
)?
.enable_auth_log(
pbs_buildcfg::API_AUTH_LOG_FN,
Some(dir_opts.clone()),
Some(file_opts.clone()),
&mut commando_sock,
&mut command_sock,
)?;
let rest_server = RestServer::new(config);
@ -131,8 +131,8 @@ async fn run() -> Result<(), Error> {
proxmox_rest_server::write_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
let init_result: Result<(), Error> = try_block!({
proxmox_rest_server::register_task_control_commands(&mut commando_sock)?;
commando_sock.spawn()?;
proxmox_rest_server::register_task_control_commands(&mut command_sock)?;
command_sock.spawn()?;
proxmox_rest_server::catch_shutdown_signal()?;
proxmox_rest_server::catch_reload_signal()?;
Ok(())

View File

@ -489,12 +489,12 @@ async fn run() -> Result<(), Error> {
file_opts,
)?;
let mut commando_sock = proxmox_rest_server::CommandSocket::new(
let mut command_sock = proxmox_rest_server::CommandSocket::new(
proxmox_rest_server::our_ctrl_sock(),
backup_user.gid,
);
proxmox_rest_server::register_task_control_commands(&mut commando_sock)?;
commando_sock.spawn()?;
proxmox_rest_server::register_task_control_commands(&mut command_sock)?;
command_sock.spawn()?;
}
let mut rpcenv = CliEnvironment::new();
@ -536,35 +536,33 @@ fn get_remote(param: &HashMap<String, String>) -> Option<String> {
param.get("remote").map(|r| r.to_owned()).or_else(|| {
if let Some(id) = param.get("id") {
if let Ok(job) = get_sync_job(id) {
return Some(job.remote);
return job.remote;
}
}
None
})
}
fn get_remote_store(param: &HashMap<String, String>) -> Option<(String, String)> {
fn get_remote_store(param: &HashMap<String, String>) -> Option<(Option<String>, String)> {
let mut job: Option<SyncJobConfig> = None;
let remote = param.get("remote").map(|r| r.to_owned()).or_else(|| {
if let Some(id) = param.get("id") {
job = get_sync_job(id).ok();
if let Some(ref job) = job {
return Some(job.remote.clone());
return job.remote.clone();
}
}
None
});
if let Some(remote) = remote {
let store = param
.get("remote-store")
.map(|r| r.to_owned())
.or_else(|| job.map(|job| job.remote_store));
let store = param
.get("remote-store")
.map(|r| r.to_owned())
.or_else(|| job.map(|job| job.remote_store));
if let Some(store) = store {
return Some((remote, store));
}
if let Some(store) = store {
return Some((remote, store));
}
None
@ -585,7 +583,7 @@ fn get_remote_ns(param: &HashMap<String, String>) -> Option<BackupNamespace> {
}
// shell completion helper
pub fn complete_remote_datastore_name(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
pub fn complete_remote_datastore_name(arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let mut list = Vec::new();
if let Some(remote) = get_remote(param) {
@ -596,7 +594,9 @@ pub fn complete_remote_datastore_name(_arg: &str, param: &HashMap<String, String
list.push(item.store);
}
}
}
} else {
list = pbs_config::datastore::complete_datastore_name(arg, param);
};
list
}
@ -608,17 +608,25 @@ pub fn complete_remote_datastore_namespace(
) -> Vec<String> {
let mut list = Vec::new();
if let Some((remote, remote_store)) = get_remote_store(param) {
if let Ok(data) = proxmox_async::runtime::block_on(async move {
if let Some(data) = match get_remote_store(param) {
Some((Some(remote), remote_store)) => proxmox_async::runtime::block_on(async move {
crate::api2::config::remote::scan_remote_namespaces(
remote.clone(),
remote_store.clone(),
)
.await
}) {
for item in data {
list.push(item.ns.name());
}
.ok()
}),
Some((None, source_store)) => {
let mut rpcenv = CliEnvironment::new();
rpcenv.set_auth_id(Some(String::from("root@pam")));
crate::api2::admin::namespace::list_namespaces(source_store, None, None, &mut rpcenv)
.ok()
}
_ => None,
} {
for item in data {
list.push(item.ns.name());
}
}
@ -663,19 +671,26 @@ pub fn complete_sync_local_datastore_namespace(
pub fn complete_remote_datastore_group(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
let mut list = Vec::new();
if let Some((remote, remote_store)) = get_remote_store(param) {
let ns = get_remote_ns(param);
if let Ok(data) = proxmox_async::runtime::block_on(async move {
let ns = get_remote_ns(param);
if let Some(data) = match get_remote_store(param) {
Some((Some(remote), remote_store)) => proxmox_async::runtime::block_on(async move {
crate::api2::config::remote::scan_remote_groups(
remote.clone(),
remote_store.clone(),
ns,
)
.await
}) {
for item in data {
list.push(format!("{}/{}", item.backup.ty, item.backup.id));
}
.ok()
}),
Some((None, source_store)) => {
let mut rpcenv = CliEnvironment::new();
rpcenv.set_auth_id(Some(String::from("root@pam")));
crate::api2::admin::datastore::list_groups(source_store, ns, &mut rpcenv).ok()
}
_ => None,
} {
for item in data {
list.push(format!("{}/{}", item.backup.ty, item.backup.id));
}
}

View File

@ -1,7 +1,7 @@
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use anyhow::{bail, format_err, Error};
use anyhow::{bail, format_err, Context, Error};
use futures::*;
use http::request::Parts;
use http::Response;
@ -23,8 +23,8 @@ use proxmox_sys::{task_log, task_warn};
use pbs_datastore::DataStore;
use proxmox_rest_server::{
cleanup_old_tasks, cookie_from_header, rotate_task_log_archive, ApiConfig, RestEnvironment,
RestServer, WorkerTask,
cleanup_old_tasks, cookie_from_header, rotate_task_log_archive, ApiConfig, Redirector,
RestEnvironment, RestServer, WorkerTask,
};
use proxmox_backup::rrd_cache::{
@ -226,7 +226,7 @@ async fn run() -> Result<(), Error> {
]);
let backup_user = pbs_config::backup_user()?;
let mut commando_sock = proxmox_rest_server::CommandSocket::new(
let mut command_sock = proxmox_rest_server::CommandSocket::new(
proxmox_rest_server::our_ctrl_sock(),
backup_user.gid,
);
@ -243,16 +243,17 @@ async fn run() -> Result<(), Error> {
pbs_buildcfg::API_ACCESS_LOG_FN,
Some(dir_opts.clone()),
Some(file_opts.clone()),
&mut commando_sock,
&mut command_sock,
)?
.enable_auth_log(
pbs_buildcfg::API_AUTH_LOG_FN,
Some(dir_opts.clone()),
Some(file_opts.clone()),
&mut commando_sock,
&mut command_sock,
)?;
let rest_server = RestServer::new(config);
let redirector = Redirector::new();
proxmox_rest_server::init_worker_tasks(
pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!().into(),
file_opts.clone(),
@ -265,7 +266,7 @@ async fn run() -> Result<(), Error> {
let acceptor = Arc::new(Mutex::new(acceptor));
// to renew the acceptor we just add a command-socket handler
commando_sock.register_command("reload-certificate".to_string(), {
command_sock.register_command("reload-certificate".to_string(), {
let acceptor = Arc::clone(&acceptor);
move |_value| -> Result<_, Error> {
log::info!("reloading certificate");
@ -281,30 +282,54 @@ async fn run() -> Result<(), Error> {
})?;
// to remove references for not configured datastores
commando_sock.register_command("datastore-removed".to_string(), |_value| {
command_sock.register_command("datastore-removed".to_string(), |_value| {
if let Err(err) = DataStore::remove_unused_datastores() {
log::error!("could not refresh datastores: {err}");
}
Ok(Value::Null)
})?;
let connections = proxmox_rest_server::connection::AcceptBuilder::with_acceptor(acceptor)
let connections = proxmox_rest_server::connection::AcceptBuilder::new()
.debug(debug)
.rate_limiter_lookup(Arc::new(lookup_rate_limiter))
.tcp_keepalive_time(PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
let server = daemon::create_daemon(
([0, 0, 0, 0, 0, 0, 0, 0], 8007).into(),
move |listener| {
let connections = connections.accept(listener);
let (secure_connections, insecure_connections) =
connections.accept_tls_optional(listener, acceptor);
Ok(async {
daemon::systemd_notify(daemon::SystemdNotify::Ready)?;
hyper::Server::builder(connections)
let secure_server = hyper::Server::builder(secure_connections)
.serve(rest_server)
.with_graceful_shutdown(proxmox_rest_server::shutdown_future())
.map_err(Error::from)
.await
.map_err(Error::from);
let insecure_server = hyper::Server::builder(insecure_connections)
.serve(redirector)
.with_graceful_shutdown(proxmox_rest_server::shutdown_future())
.map_err(Error::from);
let (secure_res, insecure_res) =
try_join!(tokio::spawn(secure_server), tokio::spawn(insecure_server))
.context("failed to complete REST server task")?;
let results = [secure_res, insecure_res];
if results.iter().any(Result::is_err) {
let cat_errors = results
.into_iter()
.filter_map(|res| res.err().map(|err| err.to_string()))
.collect::<Vec<_>>()
.join("\n");
bail!(cat_errors);
}
Ok(())
})
},
Some(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN),
@ -313,8 +338,8 @@ async fn run() -> Result<(), Error> {
proxmox_rest_server::write_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
let init_result: Result<(), Error> = try_block!({
proxmox_rest_server::register_task_control_commands(&mut commando_sock)?;
commando_sock.spawn()?;
proxmox_rest_server::register_task_control_commands(&mut command_sock)?;
command_sock.spawn()?;
proxmox_rest_server::catch_shutdown_signal()?;
proxmox_rest_server::catch_reload_signal()?;
Ok(())

Some files were not shown because too many files have changed in this diff. Show More