Merge 3.1.4-1

Andrew A. Vasilyev 2024-02-02 19:33:27 +03:00
commit 33961cbe87
61 changed files with 1416 additions and 2962 deletions


@@ -1,5 +1,5 @@
 [workspace.package]
-version = "3.1.2"
+version = "3.1.4"
 authors = [
     "Dietmar Maurer <dietmar@proxmox.com>",
     "Dominik Csapak <d.csapak@proxmox.com>",
@@ -43,7 +43,6 @@ members = [
     "proxmox-backup-client",
     "proxmox-file-restore",
     "proxmox-restore-daemon",
-    "proxmox-rrd",
     "pxar-bin",
 ]
@@ -70,6 +69,7 @@ proxmox-openid = "0.10.0"
 proxmox-rest-server = { version = "0.5.1", features = [ "templates" ] }
 # some use "cli", some use "cli" and "server", pbs-config uses nothing
 proxmox-router = { version = "2.0.0", default_features = false }
+proxmox-rrd = { version = "0.1" }
 # everything but pbs-config and pbs-client use "api-macro"
 proxmox-schema = "2.0.0"
 proxmox-section-config = "2"
@@ -77,9 +77,9 @@ proxmox-serde = "0.1.1"
 proxmox-shared-memory = "0.3.0"
 proxmox-sortable-macro = "0.1.2"
 proxmox-subscription = { version = "0.4.2", features = [ "api-types" ] }
-proxmox-sys = "0.5.2"
+proxmox-sys = "0.5.3"
 proxmox-tfa = { version = "4.0.4", features = [ "api", "api-types" ] }
-proxmox-time = "1.1.2"
+proxmox-time = "1.1.6"
 proxmox-uuid = "1"
 # other proxmox crates
@@ -98,7 +98,6 @@ pbs-key-config = { path = "pbs-key-config" }
 pbs-pxar-fuse = { path = "pbs-pxar-fuse" }
 pbs-tape = { path = "pbs-tape" }
 pbs-tools = { path = "pbs-tools" }
-proxmox-rrd = { path = "proxmox-rrd" }
 # regular crates
 anyhow = "1.0"
@@ -133,7 +132,6 @@ pin-project-lite = "0.2"
 regex = "1.5.5"
 rustyline = "9"
 serde = { version = "1.0", features = ["derive"] }
-serde_cbor = "0.11.1"
 serde_json = "1.0"
 serde_plain = "1"
 siphasher = "0.3"
@@ -260,6 +258,7 @@ proxmox-rrd.workspace = true
 #proxmox-openid = { path = "../proxmox/proxmox-openid" }
 #proxmox-rest-server = { path = "../proxmox/proxmox-rest-server" }
 #proxmox-router = { path = "../proxmox/proxmox-router" }
+#proxmox-rrd = { path = "../proxmox/proxmox-rrd" }
 #proxmox-schema = { path = "../proxmox/proxmox-schema" }
 #proxmox-section-config = { path = "../proxmox/proxmox-section-config" }
 #proxmox-serde = { path = "../proxmox/proxmox-serde" }
@@ -271,7 +270,7 @@ proxmox-rrd.workspace = true
 #proxmox-time = { path = "../proxmox/proxmox-time" }
 #proxmox-uuid = { path = "../proxmox/proxmox-uuid" }
-#proxmox-acme-rs = { path = "../proxmox-acme-rs" }
+#proxmox-acme = { path = "../proxmox/proxmox-acme" }
 #pathpatterns = {path = "../pathpatterns" }
 #pxar = { path = "../pxar" }

debian/changelog

@ -1,3 +1,76 @@
rust-proxmox-backup (3.1.4-1) bookworm; urgency=medium
* api: acme: skip serializing empty 'api' and 'data' option
* tape: fix regression in restoring an encryption key from medium, avoid
trying to load the key to the drive, which cannot work in this special
case.
-- Proxmox Support Team <support@proxmox.com> Thu, 01 Feb 2024 16:30:18 +0100
rust-proxmox-backup (3.1.3-1) bookworm; urgency=medium
* improve efficiency of detecting if a block device is a partition
* acme: api: add option for external account binding to account registration
endpoint
* ui: datastore summary handle non-existent 'avail' and 'used' status values
* tape: allow configuring the timeout for "wait until ready" to better cope
with the long initialization duration that happens on the first use of
LTO 9+ tapes
* tape: improve error on decode element status page
* tape: improve LTO 9 compatibility
* fix #4904: tape changer: add option to explicitly eject the tape before
unloading it
* docs: tape: replace use of 'export-media' with correct 'export-media-set'
option
* docs: tape: add LTO 9 considerations
* fix #5117: ui: node info: avoid invalid array access for certain foreign
kernels
* d/control: explicitly depend on gdisk package to ensure it's available
when installing on top of a plain Debian installation
* tape: work around buggy changer implementations when reading the element
status
* system report: include prune.cfg
* fix #4315: jobs: modify GroupFilter so include/exclude is tracked
* ui: show if Filter includes or excludes
* datastore: add additional context for a parsing error when getting the
owner of a backup group
* api: tape: optionally accept uuid for destroying or moving a media, so
that one can uniquely identify existing tapes with duplicate labels.
* api: tape: don't allow duplicate media label-texts anymore
* ui: tape inventory: use uuid as id
* ui: tape: add button to remove a medium from the inventory, while not
touching the data
* api: custom certificate upload: make key optional and use the existing
key, if it's not specified.
* close #4819: ui: allow usernames shorter than 4 characters
* tape: rework on-drive encryption key handling and ensure this key does not
get unloaded too early
-- Proxmox Support Team <support@proxmox.com> Mon, 22 Jan 2024 15:20:45 +0100
rust-proxmox-backup (3.1.2-1) bookworm; urgency=medium
* sync: fix recent regression with recursive remote sync

debian/control

@@ -48,7 +48,7 @@ Build-Depends: bash-completion,
 librust-pathpatterns-0.3+default-dev,
 librust-percent-encoding-2+default-dev (>= 2.1-~~),
 librust-pin-project-lite-0.2+default-dev,
-librust-proxmox-acme-rs-0.4+default-dev,
+librust-proxmox-acme-0.5+default-dev,
 librust-proxmox-apt-0.10+default-dev (>= 0.10.5-~~),
 librust-proxmox-async-0.4+default-dev,
 librust-proxmox-auth-api-0.3+api-dev,
@@ -79,6 +79,7 @@ Build-Depends: bash-completion,
 librust-proxmox-router-2+cli-dev,
 librust-proxmox-router-2+default-dev,
 librust-proxmox-router-2+server-dev,
+librust-proxmox-rrd-0.1+default-dev,
 librust-proxmox-schema-2+api-macro-dev,
 librust-proxmox-schema-2+default-dev,
 librust-proxmox-section-config-2+default-dev,
@@ -88,15 +89,15 @@ Build-Depends: bash-completion,
 librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~),
 librust-proxmox-subscription-0.4+api-types-dev (>= 0.4.2-~~),
 librust-proxmox-subscription-0.4+default-dev (>= 0.4.2-~~),
-librust-proxmox-sys-0.5+acl-dev (>= 0.5.2-~~),
-librust-proxmox-sys-0.5+crypt-dev (>= 0.5.2-~~),
-librust-proxmox-sys-0.5+default-dev (>= 0.5.2-~~),
-librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.2-~~),
-librust-proxmox-sys-0.5+timer-dev (>= 0.5.2-~~),
+librust-proxmox-sys-0.5+acl-dev (>= 0.5.3-~~),
+librust-proxmox-sys-0.5+crypt-dev (>= 0.5.3-~~),
+librust-proxmox-sys-0.5+default-dev (>= 0.5.3-~~),
+librust-proxmox-sys-0.5+logrotate-dev (>= 0.5.3-~~),
+librust-proxmox-sys-0.5+timer-dev (>= 0.5.3-~~),
 librust-proxmox-tfa-4+api-dev (>= 4.0.4-~~),
 librust-proxmox-tfa-4+api-types-dev (>= 4.0.4-~~),
 librust-proxmox-tfa-4+default-dev (>= 4.0.4-~~),
-librust-proxmox-time-1+default-dev (>= 1.1.2-~~),
+librust-proxmox-time-1+default-dev (>= 1.1.6-~~),
 librust-proxmox-uuid-1+default-dev,
 librust-proxmox-uuid-1+serde-dev,
 librust-pxar-0.10+default-dev (>= 0.10.2-~~),
@@ -104,7 +105,6 @@ Build-Depends: bash-completion,
 librust-rustyline-9+default-dev,
 librust-serde-1+default-dev,
 librust-serde-1+derive-dev,
-librust-serde-cbor-0.11+default-dev (>= 0.11.1-~~),
 librust-serde-json-1+default-dev,
 librust-serde-plain-1+default-dev,
 librust-siphasher-0.3+default-dev,
@@ -163,6 +163,7 @@ Rules-Requires-Root: binary-targets
 Package: proxmox-backup-server
 Architecture: any
 Depends: fonts-font-awesome,
+gdisk,
 libjs-extjs (>= 7~),
 libjs-qrcodejs (>= 1.20201119),
 libproxmox-acme-plugins,


@@ -4,4 +4,5 @@ proxmox-backup-server: elevated-privileges 4755 root/root [usr/lib/x86_64-linux-
 proxmox-backup-server: systemd-service-file-refers-to-unusual-wantedby-target getty.target [lib/systemd/system/proxmox-backup-banner.service]
 proxmox-backup-server: uses-dpkg-database-directly [usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-api]
 proxmox-backup-server: uses-dpkg-database-directly [usr/lib/x86_64-linux-gnu/proxmox-backup/proxmox-backup-proxy]
+proxmox-backup-server: uses-dpkg-database-directly [usr/sbin/pbs2to3]
 proxmox-backup-server: uses-dpkg-database-directly [usr/sbin/proxmox-backup-debug]


@@ -116,6 +116,20 @@ of the specified criteria are synced. The available criteria are:
The same filter is applied to local groups, for handling of the
``remove-vanished`` option.

A ``group-filter`` can be inverted by prepending ``exclude:`` to it.

* Regular expression example, excluding the match:

.. code-block:: console

    # proxmox-backup-manager sync-job update ID --group-filter exclude:regex:'^vm/1\d{2,3}$'

When mixing include and exclude filters, the following rules apply (see the
example after this list):

- no filters: all backup groups
- include: only those matching the include filters
- exclude: all but those matching the exclude filters
- both: those matching the include filters, but without those matching the
  exclude filters
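For example (``ID`` is a placeholder for an existing sync job, like in the
regex example above), the following combination first includes all ``vm``
groups and then excludes the ones matching the regular expression:

.. code-block:: console

    # proxmox-backup-manager sync-job update ID --group-filter type:vm --group-filter 'exclude:regex:^vm/1\d{2,3}$'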
.. note:: The ``protected`` flag of remote backup snapshots will not be synced.

Namespace Support


@@ -98,6 +98,31 @@ so it takes 33 hours to read the 12TB needed to fill up an LTO-8 tape. If you wa
to write to your tape at full speed, please make sure that the source
datastore is able to deliver that performance (for example, by using SSDs).

LTO-9+ considerations
~~~~~~~~~~~~~~~~~~~~~

Since LTO-9, new media must be initialized in your drives before use; this is
called `Media Optimization`. It usually takes between 40 and 120 minutes per
medium. It is recommended to initialize your media with the tools provided by
the vendor of your drive or changer. Some tape changers have a method to
'bulk' initialize your media.

Because of this, formatting tapes is handled differently in Proxmox Backup
Server to avoid re-optimizing on each format/labelling. If you want to format
your media for use with Proxmox Backup Server for the first time, or after use
with another program, either use the functionality of your drive/changer, or
use the 'slow' format on the CLI:

.. code-block:: console

    # proxmox-tape format --drive your-drive --fast 0

This will completely remove all pre-existing data and trigger a `Media
Optimization` pass.

If you format a partitioned LTO-9 medium with the 'fast' method (the default,
or by setting `--fast 1`), only the first partition will be formatted, so make
sure to use the 'slow' method.

Terminology
-----------
@@ -326,6 +351,25 @@ the status output:
│ slot │ 14 │ │ │
└───────────────┴──────────┴────────────┴─────────────┘

Advanced options
^^^^^^^^^^^^^^^^

Since not all tape changers behave the same, it is sometimes necessary to
configure advanced options. Currently, the following option exists:

* `eject-before-unload`: This is needed for some changers that require a tape
  to be ejected before it is unloaded from the drive.

You can set these options with `proxmox-tape` like this:

.. code-block:: console

    # proxmox-tape changer update sl3 --eject-before-unload true

.. _tape_drive_config:

Tape drives
@@ -515,8 +559,6 @@ a single media pool, so a job only uses tapes from that pool.
- Create a new set when the specified Calendar Event triggers.

-.. _systemd.time manpage: https://manpages.debian.org/buster/systemd/systemd.time.7.en.html
-
This allows you to specify points in time by using systemd-like
Calendar Event specifications (see `systemd.time manpage`_).
@@ -664,16 +706,16 @@ dust protection than inside a drive:

.. Note:: For failed jobs, the tape remains in the drive.

-For tape libraries, the ``export-media`` option moves all tapes from
+For tape libraries, the ``export-media-set`` option moves all tapes from
the media set to an export slot, making sure that the following backup
cannot use the tapes. An operator can pick up those tapes and move them
to a vault.

.. code-block:: console

-    # proxmox-tape backup-job update job2 --export-media
+    # proxmox-tape backup-job update job2 --export-media-set

-.. Note:: The ``export-media`` option can be used to force the start
+.. Note:: The ``export-media-set`` option can be used to force the start
of a new media set, because tapes from the current set are no
longer online.


@@ -10,9 +10,9 @@ use proxmox_schema::{
 };

 use crate::{
-    Authid, CryptMode, Fingerprint, MaintenanceMode, Userid, DATASTORE_NOTIFY_STRING_SCHEMA,
-    GC_SCHEDULE_SCHEMA, PROXMOX_SAFE_ID_FORMAT, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX,
-    SINGLE_LINE_COMMENT_SCHEMA, UPID,
+    Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid,
+    DATASTORE_NOTIFY_STRING_SCHEMA, GC_SCHEDULE_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
+    PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, UPID,
 };

 const_regex! {
@@ -843,19 +843,37 @@ impl BackupGroup {
     }

     pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
-        use crate::GroupFilter;
-
-        match filter {
-            GroupFilter::Group(backup_group) => {
+        use crate::FilterType;
+        match &filter.filter_type {
+            FilterType::Group(backup_group) => {
                 match backup_group.parse::<BackupGroup>() {
                     Ok(group) => *self == group,
                     Err(_) => false, // shouldn't happen if value is schema-checked
                 }
             }
-            GroupFilter::BackupType(ty) => self.ty == *ty,
-            GroupFilter::Regex(regex) => regex.is_match(&self.to_string()),
+            FilterType::BackupType(ty) => self.ty == *ty,
+            FilterType::Regex(regex) => regex.is_match(&self.to_string()),
         }
     }

+    pub fn apply_filters(&self, filters: &[GroupFilter]) -> bool {
+        // since there will only be a few filters in the list, an extra iteration to get the
+        // number of include filters should not be an issue
+        let is_included = if filters.iter().filter(|f| !f.is_exclude).count() == 0 {
+            true
+        } else {
+            filters
+                .iter()
+                .filter(|f| !f.is_exclude)
+                .any(|filter| self.matches(filter))
+        };
+
+        is_included
+            && !filters
+                .iter()
+                .filter(|f| f.is_exclude)
+                .any(|filter| self.matches(filter))
+    }
 }
impl AsRef<BackupGroup> for BackupGroup {
@@ -1302,12 +1320,15 @@ pub struct DataStoreStatus {
 /// Status of a Datastore
 pub struct DataStoreStatusListItem {
     pub store: String,
-    /// The Size of the underlying storage in bytes. (-1 on error)
-    pub total: i64,
-    /// The used bytes of the underlying storage. (-1 on error)
-    pub used: i64,
+    /// The Size of the underlying storage in bytes.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub total: Option<u64>,
+    /// The used bytes of the underlying storage.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub used: Option<u64>,
     /// The available bytes of the underlying storage. (-1 on error)
-    pub avail: i64,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub avail: Option<u64>,
     /// A list of usages of the past (last Month).
     #[serde(skip_serializing_if = "Option::is_none")]
     pub history: Option<Vec<Option<f64>>>,
@@ -1335,9 +1356,9 @@ impl DataStoreStatusListItem {
     pub fn empty(store: &str, err: Option<String>) -> Self {
         DataStoreStatusListItem {
             store: store.to_owned(),
-            total: -1,
-            used: -1,
-            avail: -1,
+            total: None,
+            used: None,
+            avail: None,
             history: None,
             history_start: None,
             history_delta: None,


@@ -1,6 +1,6 @@
-use anyhow::format_err;
 use std::str::FromStr;

+use anyhow::bail;
 use regex::Regex;
 use serde::{Deserialize, Serialize};
@@ -388,7 +388,7 @@ pub struct TapeBackupJobStatus {
 #[derive(Clone, Debug)]
 /// Filter for matching `BackupGroup`s, for use with `BackupGroup::filter`.
-pub enum GroupFilter {
+pub enum FilterType {
     /// BackupGroup type - either `vm`, `ct`, or `host`.
     BackupType(BackupType),
     /// Full identifier of BackupGroup, including type
@@ -397,7 +397,7 @@ pub enum GroupFilter {
     Regex(Regex),
 }

-impl PartialEq for GroupFilter {
+impl PartialEq for FilterType {
     fn eq(&self, other: &Self) -> bool {
         match (self, other) {
             (Self::BackupType(a), Self::BackupType(b)) => a == b,
@@ -408,28 +408,69 @@ impl PartialEq for GroupFilter {
     }
 }
impl std::str::FromStr for FilterType {
type Err = anyhow::Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s.split_once(':') {
Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| FilterType::Group(value.to_string()))?,
Some(("type", value)) => FilterType::BackupType(value.parse()?),
Some(("regex", value)) => FilterType::Regex(Regex::new(value)?),
Some((ty, _value)) => bail!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty),
None => bail!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'"),
})
}
}
// used for serializing below, caution!
impl std::fmt::Display for FilterType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
FilterType::BackupType(backup_type) => write!(f, "type:{}", backup_type),
FilterType::Group(backup_group) => write!(f, "group:{}", backup_group),
FilterType::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
}
}
}
#[derive(Clone, Debug)]
pub struct GroupFilter {
pub is_exclude: bool,
pub filter_type: FilterType,
}
impl PartialEq for GroupFilter {
fn eq(&self, other: &Self) -> bool {
self.filter_type == other.filter_type && self.is_exclude == other.is_exclude
}
}
impl Eq for GroupFilter {}
 impl std::str::FromStr for GroupFilter {
     type Err = anyhow::Error;

     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s.split_once(':') {
-            Some(("group", value)) => BACKUP_GROUP_SCHEMA.parse_simple_value(value).map(|_| GroupFilter::Group(value.to_string())),
-            Some(("type", value)) => Ok(GroupFilter::BackupType(value.parse()?)),
-            Some(("regex", value)) => Ok(GroupFilter::Regex(Regex::new(value)?)),
-            Some((ty, _value)) => Err(format_err!("expected 'group', 'type' or 'regex' prefix, got '{}'", ty)),
-            None => Err(format_err!("input doesn't match expected format '<group:GROUP||type:<vm|ct|host>|regex:REGEX>'")),
-        }.map_err(|err| format_err!("'{}' - {}", s, err))
+        let (is_exclude, type_str) = match s.split_once(':') {
+            Some(("include", value)) => (false, value),
+            Some(("exclude", value)) => (true, value),
+            _ => (false, s),
+        };
+
+        Ok(GroupFilter {
+            is_exclude,
+            filter_type: type_str.parse()?,
+        })
     }
 }

 // used for serializing below, caution!
 impl std::fmt::Display for GroupFilter {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            GroupFilter::BackupType(backup_type) => write!(f, "type:{}", backup_type),
-            GroupFilter::Group(backup_group) => write!(f, "group:{}", backup_group),
-            GroupFilter::Regex(regex) => write!(f, "regex:{}", regex.as_str()),
-        }
+        if self.is_exclude {
+            f.write_str("exclude:")?;
+        }
+        std::fmt::Display::fmt(&self.filter_type, f)
     }
 }
@@ -441,9 +482,9 @@ fn verify_group_filter(input: &str) -> Result<(), anyhow::Error> {
 }

 pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
-    "Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE').")
+    "Group filter based on group identifier ('group:GROUP'), group type ('type:<vm|ct|host>'), or regex ('regex:RE'). Can be inverted by prepending 'exclude:'.")
     .format(&ApiStringFormat::VerifyFn(verify_group_filter))
-    .type_text("<type:<vm|ct|host>|group:GROUP|regex:RE>")
+    .type_text("[<exclude:|include:>]<type:<vm|ct|host>|group:GROUP|regex:RE>")
     .schema();

 pub const GROUP_FILTER_LIST_SCHEMA: Schema =
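As an informal illustration (not part of the commit), the new prefix handling from the FromStr and Display implementations above behaves like this; the assertions follow directly from the code shown:

// Illustrative sketch only: exercising the new include/exclude prefix of
// pbs_api_types::GroupFilter as changed in this commit.
use std::str::FromStr;

use pbs_api_types::GroupFilter;

fn group_filter_examples() -> Result<(), anyhow::Error> {
    // no prefix (or "include:") keeps the old behaviour
    let include = GroupFilter::from_str("type:vm")?;
    assert!(!include.is_exclude);

    // "exclude:" flips the filter, the rest is parsed as before
    let exclude = GroupFilter::from_str(r"exclude:regex:^vm/1\d{2,3}$")?;
    assert!(exclude.is_exclude);

    // Display round-trips the prefix, which matters for serialization
    assert_eq!(exclude.to_string(), r"exclude:regex:^vm/1\d{2,3}$");
    Ok(())
}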


@@ -79,7 +79,7 @@ pub struct RemoteConfig {
 pub struct Remote {
     pub name: String,
     // Note: The stored password is base64 encoded
-    #[serde(skip_serializing_if = "String::is_empty")]
+    #[serde(default, skip_serializing_if = "String::is_empty")]
     #[serde(with = "proxmox_serde::string_as_base64")]
     pub password: String,
     #[serde(flatten)]


@@ -51,6 +51,10 @@ Import/Export, i.e. any media in those slots are considered to be
         schema: EXPORT_SLOT_LIST_SCHEMA,
         optional: true,
     },
+    "eject-before-unload": {
+        optional: true,
+        default: false,
+    }
 },
)]
#[derive(Serialize, Deserialize, Updater)]
@@ -62,6 +66,9 @@ pub struct ScsiTapeChanger {
     pub path: String,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub export_slots: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    /// if set to true, tapes are ejected manually before unloading
+    pub eject_before_unload: Option<bool>,
 }
#[api( #[api(


@@ -59,7 +59,7 @@ pub struct VirtualTapeDrive {
     },
 }
)]
-#[derive(Serialize, Deserialize, Updater)]
+#[derive(Serialize, Deserialize, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// Lto SCSI tape driver
pub struct LtoTapeDrive {
@@ -108,7 +108,7 @@ pub struct MamAttribute {
}

#[api()]
-#[derive(Serialize, Deserialize, Copy, Clone, Debug)]
+#[derive(Serialize, Deserialize, Copy, Clone, Debug, PartialOrd, PartialEq)]
pub enum TapeDensity {
    /// Unknown (no media loaded)
    Unknown,


@ -0,0 +1,76 @@
use pbs_api_types::{BackupGroup, BackupType, GroupFilter};
use std::str::FromStr;
#[test]
fn test_no_filters() {
let group_filters = vec![];
let do_backup = [
"vm/101", "vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108", "vm/109",
];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
#[test]
fn test_include_filters() {
let group_filters = vec![GroupFilter::from_str("regex:.*10[2-8]").unwrap()];
let do_backup = [
"vm/102", "vm/103", "vm/104", "vm/105", "vm/106", "vm/107", "vm/108",
];
let dont_backup = ["vm/101", "vm/109"];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
for id in dont_backup {
assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
#[test]
fn test_exclude_filters() {
let group_filters = [
GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
];
let do_backup = ["vm/104", "vm/108", "vm/109"];
let dont_backup = ["vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107"];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
for id in dont_backup {
assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}
#[test]
fn test_include_and_exclude_filters() {
let group_filters = [
GroupFilter::from_str("exclude:regex:.*10[1-3]").unwrap(),
GroupFilter::from_str("regex:.*10[2-8]").unwrap(),
GroupFilter::from_str("exclude:regex:.*10[5-7]").unwrap(),
];
let do_backup = ["vm/104", "vm/108"];
let dont_backup = [
"vm/101", "vm/102", "vm/103", "vm/105", "vm/106", "vm/107", "vm/109",
];
for id in do_backup {
assert!(BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
for id in dont_backup {
assert!(!BackupGroup::new(BackupType::Vm, id).apply_filters(&group_filters));
}
}


@ -304,68 +304,6 @@ async fn restore_command(target: String, pattern: Option<String>) -> Result<(),
/// The `Path` type's component iterator does not tell us anything about trailing slashes or
/// trailing `Component::CurDir` entries. Since we only support regular paths we'll roll our own
/// here:
enum PathComponent<'a> {
Root,
CurDir,
ParentDir,
Normal(&'a OsStr),
TrailingSlash,
}
struct PathComponentIter<'a> {
path: &'a [u8],
state: u8, // 0=beginning, 1=ongoing, 2=trailing, 3=finished (fused)
}
impl std::iter::FusedIterator for PathComponentIter<'_> {}
impl<'a> Iterator for PathComponentIter<'a> {
type Item = PathComponent<'a>;
fn next(&mut self) -> Option<Self::Item> {
if self.path.is_empty() {
return None;
}
if self.state == 0 {
self.state = 1;
if self.path[0] == b'/' {
// absolute path
self.path = &self.path[1..];
return Some(PathComponent::Root);
}
}
// skip slashes
let had_slashes = self.path[0] == b'/';
while self.path.first().copied() == Some(b'/') {
self.path = &self.path[1..];
}
Some(match self.path {
[] if had_slashes => PathComponent::TrailingSlash,
[] => return None,
[b'.'] | [b'.', b'/', ..] => {
self.path = &self.path[1..];
PathComponent::CurDir
}
[b'.', b'.'] | [b'.', b'.', b'/', ..] => {
self.path = &self.path[2..];
PathComponent::ParentDir
}
_ => {
let end = self
.path
.iter()
.position(|&b| b == b'/')
.unwrap_or(self.path.len());
let (out, rest) = self.path.split_at(end);
self.path = rest;
PathComponent::Normal(OsStr::from_bytes(out))
}
})
}
}
pub struct Shell {
    /// Readline instance handling input and callbacks


@@ -44,12 +44,14 @@ pub fn backup_group() -> Result<nix::unistd::Group, Error> {
     }
 }

-pub struct BackupLockGuard(Option<std::fs::File>);
+pub struct BackupLockGuard {
+    _file: Option<std::fs::File>,
+}

 #[doc(hidden)]
 /// Note: do not use for production code, this is only intended for tests
 pub unsafe fn create_mocked_lock() -> BackupLockGuard {
-    BackupLockGuard(None)
+    BackupLockGuard { _file: None }
 }

 /// Open or create a lock file owned by user "backup" and lock it.
@@ -73,7 +75,7 @@ pub fn open_backup_lockfile<P: AsRef<std::path::Path>>(
     let timeout = timeout.unwrap_or(std::time::Duration::new(10, 0));
     let file = proxmox_sys::fs::open_file_locked(&path, timeout, exclusive, options)?;

-    Ok(BackupLockGuard(Some(file)))
+    Ok(BackupLockGuard { _file: Some(file) })
 }

 /// Atomically write data to file owned by "root:backup" with permission "0640"


@@ -602,7 +602,10 @@ impl DataStore {
     ) -> Result<Authid, Error> {
         let full_path = self.owner_path(ns, backup_group);
         let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
-        owner.trim_end().parse() // remove trailing newline
+        owner
+            .trim_end() // remove trailing newline
+            .parse()
+            .map_err(|err| format_err!("parsing owner for {backup_group} failed: {err}"))
     }

     pub fn owns_backup(


@@ -14,6 +14,7 @@ lazy_static.workspace = true
 libc.workspace = true
 log.workspace = true
 nix.workspace = true
+openssl.workspace = true
 regex.workspace = true
 serde.workspace = true
 serde_json.workspace = true


@@ -247,7 +247,7 @@ pub fn transfer_medium<F: AsRawFd>(
     Ok(())
 }

-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 enum ElementType {
     MediumTransport,
     Storage,
@@ -326,7 +326,9 @@ fn get_element<F: AsRawFd>(
     let data = execute_scsi_command(sg_raw, &cmd, "read element status (B8h)", retry)?;

-    let page = decode_element_status_page(&data, start_element_address)?;
+    let page = decode_element_status_page(&data, start_element_address).map_err(|err| {
+        format_err!("decode element status for {element_type:?} on {start_element_address} failed - {err}")
+    })?;

     retry = false; // only retry the first command
@@ -367,7 +369,7 @@ pub fn read_element_status<F: AsRawFd>(file: &mut F) -> Result<MtxStatus, Error>
     // first, request address assignment (used for sanity checks)
     let setup = read_element_address_assignment(file)?;

-    let allocation_len: u32 = 0x10000;
+    let allocation_len: u32 = 0xFFFF; // some changer only use the lower 2 bytes
     let mut sg_raw = SgRaw::new(file, allocation_len as usize)?;
     sg_raw.set_timeout(SCSI_CHANGER_DEFAULT_TIMEOUT);
@ -679,149 +681,146 @@ fn decode_element_status_page(
data: &[u8], data: &[u8],
start_element_address: u16, start_element_address: u16,
) -> Result<DecodedStatusPage, Error> { ) -> Result<DecodedStatusPage, Error> {
proxmox_lang::try_block!({ let mut result = DecodedStatusPage {
let mut result = DecodedStatusPage { last_element_address: None,
last_element_address: None, transports: Vec::new(),
transports: Vec::new(), drives: Vec::new(),
drives: Vec::new(), storage_slots: Vec::new(),
storage_slots: Vec::new(), import_export_slots: Vec::new(),
import_export_slots: Vec::new(), };
};
let mut reader = data; let mut reader = data;
let head: ElementStatusHeader = unsafe { reader.read_be_value()? }; let head: ElementStatusHeader = unsafe { reader.read_be_value()? };
if head.number_of_elements_available == 0 { if head.number_of_elements_available == 0 {
return Ok(result); return Ok(result);
}
if head.first_element_address_reported < start_element_address {
bail!("got wrong first_element_address_reported"); // sanity check
}
let len = head.byte_count_of_report_available;
let len = ((len[0] as usize) << 16) + ((len[1] as usize) << 8) + (len[2] as usize);
use std::cmp::Ordering;
match len.cmp(&reader.len()) {
Ordering::Less => reader = &reader[..len],
Ordering::Greater => bail!(
"wrong amount of data: expected {}, got {}",
len,
reader.len()
),
_ => (),
}
loop {
if reader.is_empty() {
break;
} }
if head.first_element_address_reported < start_element_address { let subhead: SubHeader = unsafe { reader.read_be_value()? };
bail!("got wrong first_element_address_reported"); // sanity check
let len = subhead.byte_count_of_descriptor_data_available;
let mut len = ((len[0] as usize) << 16) + ((len[1] as usize) << 8) + (len[2] as usize);
if len > reader.len() {
len = reader.len();
} }
let len = head.byte_count_of_report_available; let descr_data = reader.read_exact_allocated(len)?;
let len = ((len[0] as usize) << 16) + ((len[1] as usize) << 8) + (len[2] as usize);
use std::cmp::Ordering; let descr_len = subhead.descriptor_length as usize;
match len.cmp(&reader.len()) {
Ordering::Less => reader = &reader[..len], if descr_len == 0 {
Ordering::Greater => bail!( bail!("got elements, but descriptor length 0");
"wrong amount of data: expected {}, got {}",
len,
reader.len()
),
_ => (),
} }
loop { for descriptor in descr_data.chunks_exact(descr_len) {
if reader.is_empty() { let mut reader = descriptor;
break;
}
let subhead: SubHeader = unsafe { reader.read_be_value()? }; match subhead.element_type_code {
1 => {
let desc: TransportDescriptor = unsafe { reader.read_be_value()? };
let len = subhead.byte_count_of_descriptor_data_available; let full = (desc.flags1 & 1) != 0;
let mut len = ((len[0] as usize) << 16) + ((len[1] as usize) << 8) + (len[2] as usize); let volume_tag = subhead.parse_optional_volume_tag(&mut reader, full)?;
if len > reader.len() {
len = reader.len();
}
let descr_data = reader.read_exact_allocated(len)?; subhead.skip_alternate_volume_tag(&mut reader)?;
let descr_len = subhead.descriptor_length as usize; result.last_element_address = Some(desc.element_address);
if descr_len == 0 { let status = TransportElementStatus {
bail!("got elements, but descriptor length 0"); status: create_element_status(full, volume_tag),
} element_address: desc.element_address,
};
for descriptor in descr_data.chunks_exact(descr_len) { result.transports.push(status);
let mut reader = descriptor;
match subhead.element_type_code {
1 => {
let desc: TransportDescriptor = unsafe { reader.read_be_value()? };
let full = (desc.flags1 & 1) != 0;
let volume_tag = subhead.parse_optional_volume_tag(&mut reader, full)?;
subhead.skip_alternate_volume_tag(&mut reader)?;
result.last_element_address = Some(desc.element_address);
let status = TransportElementStatus {
status: create_element_status(full, volume_tag),
element_address: desc.element_address,
};
result.transports.push(status);
}
2 | 3 => {
let desc: StorageDescriptor = unsafe { reader.read_be_value()? };
let full = (desc.flags1 & 1) != 0;
let volume_tag = subhead.parse_optional_volume_tag(&mut reader, full)?;
subhead.skip_alternate_volume_tag(&mut reader)?;
result.last_element_address = Some(desc.element_address);
if subhead.element_type_code == 3 {
let status = StorageElementStatus {
import_export: true,
status: create_element_status(full, volume_tag),
element_address: desc.element_address,
};
result.import_export_slots.push(status);
} else {
let status = StorageElementStatus {
import_export: false,
status: create_element_status(full, volume_tag),
element_address: desc.element_address,
};
result.storage_slots.push(status);
}
}
4 => {
let desc: TransferDescriptor = unsafe { reader.read_be_value()? };
let loaded_slot = if (desc.flags2 & 128) != 0 {
// SValid
Some(desc.source_storage_element_address as u64)
} else {
None
};
let full = (desc.flags1 & 1) != 0;
let volume_tag = subhead.parse_optional_volume_tag(&mut reader, full)?;
subhead.skip_alternate_volume_tag(&mut reader)?;
let dvcid = decode_dvcid_info(&mut reader).unwrap_or(DvcidInfo {
vendor: None,
model: None,
serial: None,
});
result.last_element_address = Some(desc.element_address);
let drive = DriveStatus {
loaded_slot,
status: create_element_status(full, volume_tag),
drive_serial_number: dvcid.serial,
vendor: dvcid.vendor,
model: dvcid.model,
element_address: desc.element_address,
};
result.drives.push(drive);
}
code => bail!("got unknown element type code {}", code),
} }
2 | 3 => {
let desc: StorageDescriptor = unsafe { reader.read_be_value()? };
let full = (desc.flags1 & 1) != 0;
let volume_tag = subhead.parse_optional_volume_tag(&mut reader, full)?;
subhead.skip_alternate_volume_tag(&mut reader)?;
result.last_element_address = Some(desc.element_address);
if subhead.element_type_code == 3 {
let status = StorageElementStatus {
import_export: true,
status: create_element_status(full, volume_tag),
element_address: desc.element_address,
};
result.import_export_slots.push(status);
} else {
let status = StorageElementStatus {
import_export: false,
status: create_element_status(full, volume_tag),
element_address: desc.element_address,
};
result.storage_slots.push(status);
}
}
4 => {
let desc: TransferDescriptor = unsafe { reader.read_be_value()? };
let loaded_slot = if (desc.flags2 & 128) != 0 {
// SValid
Some(desc.source_storage_element_address as u64)
} else {
None
};
let full = (desc.flags1 & 1) != 0;
let volume_tag = subhead.parse_optional_volume_tag(&mut reader, full)?;
subhead.skip_alternate_volume_tag(&mut reader)?;
let dvcid = decode_dvcid_info(&mut reader).unwrap_or(DvcidInfo {
vendor: None,
model: None,
serial: None,
});
result.last_element_address = Some(desc.element_address);
let drive = DriveStatus {
loaded_slot,
status: create_element_status(full, volume_tag),
drive_serial_number: dvcid.serial,
vendor: dvcid.vendor,
model: dvcid.model,
element_address: desc.element_address,
};
result.drives.push(drive);
}
code => bail!("got unknown element type code {}", code),
} }
} }
}
Ok(result) Ok(result)
})
.map_err(|err: Error| format_err!("decode element status failed - {}", err))
} }
/// Open the device for read/write, returns the file handle


@ -9,9 +9,10 @@ use endian_trait::Endian;
use nix::fcntl::{fcntl, FcntlArg, OFlag}; use nix::fcntl::{fcntl, FcntlArg, OFlag};
mod encryption; mod encryption;
pub use encryption::*; pub use encryption::{drive_get_encryption, drive_set_encryption};
mod volume_statistics; mod volume_statistics;
use proxmox_uuid::Uuid;
pub use volume_statistics::*; pub use volume_statistics::*;
mod tape_alert_flags; mod tape_alert_flags;
@ -26,8 +27,11 @@ pub use report_density::*;
use proxmox_io::{ReadExt, WriteExt}; use proxmox_io::{ReadExt, WriteExt};
use proxmox_sys::error::SysResult; use proxmox_sys::error::SysResult;
use pbs_api_types::{Lp17VolumeStatistics, LtoDriveAndMediaStatus, MamAttribute}; use pbs_api_types::{
Lp17VolumeStatistics, LtoDriveAndMediaStatus, LtoTapeDrive, MamAttribute, TapeDensity,
};
use crate::linux_list_drives::open_lto_tape_device;
use crate::{ use crate::{
sgutils2::{ sgutils2::{
alloc_page_aligned_buffer, scsi_cmd_mode_select10, scsi_cmd_mode_select6, scsi_inquiry, alloc_page_aligned_buffer, scsi_cmd_mode_select10, scsi_cmd_mode_select6, scsi_inquiry,
@ -102,7 +106,6 @@ pub struct SgTape {
file: File, file: File,
locate_offset: Option<i64>, locate_offset: Option<i64>,
info: InquiryInfo, info: InquiryInfo,
encryption_key_loaded: bool,
} }
impl SgTape { impl SgTape {
@ -124,11 +127,47 @@ impl SgTape {
Ok(Self { Ok(Self {
file, file,
info, info,
encryption_key_loaded: false,
locate_offset: None, locate_offset: None,
}) })
} }
/// Open a tape device
///
/// This does additional checks:
///
/// - check if it is a non-rewinding tape device
/// - check if drive is ready (tape loaded)
/// - check block size
/// - for autoloader only, try to reload ejected tapes
pub fn open_lto_drive(config: &LtoTapeDrive) -> Result<Self, Error> {
proxmox_lang::try_block!({
let file = open_lto_tape_device(&config.path)?;
let mut handle = SgTape::new(file)?;
if handle.test_unit_ready().is_err() {
// for autoloader only, try to reload ejected tapes
if config.changer.is_some() {
let _ = handle.load(); // just try, ignore error
}
}
handle.wait_until_ready(None)?;
handle.set_default_options()?;
Ok(handle)
})
.map_err(|err: Error| {
format_err!(
"open drive '{}' ({}) failed - {}",
config.name,
config.path,
err
)
})
}
    /// Access to file descriptor - useful for testing
    pub fn file_mut(&mut self) -> &mut File {
        &mut self.file
@@ -197,16 +236,17 @@ impl SgTape {
     /// Format media, single partition
     pub fn format_media(&mut self, fast: bool) -> Result<(), Error> {
         // try to get info about loaded media first
-        let (has_format, is_worm) = match self.read_medium_configuration_page() {
+        let (density, is_worm) = match self.read_medium_configuration_page() {
             Ok((_head, block_descriptor, page)) => {
                 // FORMAT requires LTO5 or newer
-                let has_format = block_descriptor.density_code >= 0x58;
+                let density: TapeDensity = TapeDensity::try_from(block_descriptor.density_code)
+                    .unwrap_or(TapeDensity::Unknown);
                 let is_worm = page.is_worm();
-                (has_format, is_worm)
+                (density, is_worm)
             }
             Err(_) => {
                 // LTO3 and older do not support medium configuration mode page
-                (false, false)
+                (TapeDensity::Unknown, false)
             }
         };
@@ -227,14 +267,21 @@ impl SgTape {
         sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
         let mut cmd = Vec::new();

-        if has_format {
+        if density >= TapeDensity::LTO5 && density <= TapeDensity::LTO8 {
             cmd.extend([0x04, 0, 0, 0, 0, 0]); // FORMAT
             sg_raw.do_command(&cmd)?;
             if !fast {
                 self.erase_media(false)?; // overwrite everything
             }
+        } else if density >= TapeDensity::LTO9 && !fast {
+            cmd.extend([0x04, 0x01, 0, 0, 0, 0]); // FORMAT, set IMMED
+            sg_raw.do_command(&cmd)?;
+            self.wait_until_ready(Some(60 * 60 * 2)) // 2 hours, max. initialization time
+                .map_err(|err| format_err!("error waiting for LTO9+ initialization: {err}"))?;
+            self.erase_media(false)?; // overwrite everything
         } else {
             // try rewind/erase instead
+            // we also do this for LTO9+ to avoid reinitialization on FORMAT(04h)
             self.erase_media(fast)?
         }
@@ -571,9 +618,10 @@ impl SgTape {
         }
     }

-    pub fn wait_until_ready(&mut self) -> Result<(), Error> {
+    pub fn wait_until_ready(&mut self, timeout: Option<u64>) -> Result<(), Error> {
         let start = SystemTime::now();
-        let max_wait = std::time::Duration::new(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64, 0);
+        let timeout = timeout.unwrap_or(Self::SCSI_TAPE_DEFAULT_TIMEOUT as u64);
+        let max_wait = std::time::Duration::new(timeout, 0);

         loop {
             match self.test_unit_ready() {
@@ -603,10 +651,28 @@ impl SgTape {
         read_volume_statistics(&mut self.file)
     }

-    pub fn set_encryption(&mut self, key: Option<[u8; 32]>) -> Result<(), Error> {
-        self.encryption_key_loaded = key.is_some();
-
-        set_encryption(&mut self.file, key)
+    pub fn set_encryption(&mut self, key_data: Option<([u8; 32], Uuid)>) -> Result<(), Error> {
+        let key = if let Some((ref key, ref uuid)) = key_data {
+            // derive specialized key for each media-set
+            let mut tape_key = [0u8; 32];
+
+            let uuid_bytes: [u8; 16] = *uuid.as_bytes();
+
+            openssl::pkcs5::pbkdf2_hmac(
+                key,
+                &uuid_bytes,
+                10,
+                openssl::hash::MessageDigest::sha256(),
+                &mut tape_key,
+            )?;
+
+            Some(tape_key)
+        } else {
+            None
+        };
+
+        drive_set_encryption(&mut self.file, key)
     }
// Note: use alloc_page_aligned_buffer to alloc data transfer buffer // Note: use alloc_page_aligned_buffer to alloc data transfer buffer
@@ -960,15 +1026,6 @@ impl SgTape {
         }
     }
 }

-impl Drop for SgTape {
-    fn drop(&mut self) {
-        // For security reasons, clear the encryption key
-        if self.encryption_key_loaded {
-            let _ = self.set_encryption(None);
-        }
-    }
-}
-
 pub struct SgTapeReader<'a> {
     sg_tape: &'a mut SgTape,
     end_of_file: bool,
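As a side note (not part of this commit), the per-media-set key derivation that the new SgTape::set_encryption() performs above can be sketched as a standalone helper; the function name is made up, but the parameters mirror the hunk (PBKDF2-HMAC-SHA256, the media-set UUID as salt, 10 iterations, 256-bit output):

// Illustrative sketch only: derive the tape key the same way the new
// SgTape::set_encryption() does, so every media set gets its own 256-bit key.
fn derive_media_set_key(
    key: &[u8; 32],
    media_set_uuid: &proxmox_uuid::Uuid,
) -> Result<[u8; 32], anyhow::Error> {
    let mut tape_key = [0u8; 32];
    openssl::pkcs5::pbkdf2_hmac(
        key,                                    // configured encryption key
        media_set_uuid.as_bytes(),              // media-set UUID acts as the salt
        10,                                     // iteration count used by the patch
        openssl::hash::MessageDigest::sha256(),
        &mut tape_key,
    )?;
    Ok(tape_key)
}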


@@ -8,21 +8,10 @@ use proxmox_io::{ReadExt, WriteExt};
 use crate::sgutils2::{alloc_page_aligned_buffer, SgRaw};

-/// Test if drive supports hardware encryption
-///
-/// We search for AES_GCM algorithm with 256bits key.
-pub fn has_encryption<F: AsRawFd>(file: &mut F) -> bool {
-    let data = match sg_spin_data_encryption_caps(file) {
-        Ok(data) => data,
-        Err(_) => return false,
-    };
-    decode_spin_data_encryption_caps(&data).is_ok()
-}
-
 /// Set or clear encryption key
 ///
 /// We always use mixed mode,
-pub fn set_encryption<F: AsRawFd>(file: &mut F, key: Option<[u8; 32]>) -> Result<(), Error> {
+pub fn drive_set_encryption<F: AsRawFd>(file: &mut F, key: Option<[u8; 32]>) -> Result<(), Error> {
     let data = match sg_spin_data_encryption_caps(file) {
         Ok(data) => data,
         Err(_) if key.is_none() => {
@@ -57,6 +46,27 @@ pub fn set_encryption<F: AsRawFd>(file: &mut F, key: Option<[u8; 32]>) -> Result
     bail!("got unexpected encryption mode {:?}", status.mode);
 }
/// Returns if encryption is enabled on the drive
pub fn drive_get_encryption<F: AsRawFd>(file: &mut F) -> Result<bool, Error> {
let data = match sg_spin_data_encryption_status(file) {
Ok(data) => data,
Err(_) => {
// Assume device does not support HW encryption
return Ok(false);
}
};
let status = decode_spin_data_encryption_status(&data)?;
match status.mode {
// these three below have all encryption enabled, and only differ in how decryption is
// handled
DataEncryptionMode::On => Ok(true),
DataEncryptionMode::Mixed => Ok(true),
DataEncryptionMode::RawRead => Ok(true),
// currently, the mode below is the only one that has encryption actually disabled
DataEncryptionMode::Off => Ok(false),
}
}
#[derive(Endian)]
#[repr(C, packed)]
struct SspSetDataEncryptionPage {
@@ -187,7 +197,7 @@ fn sg_spin_data_encryption_caps<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Err
     .map(|v| v.to_vec())
 }

-#[derive(Debug)]
+#[derive(Debug, PartialEq, Eq)]
 enum DataEncryptionMode {
     On,
     Mixed,


@ -1,25 +0,0 @@
[package]
name = "proxmox-rrd"
version = "0.1.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
description = "Simple RRD database implementation."
[dev-dependencies]
proxmox-router = { workspace = true, features = ["cli", "server"] }
[dependencies]
anyhow.workspace = true
bitflags.workspace = true
crossbeam-channel.workspace = true
libc.workspace = true
log.workspace = true
nix.workspace = true
serde.workspace = true
serde_cbor.workspace = true
serde_json.workspace = true
proxmox-schema = { workspace = true, features = [ "api-macro" ] }
proxmox-sys.workspace = true
proxmox-time.workspace = true


@ -1,390 +0,0 @@
//! RRD toolkit - create/manage/update proxmox RRD (v2) file
use std::path::PathBuf;
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
use serde_json::json;
use proxmox_router::cli::{
complete_file_name, run_cli_command, CliCommand, CliCommandMap, CliEnvironment,
};
use proxmox_router::RpcEnvironment;
use proxmox_schema::{api, ApiStringFormat, ApiType, IntegerSchema, Schema, StringSchema};
use proxmox_sys::fs::CreateOptions;
use proxmox_rrd::rrd::{CF, DST, RRA, RRD};
pub const RRA_INDEX_SCHEMA: Schema = IntegerSchema::new("Index of the RRA.").minimum(0).schema();
pub const RRA_CONFIG_STRING_SCHEMA: Schema = StringSchema::new("RRA configuration")
.format(&ApiStringFormat::PropertyString(&RRAConfig::API_SCHEMA))
.schema();
#[api(
properties: {},
default_key: "cf",
)]
#[derive(Debug, Serialize, Deserialize)]
/// RRA configuration
pub struct RRAConfig {
/// Time resolution
pub r: u64,
pub cf: CF,
/// Number of data points
pub n: u64,
}
#[api(
input: {
properties: {
path: {
description: "The filename."
},
},
},
)]
/// Dump the RRD file in JSON format
pub fn dump_rrd(path: String) -> Result<(), Error> {
let rrd = RRD::load(&PathBuf::from(path), false)?;
serde_json::to_writer_pretty(std::io::stdout(), &rrd)?;
println!();
Ok(())
}
#[api(
input: {
properties: {
path: {
description: "The filename."
},
},
},
)]
/// RRD file information
pub fn rrd_info(path: String) -> Result<(), Error> {
let rrd = RRD::load(&PathBuf::from(path), false)?;
println!("DST: {:?}", rrd.source.dst);
for (i, rra) in rrd.rra_list.iter().enumerate() {
// use RRAConfig property string format
println!(
"RRA[{}]: {:?},r={},n={}",
i,
rra.cf,
rra.resolution,
rra.data.len()
);
}
Ok(())
}
#[api(
input: {
properties: {
path: {
description: "The filename."
},
time: {
description: "Update time.",
optional: true,
},
value: {
description: "Update value.",
},
},
},
)]
/// Update the RRD database
pub fn update_rrd(path: String, time: Option<u64>, value: f64) -> Result<(), Error> {
let path = PathBuf::from(path);
let time = time
.map(|v| v as f64)
.unwrap_or_else(proxmox_time::epoch_f64);
let mut rrd = RRD::load(&path, false)?;
rrd.update(time, value);
rrd.save(&path, CreateOptions::new(), false)?;
Ok(())
}
#[api(
input: {
properties: {
path: {
description: "The filename."
},
cf: {
type: CF,
},
resolution: {
description: "Time resolution",
},
start: {
description: "Start time. If not specified, we simply extract 10 data points.",
optional: true,
},
end: {
description: "End time (Unix Epoch). Default is the last update time.",
optional: true,
},
},
},
)]
/// Fetch data from the RRD file
pub fn fetch_rrd(
path: String,
cf: CF,
resolution: u64,
start: Option<u64>,
end: Option<u64>,
) -> Result<(), Error> {
let rrd = RRD::load(&PathBuf::from(path), false)?;
let data = rrd.extract_data(cf, resolution, start, end)?;
println!("{}", serde_json::to_string_pretty(&data)?);
Ok(())
}
#[api(
input: {
properties: {
path: {
description: "The filename."
},
"rra-index": {
schema: RRA_INDEX_SCHEMA,
},
},
},
)]
/// Return the Unix timestamp of the first time slot inside the
/// specified RRA (slot start time)
pub fn first_update_time(path: String, rra_index: usize) -> Result<(), Error> {
let rrd = RRD::load(&PathBuf::from(path), false)?;
if rra_index >= rrd.rra_list.len() {
bail!("rra-index is out of range");
}
let rra = &rrd.rra_list[rra_index];
let duration = (rra.data.len() as u64) * rra.resolution;
let first = rra.slot_start_time((rrd.source.last_update as u64).saturating_sub(duration));
println!("{}", first);
Ok(())
}
#[api(
input: {
properties: {
path: {
description: "The filename."
},
},
},
)]
/// Return the Unix timestamp of the last update
pub fn last_update_time(path: String) -> Result<(), Error> {
let rrd = RRD::load(&PathBuf::from(path), false)?;
println!("{}", rrd.source.last_update);
Ok(())
}
#[api(
input: {
properties: {
path: {
description: "The filename."
},
},
},
)]
/// Return the time and value from the last update
pub fn last_update(path: String) -> Result<(), Error> {
let rrd = RRD::load(&PathBuf::from(path), false)?;
let result = json!({
"time": rrd.source.last_update,
"value": rrd.source.last_value,
});
println!("{}", serde_json::to_string_pretty(&result)?);
Ok(())
}
#[api(
input: {
properties: {
dst: {
type: DST,
},
path: {
description: "The filename to create."
},
rra: {
description: "Configuration of contained RRAs.",
type: Array,
items: {
schema: RRA_CONFIG_STRING_SCHEMA,
}
},
},
},
)]
/// Create a new RRD file
pub fn create_rrd(dst: DST, path: String, rra: Vec<String>) -> Result<(), Error> {
let mut rra_list = Vec::new();
for item in rra.iter() {
let rra: RRAConfig =
serde_json::from_value(RRAConfig::API_SCHEMA.parse_property_string(item)?)?;
println!("GOT {:?}", rra);
rra_list.push(RRA::new(rra.cf, rra.r, rra.n as usize));
}
let path = PathBuf::from(path);
let rrd = RRD::new(dst, rra_list);
rrd.save(&path, CreateOptions::new(), false)?;
Ok(())
}
#[api(
input: {
properties: {
path: {
description: "The filename."
},
"rra-index": {
schema: RRA_INDEX_SCHEMA,
},
slots: {
description: "The number of slots you want to add or remove.",
type: i64,
},
},
},
)]
/// Resize. Change the number of data slots for the specified RRA.
pub fn resize_rrd(path: String, rra_index: usize, slots: i64) -> Result<(), Error> {
let path = PathBuf::from(&path);
let mut rrd = RRD::load(&path, false)?;
if rra_index >= rrd.rra_list.len() {
bail!("rra-index is out of range");
}
let rra = &rrd.rra_list[rra_index];
let new_slots = (rra.data.len() as i64) + slots;
if new_slots < 1 {
bail!("number of new slots is too small ('{}' < 1)", new_slots);
}
if new_slots > 1024 * 1024 {
bail!("number of new slots is too big ('{}' > 1M)", new_slots);
}
let rra_end = rra.slot_end_time(rrd.source.last_update as u64);
let rra_start = rra_end - rra.resolution * (rra.data.len() as u64);
let (start, reso, data) = rra
.extract_data(rra_start, rra_end, rrd.source.last_update)
.into();
let mut new_rra = RRA::new(rra.cf, rra.resolution, new_slots as usize);
new_rra.last_count = rra.last_count;
new_rra.insert_data(start, reso, data)?;
rrd.rra_list[rra_index] = new_rra;
rrd.save(&path, CreateOptions::new(), false)?;
Ok(())
}
fn main() -> Result<(), Error> {
let uid = nix::unistd::Uid::current();
let username = match nix::unistd::User::from_uid(uid)? {
Some(user) => user.name,
None => bail!("unable to get user name"),
};
let cmd_def = CliCommandMap::new()
.insert(
"create",
CliCommand::new(&API_METHOD_CREATE_RRD)
.arg_param(&["path"])
.completion_cb("path", complete_file_name),
)
.insert(
"dump",
CliCommand::new(&API_METHOD_DUMP_RRD)
.arg_param(&["path"])
.completion_cb("path", complete_file_name),
)
.insert(
"fetch",
CliCommand::new(&API_METHOD_FETCH_RRD)
.arg_param(&["path"])
.completion_cb("path", complete_file_name),
)
.insert(
"first",
CliCommand::new(&API_METHOD_FIRST_UPDATE_TIME)
.arg_param(&["path"])
.completion_cb("path", complete_file_name),
)
.insert(
"info",
CliCommand::new(&API_METHOD_RRD_INFO)
.arg_param(&["path"])
.completion_cb("path", complete_file_name),
)
.insert(
"last",
CliCommand::new(&API_METHOD_LAST_UPDATE_TIME)
.arg_param(&["path"])
.completion_cb("path", complete_file_name),
)
.insert(
"lastupdate",
CliCommand::new(&API_METHOD_LAST_UPDATE)
.arg_param(&["path"])
.completion_cb("path", complete_file_name),
)
.insert(
"resize",
CliCommand::new(&API_METHOD_RESIZE_RRD)
.arg_param(&["path"])
.completion_cb("path", complete_file_name),
)
.insert(
"update",
CliCommand::new(&API_METHOD_UPDATE_RRD)
.arg_param(&["path"])
.completion_cb("path", complete_file_name),
);
let mut rpcenv = CliEnvironment::new();
rpcenv.set_auth_id(Some(format!("{}@pam", username)));
run_cli_command(cmd_def, rpcenv, None);
Ok(())
}

View File

@ -1,448 +0,0 @@
use std::collections::BTreeSet;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
use std::thread::spawn;
use std::time::SystemTime;
use anyhow::{bail, format_err, Error};
use crossbeam_channel::{bounded, TryRecvError};
use proxmox_sys::fs::{create_path, CreateOptions};
use crate::rrd::{CF, DST, RRA, RRD};
use crate::Entry;
mod journal;
use journal::*;
mod rrd_map;
use rrd_map::*;
/// RRD cache - keep RRD data in RAM, but write updates to disk
///
/// This cache is designed to run as a single instance (no concurrent
/// access from other processes).
pub struct RRDCache {
config: Arc<CacheConfig>,
state: Arc<RwLock<JournalState>>,
rrd_map: Arc<RwLock<RRDMap>>,
}
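// Usage sketch (hypothetical base directory and callback; the methods used here
// are defined further below in this file):
//
//     fn load_cb(path: &Path, _rel_path: &str, dst: DST) -> RRD {
//         RRD::load(path, true).unwrap_or_else(|_| RRDCache::create_proxmox_backup_default_rrd(dst))
//     }
//
//     let cache = RRDCache::new("/var/lib/example/rrdb", None, None, 30.0, load_cb)?;
//     cache.apply_journal()?;
//     cache.update_value("host/cpu", proxmox_time::epoch_f64(), 0.15, DST::Gauge)?;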
pub(crate) struct CacheConfig {
apply_interval: f64,
basedir: PathBuf,
file_options: CreateOptions,
dir_options: CreateOptions,
}
impl RRDCache {
/// Creates a new instance
///
/// `basedir`: All files are stored relative to this path.
///
    /// `file_options`: Files are created with these options.
///
    /// `dir_options`: Directories are created with these options.
///
/// `apply_interval`: Commit journal after `apply_interval` seconds.
///
    /// `load_rrd_cb`: The callback function is used to load RRD files,
    /// and should return a newly generated RRD if the file does not
    /// exist (or is unreadable). This may generate RRDs with
/// different configurations (dependent on `rel_path`).
pub fn new<P: AsRef<Path>>(
basedir: P,
file_options: Option<CreateOptions>,
dir_options: Option<CreateOptions>,
apply_interval: f64,
load_rrd_cb: fn(path: &Path, rel_path: &str, dst: DST) -> RRD,
) -> Result<Self, Error> {
let basedir = basedir.as_ref().to_owned();
let file_options = file_options.unwrap_or_else(CreateOptions::new);
let dir_options = dir_options.unwrap_or_else(CreateOptions::new);
create_path(
&basedir,
Some(dir_options.clone()),
Some(dir_options.clone()),
)
.map_err(|err: Error| format_err!("unable to create rrdb stat dir - {}", err))?;
let config = Arc::new(CacheConfig {
basedir,
file_options,
dir_options,
apply_interval,
});
let state = JournalState::new(Arc::clone(&config))?;
let rrd_map = RRDMap::new(Arc::clone(&config), load_rrd_cb);
Ok(Self {
config: Arc::clone(&config),
state: Arc::new(RwLock::new(state)),
rrd_map: Arc::new(RwLock::new(rrd_map)),
})
}
/// Create a new RRD as used by the proxmox backup server
///
/// It contains the following RRAs:
///
/// * cf=average,r=60,n=1440 => 1day
/// * cf=maximum,r=60,n=1440 => 1day
/// * cf=average,r=30*60,n=1440 => 1month
/// * cf=maximum,r=30*60,n=1440 => 1month
/// * cf=average,r=6*3600,n=1440 => 1year
/// * cf=maximum,r=6*3600,n=1440 => 1year
/// * cf=average,r=7*86400,n=570 => 10years
    /// * cf=maximum,r=7*86400,n=570 => 10years
///
/// The resulting data file size is about 80KB.
pub fn create_proxmox_backup_default_rrd(dst: DST) -> RRD {
let rra_list = vec![
// 1 min * 1440 => 1 day
RRA::new(CF::Average, 60, 1440),
RRA::new(CF::Maximum, 60, 1440),
// 30 min * 1440 => 30 days ~ 1 month
RRA::new(CF::Average, 30 * 60, 1440),
RRA::new(CF::Maximum, 30 * 60, 1440),
// 6 h * 1440 => 360 days ~ 1 year
RRA::new(CF::Average, 6 * 3600, 1440),
RRA::new(CF::Maximum, 6 * 3600, 1440),
// 1 week * 570 => 10 years
RRA::new(CF::Average, 7 * 86400, 570),
RRA::new(CF::Maximum, 7 * 86400, 570),
];
RRD::new(dst, rra_list)
}
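    // Rough size check for the "about 80KB" figure above (each data slot is one f64):
    //   6 RRAs * 1440 slots * 8 bytes = 69,120 bytes
    //   2 RRAs *  570 slots * 8 bytes =  9,120 bytes
    //   total slot data ~ 78 kB, plus a small amount of CBOR metadata per RRA.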
/// Sync the journal data to disk (using `fdatasync` syscall)
pub fn sync_journal(&self) -> Result<(), Error> {
self.state.read().unwrap().sync_journal()
}
/// Apply and commit the journal. Should be used at server startup.
pub fn apply_journal(&self) -> Result<bool, Error> {
let config = Arc::clone(&self.config);
let state = Arc::clone(&self.state);
let rrd_map = Arc::clone(&self.rrd_map);
let mut state_guard = self.state.write().unwrap();
let journal_applied = state_guard.journal_applied;
if let Some(ref recv) = state_guard.apply_thread_result {
match recv.try_recv() {
Ok(Ok(())) => {
// finished without errors, OK
state_guard.apply_thread_result = None;
}
Ok(Err(err)) => {
// finished with errors, log them
log::error!("{}", err);
state_guard.apply_thread_result = None;
}
Err(TryRecvError::Empty) => {
// still running
return Ok(journal_applied);
}
Err(TryRecvError::Disconnected) => {
// crashed, start again
log::error!("apply journal thread crashed - try again");
state_guard.apply_thread_result = None;
}
}
}
let now = proxmox_time::epoch_f64();
let wants_commit = (now - state_guard.last_journal_flush) > self.config.apply_interval;
if journal_applied && !wants_commit {
return Ok(journal_applied);
}
state_guard.last_journal_flush = proxmox_time::epoch_f64();
let (sender, receiver) = bounded(1);
state_guard.apply_thread_result = Some(receiver);
spawn(move || {
let result = apply_and_commit_journal_thread(config, state, rrd_map, journal_applied)
.map_err(|err| err.to_string());
sender.send(result).unwrap();
});
Ok(journal_applied)
}
/// Update data in RAM and write file back to disk (journal)
pub fn update_value(
&self,
rel_path: &str,
time: f64,
value: f64,
dst: DST,
) -> Result<(), Error> {
let journal_applied = self.apply_journal()?;
self.state
.write()
.unwrap()
.append_journal_entry(time, value, dst, rel_path)?;
if journal_applied {
self.rrd_map
.write()
.unwrap()
.update(rel_path, time, value, dst, false)?;
}
Ok(())
}
/// Extract data from cached RRD
///
/// `start`: Start time. If not specified, we simply extract 10 data points.
///
/// `end`: End time. Default is to use the current time.
pub fn extract_cached_data(
&self,
base: &str,
name: &str,
cf: CF,
resolution: u64,
start: Option<u64>,
end: Option<u64>,
) -> Result<Option<Entry>, Error> {
self.rrd_map
.read()
.unwrap()
.extract_cached_data(base, name, cf, resolution, start, end)
}
}
fn apply_and_commit_journal_thread(
config: Arc<CacheConfig>,
state: Arc<RwLock<JournalState>>,
rrd_map: Arc<RwLock<RRDMap>>,
commit_only: bool,
) -> Result<(), Error> {
if commit_only {
state.write().unwrap().rotate_journal()?; // start new journal, keep old one
} else {
let start_time = SystemTime::now();
log::debug!("applying rrd journal");
match apply_journal_impl(Arc::clone(&state), Arc::clone(&rrd_map)) {
Ok(entries) => {
let elapsed = start_time.elapsed().unwrap().as_secs_f64();
log::info!(
"applied rrd journal ({} entries in {:.3} seconds)",
entries,
elapsed
);
}
Err(err) => bail!("apply rrd journal failed - {}", err),
}
}
let start_time = SystemTime::now();
log::debug!("commit rrd journal");
match commit_journal_impl(config, state, rrd_map) {
Ok(rrd_file_count) => {
let elapsed = start_time.elapsed().unwrap().as_secs_f64();
log::info!(
"rrd journal successfully committed ({} files in {:.3} seconds)",
rrd_file_count,
elapsed
);
}
Err(err) => bail!("rrd journal commit failed: {}", err),
}
Ok(())
}
fn apply_journal_lines(
state: Arc<RwLock<JournalState>>,
rrd_map: Arc<RwLock<RRDMap>>,
journal_name: &str, // used for logging
reader: &mut BufReader<File>,
lock_read_line: bool,
) -> Result<usize, Error> {
let mut linenr = 0;
    loop {
        let mut line = String::new();
        let len = if lock_read_line {
            let _lock = state.read().unwrap(); // make sure we read entire lines
            reader.read_line(&mut line)?
        } else {
            reader.read_line(&mut line)?
        };
        if len == 0 {
            break;
        }
        linenr += 1; // count only lines that were actually read
let entry: JournalEntry = match line.parse() {
Ok(entry) => entry,
Err(err) => {
log::warn!(
"unable to parse rrd journal '{}' line {} (skip) - {}",
journal_name,
linenr,
err,
);
continue; // skip unparsable lines
}
};
rrd_map.write().unwrap().update(
&entry.rel_path,
entry.time,
entry.value,
entry.dst,
true,
)?;
}
Ok(linenr)
}
fn apply_journal_impl(
state: Arc<RwLock<JournalState>>,
rrd_map: Arc<RwLock<RRDMap>>,
) -> Result<usize, Error> {
let mut lines = 0;
// Apply old journals first
let journal_list = state.read().unwrap().list_old_journals()?;
for entry in journal_list {
log::info!("apply old journal log {}", entry.name);
let file = std::fs::OpenOptions::new().read(true).open(&entry.path)?;
let mut reader = BufReader::new(file);
lines += apply_journal_lines(
Arc::clone(&state),
Arc::clone(&rrd_map),
&entry.name,
&mut reader,
false,
)?;
}
let mut journal = state.read().unwrap().open_journal_reader()?;
lines += apply_journal_lines(
Arc::clone(&state),
Arc::clone(&rrd_map),
"rrd.journal",
&mut journal,
true,
)?;
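    // Second pass: re-read the same (still open) journal under the write lock so
    // that entries appended while the first pass was running are applied as well,
    // then rotate the journal before releasing the lock.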
{
let mut state_guard = state.write().unwrap(); // block other writers
lines += apply_journal_lines(
Arc::clone(&state),
Arc::clone(&rrd_map),
"rrd.journal",
&mut journal,
false,
)?;
state_guard.rotate_journal()?; // start new journal, keep old one
// We need to apply the journal only once, because further updates
// are always directly applied.
state_guard.journal_applied = true;
}
Ok(lines)
}
fn fsync_file_or_dir(path: &Path) -> Result<(), Error> {
let file = std::fs::File::open(path)?;
nix::unistd::fsync(file.as_raw_fd())?;
Ok(())
}
pub(crate) fn fsync_file_and_parent(path: &Path) -> Result<(), Error> {
let file = std::fs::File::open(path)?;
nix::unistd::fsync(file.as_raw_fd())?;
if let Some(parent) = path.parent() {
fsync_file_or_dir(parent)?;
}
Ok(())
}
fn rrd_parent_dir(basedir: &Path, rel_path: &str) -> PathBuf {
let mut path = basedir.to_owned();
let rel_path = Path::new(rel_path);
if let Some(parent) = rel_path.parent() {
path.push(parent);
}
path
}
fn commit_journal_impl(
config: Arc<CacheConfig>,
state: Arc<RwLock<JournalState>>,
rrd_map: Arc<RwLock<RRDMap>>,
) -> Result<usize, Error> {
let files = rrd_map.read().unwrap().file_list();
let mut rrd_file_count = 0;
let mut errors = 0;
let mut dir_set = BTreeSet::new();
log::info!("write rrd data back to disk");
// save all RRDs - we only need a read lock here
// Note: no fsync here (we do it afterwards)
for rel_path in files.iter() {
let parent_dir = rrd_parent_dir(&config.basedir, rel_path);
dir_set.insert(parent_dir);
rrd_file_count += 1;
if let Err(err) = rrd_map.read().unwrap().flush_rrd_file(rel_path) {
errors += 1;
log::error!("unable to save rrd {}: {}", rel_path, err);
}
}
if errors != 0 {
bail!("errors during rrd flush - unable to commit rrd journal");
}
    // Important: We fsync files after writing all data! This increases
// the likelihood that files are already synced, so this is
// much faster (although we need to re-open the files).
log::info!("starting rrd data sync");
for rel_path in files.iter() {
let mut path = config.basedir.clone();
path.push(rel_path);
fsync_file_or_dir(&path)
.map_err(|err| format_err!("fsync rrd file {} failed - {}", rel_path, err))?;
}
// also fsync directories
for dir_path in dir_set {
fsync_file_or_dir(&dir_path)
.map_err(|err| format_err!("fsync rrd dir {:?} failed - {}", dir_path, err))?;
}
// if everything went ok, remove the old journal files
state.write().unwrap().remove_old_journals()?;
Ok(rrd_file_count)
}

View File

@ -1,200 +0,0 @@
use std::ffi::OsStr;
use std::fs::File;
use std::io::{BufReader, Write};
use std::os::unix::io::AsRawFd;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use anyhow::{bail, format_err, Error};
use crossbeam_channel::Receiver;
use nix::fcntl::OFlag;
use proxmox_sys::fs::atomic_open_or_create_file;
const RRD_JOURNAL_NAME: &str = "rrd.journal";
use crate::cache::CacheConfig;
use crate::rrd::DST;
// shared state behind RwLock
pub struct JournalState {
config: Arc<CacheConfig>,
journal: File,
pub last_journal_flush: f64,
pub journal_applied: bool,
pub apply_thread_result: Option<Receiver<Result<(), String>>>,
}
pub struct JournalEntry {
pub time: f64,
pub value: f64,
pub dst: DST,
pub rel_path: String,
}
impl FromStr for JournalEntry {
type Err = Error;
fn from_str(line: &str) -> Result<Self, Self::Err> {
let line = line.trim();
let parts: Vec<&str> = line.splitn(4, ':').collect();
if parts.len() != 4 {
bail!("wrong numper of components");
}
let time: f64 = parts[0]
.parse()
.map_err(|_| format_err!("unable to parse time"))?;
let value: f64 = parts[1]
.parse()
.map_err(|_| format_err!("unable to parse value"))?;
let dst: u8 = parts[2]
.parse()
.map_err(|_| format_err!("unable to parse data source type"))?;
let dst = match dst {
0 => DST::Gauge,
1 => DST::Derive,
_ => bail!("got strange value for data source type '{}'", dst),
};
let rel_path = parts[3].to_string();
Ok(JournalEntry {
time,
value,
dst,
rel_path,
})
}
}
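// Example (illustrative values): the line "1700000000.1:0.25:0:host/cpu" parses to
// time = 1700000000.1, value = 0.25, dst = DST::Gauge (0) and rel_path = "host/cpu";
// this matches the format written by `append_journal_entry` below.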
pub struct JournalFileInfo {
pub time: u64,
pub name: String,
pub path: PathBuf,
}
impl JournalState {
pub(crate) fn new(config: Arc<CacheConfig>) -> Result<Self, Error> {
let journal = JournalState::open_journal_writer(&config)?;
Ok(Self {
config,
journal,
last_journal_flush: 0.0,
journal_applied: false,
apply_thread_result: None,
})
}
pub fn sync_journal(&self) -> Result<(), Error> {
nix::unistd::fdatasync(self.journal.as_raw_fd())?;
Ok(())
}
pub fn append_journal_entry(
&mut self,
time: f64,
value: f64,
dst: DST,
rel_path: &str,
) -> Result<(), Error> {
let journal_entry = format!("{}:{}:{}:{}\n", time, value, dst as u8, rel_path);
self.journal.write_all(journal_entry.as_bytes())?;
Ok(())
}
pub fn open_journal_reader(&self) -> Result<BufReader<File>, Error> {
// fixme : dup self.journal instead??
let mut journal_path = self.config.basedir.clone();
journal_path.push(RRD_JOURNAL_NAME);
let flags = OFlag::O_CLOEXEC | OFlag::O_RDONLY;
let journal = atomic_open_or_create_file(
&journal_path,
flags,
&[],
self.config.file_options.clone(),
false,
)?;
Ok(BufReader::new(journal))
}
fn open_journal_writer(config: &CacheConfig) -> Result<File, Error> {
let mut journal_path = config.basedir.clone();
journal_path.push(RRD_JOURNAL_NAME);
let flags = OFlag::O_CLOEXEC | OFlag::O_WRONLY | OFlag::O_APPEND;
let journal = atomic_open_or_create_file(
&journal_path,
flags,
&[],
config.file_options.clone(),
false,
)?;
Ok(journal)
}
pub fn rotate_journal(&mut self) -> Result<(), Error> {
let mut journal_path = self.config.basedir.clone();
journal_path.push(RRD_JOURNAL_NAME);
let mut new_name = journal_path.clone();
let now = proxmox_time::epoch_i64();
new_name.set_extension(format!("journal-{:08x}", now));
std::fs::rename(journal_path, &new_name)?;
self.journal = Self::open_journal_writer(&self.config)?;
// make sure the old journal data landed on the disk
super::fsync_file_and_parent(&new_name)?;
Ok(())
}
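    // Example: with the default journal name this renames "rrd.journal" to something
    // like "rrd.journal-65bcf2a0" (lower-case hex epoch seconds), which is exactly
    // the pattern `list_old_journals` below looks for.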
pub fn remove_old_journals(&self) -> Result<(), Error> {
let journal_list = self.list_old_journals()?;
for entry in journal_list {
std::fs::remove_file(entry.path)?;
}
Ok(())
}
pub fn list_old_journals(&self) -> Result<Vec<JournalFileInfo>, Error> {
let mut list = Vec::new();
for entry in std::fs::read_dir(&self.config.basedir)? {
let entry = entry?;
let path = entry.path();
if !path.is_file() {
continue;
}
match path.file_stem() {
None => continue,
Some(stem) if stem != OsStr::new("rrd") => continue,
Some(_) => (),
}
if let Some(extension) = path.extension() {
if let Some(extension) = extension.to_str() {
if let Some(rest) = extension.strip_prefix("journal-") {
if let Ok(time) = u64::from_str_radix(rest, 16) {
list.push(JournalFileInfo {
time,
name: format!("rrd.{}", extension),
path: path.to_owned(),
});
}
}
}
}
}
list.sort_unstable_by_key(|entry| entry.time);
Ok(list)
}
}

View File

@ -1,97 +0,0 @@
use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;
use anyhow::{bail, Error};
use proxmox_sys::fs::create_path;
use crate::rrd::{CF, DST, RRD};
use super::CacheConfig;
use crate::Entry;
pub struct RRDMap {
config: Arc<CacheConfig>,
map: HashMap<String, RRD>,
load_rrd_cb: fn(path: &Path, rel_path: &str, dst: DST) -> RRD,
}
impl RRDMap {
pub(crate) fn new(
config: Arc<CacheConfig>,
load_rrd_cb: fn(path: &Path, rel_path: &str, dst: DST) -> RRD,
) -> Self {
Self {
config,
map: HashMap::new(),
load_rrd_cb,
}
}
pub fn update(
&mut self,
rel_path: &str,
time: f64,
value: f64,
dst: DST,
new_only: bool,
) -> Result<(), Error> {
if let Some(rrd) = self.map.get_mut(rel_path) {
if !new_only || time > rrd.last_update() {
rrd.update(time, value);
}
} else {
let mut path = self.config.basedir.clone();
path.push(rel_path);
create_path(
path.parent().unwrap(),
Some(self.config.dir_options.clone()),
Some(self.config.dir_options.clone()),
)?;
let mut rrd = (self.load_rrd_cb)(&path, rel_path, dst);
if !new_only || time > rrd.last_update() {
rrd.update(time, value);
}
self.map.insert(rel_path.to_string(), rrd);
}
Ok(())
}
pub fn file_list(&self) -> Vec<String> {
let mut list = Vec::new();
for rel_path in self.map.keys() {
list.push(rel_path.clone());
}
list
}
pub fn flush_rrd_file(&self, rel_path: &str) -> Result<(), Error> {
if let Some(rrd) = self.map.get(rel_path) {
let mut path = self.config.basedir.clone();
path.push(rel_path);
rrd.save(&path, self.config.file_options.clone(), true)
} else {
bail!("rrd file {} not loaded", rel_path);
}
}
pub fn extract_cached_data(
&self,
base: &str,
name: &str,
cf: CF,
resolution: u64,
start: Option<u64>,
end: Option<u64>,
) -> Result<Option<Entry>, Error> {
match self.map.get(&format!("{}/{}", base, name)) {
Some(rrd) => Ok(Some(rrd.extract_data(cf, resolution, start, end)?)),
None => Ok(None),
}
}
}

View File

@ -1,16 +0,0 @@
//! # Round Robin Database files
//!
//! ## Features
//!
//! * One file stores a single data source
//! * Stores data for different time resolution
//! * Simple cache implementation with journal support
mod rrd_v1;
pub mod rrd;
#[doc(inline)]
pub use rrd::Entry;
mod cache;
pub use cache::*;

View File

@ -1,694 +0,0 @@
//! # Proxmox RRD format version 2
//!
//! The new format uses
//! [CBOR](https://datatracker.ietf.org/doc/html/rfc8949) as storage
//! format. This way we can use the serde serialization framework,
//! which makes our code more flexible, much nicer and type safe.
//!
//! ## Features
//!
//! * Well defined data format [CBOR](https://datatracker.ietf.org/doc/html/rfc8949)
//! * Platform independent (big endian f64, hopefully a standard format?)
//! * Arbitrary number of RRAs (dynamically changeable)
use std::io::{Read, Write};
use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use std::path::Path;
use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};
use proxmox_schema::api;
use proxmox_sys::fs::{make_tmp_file, CreateOptions};
use crate::rrd_v1;
/// Proxmox RRD v2 file magic number
// openssl::sha::sha256(b"Proxmox Round Robin Database file v2.0")[0..8];
pub const PROXMOX_RRD_MAGIC_2_0: [u8; 8] = [224, 200, 228, 27, 239, 112, 122, 159];
#[api()]
#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// RRD data source type
pub enum DST {
/// Gauge values are stored unmodified.
Gauge,
/// Stores the difference to the previous value.
Derive,
/// Stores the difference to the previous value (like Derive), but
    /// detects counter overflow (and ignores that value)
Counter,
}
#[api()]
#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
/// Consolidation function
pub enum CF {
/// Average
Average,
/// Maximum
Maximum,
/// Minimum
Minimum,
/// Use the last value
Last,
}
#[derive(Serialize, Deserialize)]
/// Data source specification
pub struct DataSource {
/// Data source type
pub dst: DST,
/// Last update time (epoch)
pub last_update: f64,
/// Stores the last value, used to compute differential value for
/// derive/counters
pub last_value: f64,
}
/// An RRD entry.
///
/// Serializes as a tuple.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(
from = "(u64, u64, Vec<Option<f64>>)",
into = "(u64, u64, Vec<Option<f64>>)"
)]
pub struct Entry {
pub start: u64,
pub resolution: u64,
pub data: Vec<Option<f64>>,
}
impl Entry {
pub const fn new(start: u64, resolution: u64, data: Vec<Option<f64>>) -> Self {
Self {
start,
resolution,
data,
}
}
    /// Get a data point at a specific index. Also does bounds checking and returns `None` for
    /// out-of-bounds indices.
pub fn get(&self, idx: usize) -> Option<f64> {
self.data.get(idx).copied().flatten()
}
}
impl From<Entry> for (u64, u64, Vec<Option<f64>>) {
fn from(entry: Entry) -> (u64, u64, Vec<Option<f64>>) {
(entry.start, entry.resolution, entry.data)
}
}
impl From<(u64, u64, Vec<Option<f64>>)> for Entry {
fn from(data: (u64, u64, Vec<Option<f64>>)) -> Self {
Self::new(data.0, data.1, data.2)
}
}
impl DataSource {
    /// Create a new instance
pub fn new(dst: DST) -> Self {
Self {
dst,
last_update: 0.0,
last_value: f64::NAN,
}
}
fn compute_new_value(&mut self, time: f64, mut value: f64) -> Result<f64, Error> {
if time < 0.0 {
bail!("got negative time");
}
if time <= self.last_update {
bail!("time in past ({} < {})", time, self.last_update);
}
if value.is_nan() {
bail!("new value is NAN");
}
// derive counter value
let is_counter = self.dst == DST::Counter;
if is_counter || self.dst == DST::Derive {
let time_diff = time - self.last_update;
let diff = if self.last_value.is_nan() {
0.0
} else if is_counter && value < 0.0 {
bail!("got negative value for counter");
} else if is_counter && value < self.last_value {
// Note: We do not try automatic overflow corrections, but
// we update last_value anyways, so that we can compute the diff
// next time.
self.last_value = value;
bail!("counter overflow/reset detected");
} else {
value - self.last_value
};
self.last_value = value;
value = diff / time_diff;
} else {
self.last_value = value;
}
Ok(value)
}
}
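// Worked example for DST::Derive (illustrative numbers): with last_update = 100.0
// and last_value = 500.0, an update at time = 160.0 with value = 620.0 yields
// (620.0 - 500.0) / (160.0 - 100.0) = 2.0 per second; last_value is advanced here
// and last_update is advanced by the caller (see RRD::update below).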
#[derive(Serialize, Deserialize)]
/// Round Robin Archive
pub struct RRA {
    /// Number of seconds spanned by a single data entry.
pub resolution: u64,
    /// Consolidation function.
pub cf: CF,
/// Count values computed inside this update interval.
pub last_count: u64,
/// The actual data entries.
pub data: Vec<f64>,
}
impl RRA {
/// Creates a new instance
pub fn new(cf: CF, resolution: u64, points: usize) -> Self {
Self {
cf,
resolution,
last_count: 0,
data: vec![f64::NAN; points],
}
}
/// Data slot end time
pub fn slot_end_time(&self, time: u64) -> u64 {
self.resolution * (time / self.resolution + 1)
}
/// Data slot start time
pub fn slot_start_time(&self, time: u64) -> u64 {
self.resolution * (time / self.resolution)
}
/// Data slot index
pub fn slot(&self, time: u64) -> usize {
((time / self.resolution) as usize) % self.data.len()
}
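    // Example: with resolution = 60 and 5 data slots, time = 125 gives
    // slot_start_time(125) = 120, slot_end_time(125) = 180 and slot(125) = (125 / 60) % 5 = 2.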
/// Directly overwrite data slots.
///
    /// The caller needs to set the `last_update` value on the [DataSource] manually.
pub fn insert_data(
&mut self,
start: u64,
resolution: u64,
data: Vec<Option<f64>>,
) -> Result<(), Error> {
if resolution != self.resolution {
bail!("inser_data failed: got wrong resolution");
}
let mut index = self.slot(start);
for item in data {
if let Some(v) = item {
self.data[index] = v;
}
index += 1;
if index >= self.data.len() {
index = 0;
}
}
Ok(())
}
fn delete_old_slots(&mut self, time: f64, last_update: f64) {
let epoch = time as u64;
let last_update = last_update as u64;
let reso = self.resolution;
let num_entries = self.data.len() as u64;
let min_time = epoch.saturating_sub(num_entries * reso);
let min_time = self.slot_end_time(min_time);
let mut t = last_update.saturating_sub(num_entries * reso);
let mut index = self.slot(t);
for _ in 0..num_entries {
t += reso;
index += 1;
if index >= self.data.len() {
index = 0;
}
if t < min_time {
self.data[index] = f64::NAN;
} else {
break;
}
}
}
fn compute_new_value(&mut self, time: f64, last_update: f64, value: f64) {
let epoch = time as u64;
let last_update = last_update as u64;
let reso = self.resolution;
let index = self.slot(epoch);
let last_index = self.slot(last_update);
if (epoch - last_update) > reso || index != last_index {
self.last_count = 0;
}
let last_value = self.data[index];
if last_value.is_nan() {
self.last_count = 0;
}
let new_count = self.last_count.saturating_add(1);
if self.last_count == 0 {
self.data[index] = value;
self.last_count = 1;
} else {
let new_value = match self.cf {
CF::Maximum => {
if last_value > value {
last_value
} else {
value
}
}
CF::Minimum => {
if last_value < value {
last_value
} else {
value
}
}
CF::Last => value,
CF::Average => {
(last_value * (self.last_count as f64)) / (new_count as f64)
+ value / (new_count as f64)
}
};
self.data[index] = new_value;
self.last_count = new_count;
}
}
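    // Worked example for CF::Average: if the current slot holds 4.0 built from
    // last_count = 2 samples and another value 7.0 arrives in the same slot, the slot
    // becomes (4.0 * 2) / 3 + 7.0 / 3 = 5.0 and last_count becomes 3.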
/// Extract data
///
/// Extract data from `start` to `end`. The RRA itself does not
    /// store the `last_update` time, so you need to pass this as a
    /// parameter (see [DataSource]).
pub fn extract_data(&self, start: u64, end: u64, last_update: f64) -> Entry {
let last_update = last_update as u64;
let reso = self.resolution;
let num_entries = self.data.len() as u64;
let mut list = Vec::new();
let rrd_end = self.slot_end_time(last_update);
let rrd_start = rrd_end.saturating_sub(reso * num_entries);
let mut t = start;
let mut index = self.slot(t);
for _ in 0..num_entries {
if t > end {
break;
};
if t < rrd_start || t >= rrd_end {
list.push(None);
} else {
let value = self.data[index];
if value.is_nan() {
list.push(None);
} else {
list.push(Some(value));
}
}
t += reso;
index += 1;
if index >= self.data.len() {
index = 0;
}
}
Entry::new(start, reso, list)
}
}
#[derive(Serialize, Deserialize)]
/// Round Robin Database
pub struct RRD {
/// The data source definition
pub source: DataSource,
/// List of round robin archives
pub rra_list: Vec<RRA>,
}
impl RRD {
    /// Creates a new instance
pub fn new(dst: DST, rra_list: Vec<RRA>) -> RRD {
let source = DataSource::new(dst);
RRD { source, rra_list }
}
fn from_raw(raw: &[u8]) -> Result<Self, Error> {
if raw.len() < 8 {
bail!("not an rrd file - file is too small ({})", raw.len());
}
let rrd = if raw[0..8] == rrd_v1::PROXMOX_RRD_MAGIC_1_0 {
let v1 = rrd_v1::RRDv1::from_raw(raw)?;
v1.to_rrd_v2()
.map_err(|err| format_err!("unable to convert from old V1 format - {}", err))?
} else if raw[0..8] == PROXMOX_RRD_MAGIC_2_0 {
serde_cbor::from_slice(&raw[8..])
.map_err(|err| format_err!("unable to decode RRD file - {}", err))?
} else {
bail!("not an rrd file - unknown magic number");
};
if rrd.source.last_update < 0.0 {
bail!("rrd file has negative last_update time");
}
Ok(rrd)
}
/// Load data from a file
///
/// Setting `avoid_page_cache` uses
/// `fadvise(..,POSIX_FADV_DONTNEED)` to avoid keeping the data in
/// the linux page cache.
pub fn load(path: &Path, avoid_page_cache: bool) -> Result<Self, std::io::Error> {
let mut file = std::fs::File::open(path)?;
let buffer_size = file.metadata().map(|m| m.len() as usize + 1).unwrap_or(0);
let mut raw = Vec::with_capacity(buffer_size);
file.read_to_end(&mut raw)?;
if avoid_page_cache {
nix::fcntl::posix_fadvise(
file.as_raw_fd(),
0,
buffer_size as i64,
nix::fcntl::PosixFadviseAdvice::POSIX_FADV_DONTNEED,
)
.map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err.to_string()))?;
}
match Self::from_raw(&raw) {
Ok(rrd) => Ok(rrd),
Err(err) => Err(std::io::Error::new(
std::io::ErrorKind::Other,
err.to_string(),
)),
}
}
/// Store data into a file (atomic replace file)
///
/// Setting `avoid_page_cache` uses
/// `fadvise(..,POSIX_FADV_DONTNEED)` to avoid keeping the data in
/// the linux page cache.
pub fn save(
&self,
path: &Path,
options: CreateOptions,
avoid_page_cache: bool,
) -> Result<(), Error> {
let (fd, tmp_path) = make_tmp_file(path, options)?;
let mut file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
let mut try_block = || -> Result<(), Error> {
let mut data: Vec<u8> = Vec::new();
data.extend(PROXMOX_RRD_MAGIC_2_0);
serde_cbor::to_writer(&mut data, self)?;
file.write_all(&data)?;
if avoid_page_cache {
nix::fcntl::posix_fadvise(
file.as_raw_fd(),
0,
data.len() as i64,
nix::fcntl::PosixFadviseAdvice::POSIX_FADV_DONTNEED,
)?;
}
Ok(())
};
match try_block() {
Ok(()) => (),
error => {
let _ = nix::unistd::unlink(&tmp_path);
return error;
}
}
if let Err(err) = std::fs::rename(&tmp_path, path) {
let _ = nix::unistd::unlink(&tmp_path);
bail!("Atomic rename failed - {}", err);
}
Ok(())
}
/// Returns the last update time.
pub fn last_update(&self) -> f64 {
self.source.last_update
}
/// Update the value (in memory)
///
/// Note: This does not call [Self::save].
pub fn update(&mut self, time: f64, value: f64) {
let value = match self.source.compute_new_value(time, value) {
Ok(value) => value,
Err(err) => {
log::error!("rrd update failed: {}", err);
return;
}
};
let last_update = self.source.last_update;
self.source.last_update = time;
for rra in self.rra_list.iter_mut() {
rra.delete_old_slots(time, last_update);
rra.compute_new_value(time, last_update, value);
}
}
/// Extract data from the archive
///
/// This selects the RRA with specified [CF] and (minimum)
    /// resolution, and extracts data from `start` to `end`.
///
/// `start`: Start time. If not specified, we simply extract 10 data points.
/// `end`: End time. Default is to use the current time.
pub fn extract_data(
&self,
cf: CF,
resolution: u64,
start: Option<u64>,
end: Option<u64>,
) -> Result<Entry, Error> {
let mut rra: Option<&RRA> = None;
for item in self.rra_list.iter() {
if item.cf != cf {
continue;
}
if item.resolution > resolution {
continue;
}
if let Some(current) = rra {
if item.resolution > current.resolution {
rra = Some(item);
}
} else {
rra = Some(item);
}
}
match rra {
Some(rra) => {
let end = end.unwrap_or_else(|| proxmox_time::epoch_f64() as u64);
let start = start.unwrap_or_else(|| end.saturating_sub(10 * rra.resolution));
Ok(rra.extract_data(start, end, self.source.last_update))
}
None => bail!("unable to find RRA suitable ({:?}:{})", cf, resolution),
}
}
}
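// Example of the RRA selection in extract_data: with CF::Average archives at
// resolutions 60 and 1800, a request for resolution = 600 skips the 1800s archive
// (too coarse) and uses the 60s one, while a request for resolution = 1800 uses the
// 1800s archive, i.e. the coarsest one not exceeding the requested resolution.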
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn basic_rra_maximum_gauge_test() -> Result<(), Error> {
let rra = RRA::new(CF::Maximum, 60, 5);
let mut rrd = RRD::new(DST::Gauge, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, i as f64);
}
let Entry {
start,
resolution,
data,
} = rrd.extract_data(CF::Maximum, 60, Some(0), Some(5 * 60))?;
assert_eq!(start, 0);
assert_eq!(resolution, 60);
assert_eq!(data, [None, Some(3.0), Some(5.0), Some(7.0), Some(9.0)]);
Ok(())
}
#[test]
fn basic_rra_minimum_gauge_test() -> Result<(), Error> {
let rra = RRA::new(CF::Minimum, 60, 5);
let mut rrd = RRD::new(DST::Gauge, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, i as f64);
}
let Entry {
start,
resolution,
data,
} = rrd.extract_data(CF::Minimum, 60, Some(0), Some(5 * 60))?;
assert_eq!(start, 0);
assert_eq!(resolution, 60);
assert_eq!(data, [None, Some(2.0), Some(4.0), Some(6.0), Some(8.0)]);
Ok(())
}
#[test]
fn basic_rra_last_gauge_test() -> Result<(), Error> {
let rra = RRA::new(CF::Last, 60, 5);
let mut rrd = RRD::new(DST::Gauge, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, i as f64);
}
assert!(
rrd.extract_data(CF::Average, 60, Some(0), Some(5 * 60))
.is_err(),
"CF::Average should not exist"
);
let Entry {
start,
resolution,
data,
} = rrd.extract_data(CF::Last, 60, Some(0), Some(20 * 60))?;
assert_eq!(start, 0);
assert_eq!(resolution, 60);
assert_eq!(data, [None, Some(3.0), Some(5.0), Some(7.0), Some(9.0)]);
Ok(())
}
#[test]
fn basic_rra_average_derive_test() -> Result<(), Error> {
let rra = RRA::new(CF::Average, 60, 5);
let mut rrd = RRD::new(DST::Derive, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, (i * 60) as f64);
}
let Entry {
start,
resolution,
data,
} = rrd.extract_data(CF::Average, 60, Some(60), Some(5 * 60))?;
assert_eq!(start, 60);
assert_eq!(resolution, 60);
assert_eq!(data, [Some(1.0), Some(2.0), Some(2.0), Some(2.0), None]);
Ok(())
}
#[test]
fn basic_rra_average_gauge_test() -> Result<(), Error> {
let rra = RRA::new(CF::Average, 60, 5);
let mut rrd = RRD::new(DST::Gauge, vec![rra]);
for i in 2..10 {
rrd.update((i as f64) * 30.0, i as f64);
}
let Entry {
start,
resolution,
data,
} = rrd.extract_data(CF::Average, 60, Some(60), Some(5 * 60))?;
assert_eq!(start, 60);
assert_eq!(resolution, 60);
assert_eq!(data, [Some(2.5), Some(4.5), Some(6.5), Some(8.5), None]);
for i in 10..14 {
rrd.update((i as f64) * 30.0, i as f64);
}
let Entry {
start,
resolution,
data,
} = rrd.extract_data(CF::Average, 60, Some(60), Some(5 * 60))?;
assert_eq!(start, 60);
assert_eq!(resolution, 60);
assert_eq!(data, [None, Some(4.5), Some(6.5), Some(8.5), Some(10.5)]);
let Entry {
start,
resolution,
data,
} = rrd.extract_data(CF::Average, 60, Some(3 * 60), Some(8 * 60))?;
assert_eq!(start, 3 * 60);
assert_eq!(resolution, 60);
assert_eq!(data, [Some(6.5), Some(8.5), Some(10.5), Some(12.5), None]);
// add much newer value (should delete all previous/outdated value)
let i = 100;
rrd.update((i as f64) * 30.0, i as f64);
println!("TEST {:?}", serde_json::to_string_pretty(&rrd));
let Entry {
start,
resolution,
data,
} = rrd.extract_data(CF::Average, 60, Some(100 * 30), Some(100 * 30 + 5 * 60))?;
assert_eq!(start, 100 * 30);
assert_eq!(resolution, 60);
assert_eq!(data, [Some(100.0), None, None, None, None]);
// extract with end time smaller than start time
let Entry {
start,
resolution,
data,
} = rrd.extract_data(CF::Average, 60, Some(100 * 30), Some(60))?;
assert_eq!(start, 100 * 30);
assert_eq!(resolution, 60);
assert_eq!(data, []);
Ok(())
}
}

View File

@ -1,295 +0,0 @@
use std::io::Read;
use anyhow::Error;
use bitflags::bitflags;
/// The number of data entries per RRA
pub const RRD_DATA_ENTRIES: usize = 70;
/// Proxmox RRD file magic number
// openssl::sha::sha256(b"Proxmox Round Robin Database file v1.0")[0..8];
pub const PROXMOX_RRD_MAGIC_1_0: [u8; 8] = [206, 46, 26, 212, 172, 158, 5, 186];
use crate::rrd::{DataSource, CF, DST, RRA, RRD};
bitflags! {
/// Flags to specify the data source type and consolidation function
pub struct RRAFlags: u64 {
// Data Source Types
const DST_GAUGE = 1;
const DST_DERIVE = 2;
const DST_COUNTER = 4;
const DST_MASK = 255; // first 8 bits
// Consolidation Functions
const CF_AVERAGE = 1 << 8;
const CF_MAX = 2 << 8;
const CF_MASK = 255 << 8;
}
}
/// Round Robin Archive with [RRD_DATA_ENTRIES] data slots.
///
/// This data structure is used inside [RRD] and directly written to the
/// RRD files.
#[repr(C)]
pub struct RRAv1 {
    /// Defines the data source type and consolidation function
pub flags: RRAFlags,
/// Resolution (seconds)
pub resolution: u64,
/// Last update time (epoch)
pub last_update: f64,
/// Count values computed inside this update interval
pub last_count: u64,
/// Stores the last value, used to compute differential value for derive/counters
pub counter_value: f64,
/// Data slots
pub data: [f64; RRD_DATA_ENTRIES],
}
impl RRAv1 {
fn extract_data(&self) -> (u64, u64, Vec<Option<f64>>) {
let reso = self.resolution;
let mut list = Vec::new();
let rra_end = reso * ((self.last_update as u64) / reso);
let rra_start = rra_end - reso * (RRD_DATA_ENTRIES as u64);
let mut t = rra_start;
let mut index = ((t / reso) % (RRD_DATA_ENTRIES as u64)) as usize;
for _ in 0..RRD_DATA_ENTRIES {
let value = self.data[index];
if value.is_nan() {
list.push(None);
} else {
list.push(Some(value));
}
t += reso;
index = (index + 1) % RRD_DATA_ENTRIES;
}
(rra_start, reso, list)
}
}
/// Round Robin Database file format with fixed number of [RRA]s
#[repr(C)]
// Note: Avoid alignment problems by using 8-byte types only
pub struct RRDv1 {
/// The magic number to identify the file type
pub magic: [u8; 8],
/// Hourly data (average values)
pub hour_avg: RRAv1,
/// Hourly data (maximum values)
pub hour_max: RRAv1,
    /// Daily data (average values)
pub day_avg: RRAv1,
    /// Daily data (maximum values)
pub day_max: RRAv1,
/// Weekly data (average values)
pub week_avg: RRAv1,
/// Weekly data (maximum values)
pub week_max: RRAv1,
/// Monthly data (average values)
pub month_avg: RRAv1,
/// Monthly data (maximum values)
pub month_max: RRAv1,
/// Yearly data (average values)
pub year_avg: RRAv1,
/// Yearly data (maximum values)
pub year_max: RRAv1,
}
impl RRDv1 {
pub fn from_raw(mut raw: &[u8]) -> Result<Self, std::io::Error> {
let expected_len = std::mem::size_of::<RRDv1>();
if raw.len() != expected_len {
let msg = format!("wrong data size ({} != {})", raw.len(), expected_len);
return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
}
let mut rrd: RRDv1 = unsafe { std::mem::zeroed() };
unsafe {
let rrd_slice =
std::slice::from_raw_parts_mut(&mut rrd as *mut _ as *mut u8, expected_len);
raw.read_exact(rrd_slice)?;
}
if rrd.magic != PROXMOX_RRD_MAGIC_1_0 {
let msg = "wrong magic number".to_string();
return Err(std::io::Error::new(std::io::ErrorKind::Other, msg));
}
Ok(rrd)
}
pub fn to_rrd_v2(&self) -> Result<RRD, Error> {
let mut rra_list = Vec::new();
// old format v1:
//
// hour 1 min, 70 points
// day 30 min, 70 points
// week 3 hours, 70 points
// month 12 hours, 70 points
// year 1 week, 70 points
//
// new default for RRD v2:
//
// day 1 min, 1440 points
// month 30 min, 1440 points
// year 365 min (6h), 1440 points
// decade 1 week, 570 points
// Linear extrapolation
fn extrapolate_data(
start: u64,
reso: u64,
factor: u64,
data: Vec<Option<f64>>,
) -> (u64, u64, Vec<Option<f64>>) {
let mut new = Vec::new();
for i in 0..data.len() {
let mut next = i + 1;
if next >= data.len() {
next = 0
};
let v = data[i];
let v1 = data[next];
match (v, v1) {
(Some(v), Some(v1)) => {
let diff = (v1 - v) / (factor as f64);
for j in 0..factor {
new.push(Some(v + diff * (j as f64)));
}
}
(Some(v), None) => {
new.push(Some(v));
for _ in 0..factor - 1 {
new.push(None);
}
}
(None, Some(v1)) => {
for _ in 0..factor - 1 {
new.push(None);
}
new.push(Some(v1));
}
(None, None) => {
for _ in 0..factor {
new.push(None);
}
}
}
}
(start, reso / factor, new)
}
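        // Worked example: with factor = 30 and two neighbouring v1 slots Some(0.0) and
        // Some(30.0), the finer v2 slots are filled with 0.0, 1.0, ..., 29.0 (linear
        // interpolation towards the next coarse point); if the next slot is None, only
        // the original value is kept and the remaining fine slots are padded with None.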
// Try to convert to new, higher capacity format
        // compute daily average (merge old self.day_avg and self.hour_avg)
let mut day_avg = RRA::new(CF::Average, 60, 1440);
let (start, reso, data) = self.day_avg.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 30, data);
day_avg.insert_data(start, reso, data)?;
let (start, reso, data) = self.hour_avg.extract_data();
day_avg.insert_data(start, reso, data)?;
        // compute daily maximum (merge old self.day_max and self.hour_max)
let mut day_max = RRA::new(CF::Maximum, 60, 1440);
let (start, reso, data) = self.day_max.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 30, data);
day_max.insert_data(start, reso, data)?;
let (start, reso, data) = self.hour_max.extract_data();
day_max.insert_data(start, reso, data)?;
// compute monthly average (merge old self.month_avg,
// self.week_avg and self.day_avg)
let mut month_avg = RRA::new(CF::Average, 30 * 60, 1440);
let (start, reso, data) = self.month_avg.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 24, data);
month_avg.insert_data(start, reso, data)?;
let (start, reso, data) = self.week_avg.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 6, data);
month_avg.insert_data(start, reso, data)?;
let (start, reso, data) = self.day_avg.extract_data();
month_avg.insert_data(start, reso, data)?;
// compute monthly maximum (merge old self.month_max,
// self.week_max and self.day_max)
let mut month_max = RRA::new(CF::Maximum, 30 * 60, 1440);
let (start, reso, data) = self.month_max.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 24, data);
month_max.insert_data(start, reso, data)?;
let (start, reso, data) = self.week_max.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 6, data);
month_max.insert_data(start, reso, data)?;
let (start, reso, data) = self.day_max.extract_data();
month_max.insert_data(start, reso, data)?;
// compute yearly average (merge old self.year_avg)
let mut year_avg = RRA::new(CF::Average, 6 * 3600, 1440);
let (start, reso, data) = self.year_avg.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 28, data);
year_avg.insert_data(start, reso, data)?;
        // compute yearly maximum (merge old self.year_max)
let mut year_max = RRA::new(CF::Maximum, 6 * 3600, 1440);
let (start, reso, data) = self.year_max.extract_data();
let (start, reso, data) = extrapolate_data(start, reso, 28, data);
year_max.insert_data(start, reso, data)?;
// compute decade average (merge old self.year_avg)
let mut decade_avg = RRA::new(CF::Average, 7 * 86400, 570);
let (start, reso, data) = self.year_avg.extract_data();
decade_avg.insert_data(start, reso, data)?;
// compute decade maximum (merge old self.year_max)
let mut decade_max = RRA::new(CF::Maximum, 7 * 86400, 570);
let (start, reso, data) = self.year_max.extract_data();
decade_max.insert_data(start, reso, data)?;
rra_list.push(day_avg);
rra_list.push(day_max);
rra_list.push(month_avg);
rra_list.push(month_max);
rra_list.push(year_avg);
rra_list.push(year_max);
rra_list.push(decade_avg);
rra_list.push(decade_max);
// use values from hour_avg for source (all RRAv1 must have the same config)
let dst = if self.hour_avg.flags.contains(RRAFlags::DST_COUNTER) {
DST::Counter
} else if self.hour_avg.flags.contains(RRAFlags::DST_DERIVE) {
DST::Derive
} else {
DST::Gauge
};
let source = DataSource {
dst,
last_value: f64::NAN,
last_update: self.hour_avg.last_update, // IMPORTANT!
};
Ok(RRD { source, rra_list })
}
}

View File

@ -1,56 +0,0 @@
use std::path::Path;
use std::process::Command;
use anyhow::{bail, Error};
use proxmox_rrd::rrd::RRD;
use proxmox_sys::fs::CreateOptions;
fn compare_file(fn1: &str, fn2: &str) -> Result<(), Error> {
let status = Command::new("/usr/bin/cmp")
.arg(fn1)
.arg(fn2)
.status()
.expect("failed to execute process");
if !status.success() {
bail!("file compare failed");
}
Ok(())
}
const RRD_V1_FN: &str = "./tests/testdata/cpu.rrd_v1";
const RRD_V2_FN: &str = "./tests/testdata/cpu.rrd_v2";
// make sure we can load and convert RRD v1
#[test]
fn upgrade_from_rrd_v1() -> Result<(), Error> {
let rrd = RRD::load(Path::new(RRD_V1_FN), true)?;
const RRD_V2_NEW_FN: &str = "./tests/testdata/cpu.rrd_v2.upgraded";
let new_path = Path::new(RRD_V2_NEW_FN);
rrd.save(new_path, CreateOptions::new(), true)?;
let result = compare_file(RRD_V2_FN, RRD_V2_NEW_FN);
let _ = std::fs::remove_file(RRD_V2_NEW_FN);
result?;
Ok(())
}
// make sure we can load and save RRD v2
#[test]
fn load_and_save_rrd_v2() -> Result<(), Error> {
let rrd = RRD::load(Path::new(RRD_V2_FN), true)?;
const RRD_V2_NEW_FN: &str = "./tests/testdata/cpu.rrd_v2.saved";
let new_path = Path::new(RRD_V2_NEW_FN);
rrd.save(new_path, CreateOptions::new(), true)?;
let result = compare_file(RRD_V2_FN, RRD_V2_NEW_FN);
let _ = std::fs::remove_file(RRD_V2_NEW_FN);
result?;
Ok(())
}

Binary file not shown.

Binary file not shown.

View File

@ -116,6 +116,7 @@ impl AcmeClient {
tos_agreed: bool, tos_agreed: bool,
contact: Vec<String>, contact: Vec<String>,
rsa_bits: Option<u32>, rsa_bits: Option<u32>,
eab_creds: Option<(String, String)>,
) -> Result<&'a Account, anyhow::Error> { ) -> Result<&'a Account, anyhow::Error> {
self.tos = if tos_agreed { self.tos = if tos_agreed {
self.terms_of_service_url().await?.map(str::to_owned) self.terms_of_service_url().await?.map(str::to_owned)
@ -123,10 +124,14 @@ impl AcmeClient {
None None
}; };
let account = Account::creator() let mut account = Account::creator()
.set_contacts(contact) .set_contacts(contact)
.agree_to_tos(tos_agreed); .agree_to_tos(tos_agreed);
if let Some((eab_kid, eab_hmac_key)) = eab_creds {
account = account.set_eab_credentials(eab_kid, eab_hmac_key)?;
}
let account = if let Some(bits) = rsa_bits { let account = if let Some(bits) = rsa_bits {
account.generate_rsa_key(bits)? account.generate_rsa_key(bits)?
} else { } else {
@ -572,7 +577,7 @@ impl AcmeClient {
Self::execute(&mut self.http_client, request, &mut self.nonce).await Self::execute(&mut self.http_client, request, &mut self.nonce).await
} }
async fn directory(&mut self) -> Result<&Directory, Error> { pub async fn directory(&mut self) -> Result<&Directory, Error> {
Ok(Self::get_directory( Ok(Self::get_directory(
&mut self.http_client, &mut self.http_client,
&self.directory_url, &self.directory_url,

View File

@ -182,6 +182,16 @@ fn account_contact_from_string(s: &str) -> Vec<String> {
description: "The ACME Directory.", description: "The ACME Directory.",
optional: true, optional: true,
}, },
eab_kid: {
type: String,
description: "Key Identifier for External Account Binding.",
optional: true,
},
eab_hmac_key: {
type: String,
description: "HMAC Key for External Account Binding.",
optional: true,
}
}, },
}, },
access: { access: {
@ -196,6 +206,8 @@ fn register_account(
contact: String, contact: String,
tos_url: Option<String>, tos_url: Option<String>,
directory: Option<String>, directory: Option<String>,
eab_kid: Option<String>,
eab_hmac_key: Option<String>,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> { ) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@ -204,6 +216,15 @@ fn register_account(
AcmeAccountName::from_string_unchecked("default".to_string()) AcmeAccountName::from_string_unchecked("default".to_string())
}); });
// TODO: this should be done via the api definition, but
// the api schema currently lacks this ability (2023-11-06)
if eab_kid.is_some() != eab_hmac_key.is_some() {
http_bail!(
BAD_REQUEST,
"either both or none of 'eab_kid' and 'eab_hmac_key' have to be set."
);
}
if Path::new(&crate::config::acme::account_path(&name)).exists() { if Path::new(&crate::config::acme::account_path(&name)).exists() {
http_bail!(BAD_REQUEST, "account {} already exists", name); http_bail!(BAD_REQUEST, "account {} already exists", name);
} }
@ -224,8 +245,15 @@ fn register_account(
task_log!(worker, "Registering ACME account '{}'...", &name); task_log!(worker, "Registering ACME account '{}'...", &name);
let account = let account = do_register_account(
do_register_account(&mut client, &name, tos_url.is_some(), contact, None).await?; &mut client,
&name,
tos_url.is_some(),
contact,
None,
eab_kid.zip(eab_hmac_key),
)
.await?;
task_log!( task_log!(
worker, worker,
@ -244,10 +272,11 @@ pub async fn do_register_account<'a>(
agree_to_tos: bool, agree_to_tos: bool,
contact: String, contact: String,
rsa_bits: Option<u32>, rsa_bits: Option<u32>,
eab_creds: Option<(String, String)>,
) -> Result<&'a Account, Error> { ) -> Result<&'a Account, Error> {
let contact = account_contact_from_string(&contact); let contact = account_contact_from_string(&contact);
client client
.new_account(name, agree_to_tos, contact, rsa_bits) .new_account(name, agree_to_tos, contact, rsa_bits, eab_creds)
.await .await
} }
@ -460,9 +489,11 @@ pub struct PluginConfig {
ty: String, ty: String,
/// DNS Api name. /// DNS Api name.
#[serde(skip_serializing_if = "Option::is_none", default)]
api: Option<String>, api: Option<String>,
/// Plugin configuration data. /// Plugin configuration data.
#[serde(skip_serializing_if = "Option::is_none", default)]
data: Option<String>, data: Option<String>,
/// Extra delay in seconds to wait before requesting validation. /// Extra delay in seconds to wait before requesting validation.

View File

@ -138,6 +138,8 @@ pub fn list_changers(
pub enum DeletableProperty { pub enum DeletableProperty {
/// Delete export-slots. /// Delete export-slots.
ExportSlots, ExportSlots,
/// Delete eject-before-unload.
EjectBeforeUnload,
} }
#[api( #[api(
@ -194,6 +196,9 @@ pub fn update_changer(
DeletableProperty::ExportSlots => { DeletableProperty::ExportSlots => {
data.export_slots = None; data.export_slots = None;
} }
DeletableProperty::EjectBeforeUnload => {
data.eject_before_unload = None;
}
} }
} }
} }
@ -222,6 +227,10 @@ pub fn update_changer(
} }
} }
if let Some(eject_before_unload) = update.eject_before_unload {
data.eject_before_unload = Some(eject_before_unload);
}
config.set_data(&name, "changer", &data)?; config.set_data(&name, "changer", &data)?;
pbs_config::drive::save_config(&config)?; pbs_config::drive::save_config(&config)?;

View File

@ -64,71 +64,39 @@ const ACME_SUBDIRS: SubdirMap = &[(
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct CertificateInfo { pub struct CertificateInfo {
/// Certificate file name. /// Certificate file name.
#[serde(skip_serializing_if = "Option::is_none")] pub filename: String,
filename: Option<String>,
/// Certificate subject name. /// Certificate subject name.
subject: String, pub subject: String,
/// List of certificate's SubjectAlternativeName entries. /// List of certificate's SubjectAlternativeName entries.
san: Vec<String>, pub san: Vec<String>,
/// Certificate issuer name. /// Certificate issuer name.
issuer: String, pub issuer: String,
/// Certificate's notBefore timestamp (UNIX epoch). /// Certificate's notBefore timestamp (UNIX epoch).
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
notbefore: Option<i64>, pub notbefore: Option<i64>,
/// Certificate's notAfter timestamp (UNIX epoch). /// Certificate's notAfter timestamp (UNIX epoch).
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
notafter: Option<i64>, pub notafter: Option<i64>,
/// Certificate in PEM format. /// Certificate in PEM format.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pem: Option<String>, pub pem: Option<String>,
/// Certificate's public key algorithm. /// Certificate's public key algorithm.
public_key_type: String, pub public_key_type: String,
/// Certificate's public key size if available. /// Certificate's public key size if available.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
public_key_bits: Option<u32>, pub public_key_bits: Option<u32>,
/// The SSL Fingerprint. /// The SSL Fingerprint.
fingerprint: Option<String>, #[serde(skip_serializing_if = "Option::is_none")]
} pub fingerprint: Option<String>,
impl TryFrom<&cert::CertInfo> for CertificateInfo {
type Error = Error;
fn try_from(info: &cert::CertInfo) -> Result<Self, Self::Error> {
let pubkey = info.public_key()?;
Ok(Self {
filename: None,
subject: info.subject_name()?,
san: info
.subject_alt_names()
.map(|san| {
san.into_iter()
// FIXME: Support `.ipaddress()`?
.filter_map(|name| name.dnsname().map(str::to_owned))
.collect()
})
.unwrap_or_default(),
issuer: info.issuer_name()?,
notbefore: info.not_before_unix().ok(),
notafter: info.not_after_unix().ok(),
pem: None,
public_key_type: openssl::nid::Nid::from_raw(pubkey.id().as_raw())
.long_name()
.unwrap_or("<unsupported key type>")
.to_owned(),
public_key_bits: Some(pubkey.bits()),
fingerprint: Some(info.fingerprint()?),
})
}
} }
fn get_certificate_pem() -> Result<String, Error> { fn get_certificate_pem() -> Result<String, Error> {
@ -162,12 +130,31 @@ fn pem_to_cert_info(pem: &[u8]) -> Result<cert::CertInfo, Error> {
/// Get certificate info. /// Get certificate info.
pub fn get_info() -> Result<Vec<CertificateInfo>, Error> { pub fn get_info() -> Result<Vec<CertificateInfo>, Error> {
let cert_pem = get_certificate_pem()?; let cert_pem = get_certificate_pem()?;
let cert = pem_to_cert_info(cert_pem.as_bytes())?; let info = pem_to_cert_info(cert_pem.as_bytes())?;
let pubkey = info.public_key()?;
Ok(vec![CertificateInfo { Ok(vec![CertificateInfo {
filename: Some("proxy.pem".to_string()), // we only have the one filename: "proxy.pem".to_string(), // we only have the one
pem: Some(cert_pem), pem: Some(cert_pem),
..CertificateInfo::try_from(&cert)? subject: info.subject_name()?,
san: info
.subject_alt_names()
.map(|san| {
san.into_iter()
// FIXME: Support `.ipaddress()`?
.filter_map(|name| name.dnsname().map(str::to_owned))
.collect()
})
.unwrap_or_default(),
issuer: info.issuer_name()?,
notbefore: info.not_before_unix().ok(),
notafter: info.not_after_unix().ok(),
public_key_type: openssl::nid::Nid::from_raw(pubkey.id().as_raw())
.long_name()
.unwrap_or("<unsupported key type>")
.to_owned(),
public_key_bits: Some(pubkey.bits()),
fingerprint: Some(info.fingerprint()?),
}]) }])
} }
@ -176,7 +163,10 @@ pub fn get_info() -> Result<Vec<CertificateInfo>, Error> {
properties: { properties: {
node: { schema: NODE_SCHEMA }, node: { schema: NODE_SCHEMA },
certificates: { description: "PEM encoded certificate (chain)." }, certificates: { description: "PEM encoded certificate (chain)." },
key: { description: "PEM encoded private key." }, key: {
description: "PEM encoded private key.",
optional: true,
},
// FIXME: widget-toolkit should have an option to disable using these 2 parameters... // FIXME: widget-toolkit should have an option to disable using these 2 parameters...
restart: { restart: {
description: "UI compatibility parameter, ignored", description: "UI compatibility parameter, ignored",
@ -205,10 +195,16 @@ pub fn get_info() -> Result<Vec<CertificateInfo>, Error> {
/// Upload a custom certificate. /// Upload a custom certificate.
pub async fn upload_custom_certificate( pub async fn upload_custom_certificate(
certificates: String, certificates: String,
key: String, key: Option<String>,
) -> Result<Vec<CertificateInfo>, Error> { ) -> Result<Vec<CertificateInfo>, Error> {
let certificates = X509::stack_from_pem(certificates.as_bytes()) let certificates = X509::stack_from_pem(certificates.as_bytes())
.map_err(|err| format_err!("failed to decode certificate chain: {}", err))?; .map_err(|err| format_err!("failed to decode certificate chain: {}", err))?;
let key = match key {
Some(key) => key,
None => proxmox_sys::fs::file_read_string(configdir!("/proxy.key"))?,
};
let key = PKey::private_key_from_pem(key.as_bytes()) let key = PKey::private_key_from_pem(key.as_bytes())
.map_err(|err| format_err!("failed to parse private key: {}", err))?; .map_err(|err| format_err!("failed to parse private key: {}", err))?;

View File

@ -68,9 +68,9 @@ pub async fn datastore_status(
let mut entry = DataStoreStatusListItem { let mut entry = DataStoreStatusListItem {
store: store.clone(), store: store.clone(),
total: status.total as i64, total: Some(status.total),
used: status.used as i64, used: Some(status.used),
avail: status.available as i64, avail: Some(status.available),
history: None, history: None,
history_start: None, history_start: None,
history_delta: None, history_delta: None,

View File

@ -9,13 +9,13 @@ use proxmox_schema::api;
use proxmox_sys::{task_log, task_warn, WorkerTaskContext}; use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use pbs_api_types::{ use pbs_api_types::{
print_ns_and_snapshot, print_store_and_ns, Authid, GroupFilter, MediaPoolConfig, Operation, print_ns_and_snapshot, print_store_and_ns, Authid, MediaPoolConfig, Operation,
TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA, TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA,
PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA,
}; };
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo}; use pbs_datastore::backup_info::{BackupDir, BackupInfo};
use pbs_datastore::{DataStore, StoreProgress}; use pbs_datastore::{DataStore, StoreProgress};
use proxmox_rest_server::WorkerTask; use proxmox_rest_server::WorkerTask;
@ -411,31 +411,24 @@ fn backup_worker(
group_list.sort_unstable_by(|a, b| a.group().cmp(b.group())); group_list.sort_unstable_by(|a, b| a.group().cmp(b.group()));
let (group_list, group_count) = if let Some(group_filters) = &setup.group_filter { let group_count_full = group_list.len();
let filter_fn = |group: &BackupGroup, group_filters: &[GroupFilter]| {
group_filters.iter().any(|filter| group.matches(filter))
};
let group_count_full = group_list.len(); let group_list = match &setup.group_filter {
let list: Vec<BackupGroup> = group_list Some(f) => group_list
.into_iter() .into_iter()
.filter(|group| filter_fn(group, group_filters)) .filter(|group| group.group().apply_filters(f))
.collect(); .collect(),
let group_count = list.len(); None => group_list,
task_log!(
worker,
"found {} groups (out of {} total)",
group_count,
group_count_full
);
(list, group_count)
} else {
let group_count = group_list.len();
task_log!(worker, "found {} groups", group_count);
(group_list, group_count)
}; };
let mut progress = StoreProgress::new(group_count as u64); task_log!(
worker,
"found {} groups (out of {} total)",
group_list.len(),
group_count_full
);
let mut progress = StoreProgress::new(group_list.len() as u64);
let latest_only = setup.latest_only.unwrap_or(false); let latest_only = setup.latest_only.unwrap_or(false);

View File

@ -16,7 +16,7 @@ use proxmox_uuid::Uuid;
use pbs_api_types::{ use pbs_api_types::{
Authid, DriveListEntry, LabelUuidMap, Lp17VolumeStatistics, LtoDriveAndMediaStatus, Authid, DriveListEntry, LabelUuidMap, Lp17VolumeStatistics, LtoDriveAndMediaStatus,
LtoTapeDrive, MamAttribute, MediaIdFlat, CHANGER_NAME_SCHEMA, DRIVE_NAME_SCHEMA, LtoTapeDrive, MamAttribute, MediaIdFlat, TapeDensity, CHANGER_NAME_SCHEMA, DRIVE_NAME_SCHEMA,
MEDIA_LABEL_SCHEMA, MEDIA_POOL_NAME_SCHEMA, UPID_SCHEMA, MEDIA_LABEL_SCHEMA, MEDIA_POOL_NAME_SCHEMA, UPID_SCHEMA,
}; };
@ -36,8 +36,7 @@ use crate::{
changer::update_changer_online_status, changer::update_changer_online_status,
drive::{ drive::{
get_tape_device_state, lock_tape_device, media_changer, open_drive, get_tape_device_state, lock_tape_device, media_changer, open_drive,
open_lto_tape_drive, required_media_changer, set_tape_device_state, LtoTapeHandle, required_media_changer, set_tape_device_state, LtoTapeHandle, TapeDriver,
TapeDriver,
}, },
encryption_keys::insert_key, encryption_keys::insert_key,
file_formats::{MediaLabel, MediaSetLabel}, file_formats::{MediaLabel, MediaSetLabel},
@ -309,6 +308,21 @@ pub fn format_media(
let mut handle = open_drive(&config, &drive)?; let mut handle = open_drive(&config, &drive)?;
if !fast.unwrap_or(true) {
let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
let file = open_lto_tape_device(&drive_config.path)?;
let mut handle = LtoTapeHandle::new(file)?;
if let Ok(status) = handle.get_drive_and_media_status() {
if status.density >= TapeDensity::LTO9 {
task_log!(worker, "Slow formatting LTO9+ media.");
task_log!(
worker,
"This can take a very long time due to media optimization."
);
}
}
}
match handle.read_label() { match handle.read_label() {
Err(err) => { Err(err) => {
if let Some(label) = label_text { if let Some(label) = label_text {
@ -524,6 +538,14 @@ fn write_media_label(
label: MediaLabel, label: MediaLabel,
pool: Option<String>, pool: Option<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut inventory = Inventory::new(TAPE_STATUS_DIR);
inventory.reload()?;
if inventory
.find_media_by_label_text(&label.label_text)?
.is_some()
{
bail!("Media with label '{}' already exists", label.label_text);
}
drive.label_tape(&label)?; drive.label_tape(&label)?;
if let Some(ref pool) = pool { if let Some(ref pool) = pool {
task_log!( task_log!(
@ -547,8 +569,6 @@ fn write_media_label(
// Create the media catalog // Create the media catalog
MediaCatalog::overwrite(TAPE_STATUS_DIR, &media_id, false)?; MediaCatalog::overwrite(TAPE_STATUS_DIR, &media_id, false)?;
let mut inventory = Inventory::new(TAPE_STATUS_DIR);
inventory.store(media_id.clone(), false)?; inventory.store(media_id.clone(), false)?;
drive.rewind()?; drive.rewind()?;
@ -610,7 +630,7 @@ pub async fn restore_key(drive: String, password: String) -> Result<(), Error> {
run_drive_blocking_task(drive.clone(), "restore key".to_string(), move |config| { run_drive_blocking_task(drive.clone(), "restore key".to_string(), move |config| {
let mut drive = open_drive(&config, &drive)?; let mut drive = open_drive(&config, &drive)?;
let (_media_id, key_config) = drive.read_label()?; let (_media_id, key_config) = drive.read_label_without_loading_key()?;
if let Some(key_config) = key_config { if let Some(key_config) = key_config {
let password_fn = || Ok(password.as_bytes().to_vec()); let password_fn = || Ok(password.as_bytes().to_vec());
@ -657,9 +677,6 @@ pub async fn read_label(drive: String, inventorize: Option<bool>) -> Result<Medi
let label = if let Some(ref set) = media_id.media_set_label { let label = if let Some(ref set) = media_id.media_set_label {
let key = &set.encryption_key_fingerprint; let key = &set.encryption_key_fingerprint;
if let Err(err) = drive.set_encryption(key.clone().map(|fp| (fp, set.uuid.clone()))) {
eprintln!("unable to load encryption key: {}", err); // best-effort only
}
MediaIdFlat { MediaIdFlat {
ctime: media_id.label.ctime, ctime: media_id.label.ctime,
encryption_key_fingerprint: key.as_ref().map(|fp| fp.signature()), encryption_key_fingerprint: key.as_ref().map(|fp| fp.signature()),
@ -813,17 +830,27 @@ pub async fn inventory(drive: String) -> Result<Vec<LabelUuidMap>, Error> {
let label_text = label_text.to_string(); let label_text = label_text.to_string();
if let Some(media_id) = inventory.find_media_by_label_text(&label_text) { match inventory.find_media_by_label_text(&label_text) {
list.push(LabelUuidMap { Ok(Some(media_id)) => {
label_text, list.push(LabelUuidMap {
uuid: Some(media_id.label.uuid.clone()), label_text,
}); uuid: Some(media_id.label.uuid.clone()),
} else { });
list.push(LabelUuidMap { }
label_text, Ok(None) => {
uuid: None, list.push(LabelUuidMap {
}); label_text,
} uuid: None,
});
}
Err(err) => {
log::warn!("error getting unique media label: {err}");
list.push(LabelUuidMap {
label_text,
uuid: None,
});
}
};
} }
Ok(list) Ok(list)
@ -901,11 +928,21 @@ pub fn update_inventory(
let label_text = label_text.to_string(); let label_text = label_text.to_string();
if !read_all_labels { if !read_all_labels {
if let Some(media_id) = inventory.find_media_by_label_text(&label_text) { match inventory.find_media_by_label_text(&label_text) {
if !catalog || MediaCatalog::exists(TAPE_STATUS_DIR, &media_id.label.uuid) { Ok(Some(media_id)) => {
task_log!(worker, "media '{}' already inventoried", label_text); if !catalog
|| MediaCatalog::exists(TAPE_STATUS_DIR, &media_id.label.uuid)
{
task_log!(worker, "media '{}' already inventoried", label_text);
continue;
}
}
Err(err) => {
task_warn!(worker, "error getting media by unique label: {err}");
// we can't be sure which uuid it is
continue; continue;
} }
Ok(None) => {} // ok to inventorize
} }
} }
@ -1064,13 +1101,20 @@ fn barcode_label_media_worker(
} }
inventory.reload()?; inventory.reload()?;
if inventory.find_media_by_label_text(&label_text).is_some() { match inventory.find_media_by_label_text(&label_text) {
task_log!( Ok(Some(_)) => {
worker, task_log!(
"media '{}' already inventoried (already labeled)", worker,
label_text "media '{}' already inventoried (already labeled)",
); label_text
continue; );
continue;
}
Err(err) => {
task_warn!(worker, "error getting media by unique label: {err}",);
continue;
}
Ok(None) => {} // ok to label
} }
task_log!(worker, "checking/loading media '{}'", label_text); task_log!(worker, "checking/loading media '{}'", label_text);
@ -1144,7 +1188,7 @@ pub async fn cartridge_memory(drive: String) -> Result<Vec<MamAttribute>, Error>
"reading cartridge memory".to_string(), "reading cartridge memory".to_string(),
move |config| { move |config| {
let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?; let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
let mut handle = open_lto_tape_drive(&drive_config)?; let mut handle = LtoTapeHandle::open_lto_drive(&drive_config)?;
handle.cartridge_memory() handle.cartridge_memory()
}, },
@ -1174,7 +1218,7 @@ pub async fn volume_statistics(drive: String) -> Result<Lp17VolumeStatistics, Er
"reading volume statistics".to_string(), "reading volume statistics".to_string(),
move |config| { move |config| {
let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?; let drive_config: LtoTapeDrive = config.lookup("lto", &drive)?;
let mut handle = open_lto_tape_drive(&drive_config)?; let mut handle = LtoTapeHandle::open_lto_drive(&drive_config)?;
handle.volume_statistics() handle.volume_statistics()
}, },
@ -1311,12 +1355,6 @@ pub fn catalog_media(
inventory.store(media_id.clone(), false)?; inventory.store(media_id.clone(), false)?;
return Ok(()); return Ok(());
} }
let encrypt_fingerprint = set
.encryption_key_fingerprint
.clone()
.map(|fp| (fp, set.uuid.clone()));
drive.set_encryption(encrypt_fingerprint)?;
let _pool_lock = lock_media_pool(TAPE_STATUS_DIR, &set.pool)?; let _pool_lock = lock_media_pool(TAPE_STATUS_DIR, &set.pool)?;
let media_set_lock = lock_media_set(TAPE_STATUS_DIR, &set.uuid, None)?; let media_set_lock = lock_media_set(TAPE_STATUS_DIR, &set.uuid, None)?;

View File

@ -3,7 +3,7 @@ use std::collections::HashSet;
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap}; use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap};
use proxmox_schema::api; use proxmox_schema::{api, param_bail};
use proxmox_uuid::Uuid; use proxmox_uuid::Uuid;
use pbs_api_types::{ use pbs_api_types::{
@ -290,6 +290,11 @@ pub async fn list_media(
properties: { properties: {
"label-text": { "label-text": {
schema: MEDIA_LABEL_SCHEMA, schema: MEDIA_LABEL_SCHEMA,
optional: true,
},
uuid: {
schema: MEDIA_UUID_SCHEMA,
optional: true,
}, },
"vault-name": { "vault-name": {
schema: VAULT_NAME_SCHEMA, schema: VAULT_NAME_SCHEMA,
@ -299,15 +304,33 @@ pub async fn list_media(
}, },
)] )]
/// Change Tape location to vault (if given), or offline. /// Change Tape location to vault (if given), or offline.
pub fn move_tape(label_text: String, vault_name: Option<String>) -> Result<(), Error> { pub fn move_tape(
label_text: Option<String>,
uuid: Option<Uuid>,
vault_name: Option<String>,
) -> Result<(), Error> {
let mut inventory = Inventory::load(TAPE_STATUS_DIR)?; let mut inventory = Inventory::load(TAPE_STATUS_DIR)?;
let uuid = inventory let uuid = match (uuid, label_text) {
.find_media_by_label_text(&label_text) (Some(_), Some(_)) => {
.ok_or_else(|| format_err!("no such media '{}'", label_text))? param_bail!(
.label "format-text",
.uuid format_err!("A uuid is given, no label-text is expected.")
.clone(); );
}
(None, None) => {
param_bail!(
"uuid",
format_err!("No label-text is given, a uuid is required.")
);
}
(Some(uuid), None) => uuid,
(None, Some(label_text)) => match inventory.find_media_by_label_text(&label_text) {
Ok(Some(media_id)) => media_id.label.uuid.clone(),
Ok(None) => bail!("no such media '{}'", label_text),
Err(err) => bail!("error getting media from unique label: {err}"),
},
};
if let Some(vault_name) = vault_name { if let Some(vault_name) = vault_name {
inventory.set_media_location_vault(&uuid, &vault_name)?; inventory.set_media_location_vault(&uuid, &vault_name)?;
@ -323,6 +346,11 @@ pub fn move_tape(label_text: String, vault_name: Option<String>) -> Result<(), E
properties: { properties: {
"label-text": { "label-text": {
schema: MEDIA_LABEL_SCHEMA, schema: MEDIA_LABEL_SCHEMA,
optional: true,
},
uuid: {
schema: MEDIA_UUID_SCHEMA,
optional: true,
}, },
force: { force: {
description: "Force removal (even if media is used in a media set).", description: "Force removal (even if media is used in a media set).",
@ -333,22 +361,46 @@ pub fn move_tape(label_text: String, vault_name: Option<String>) -> Result<(), E
}, },
)] )]
/// Destroy media (completely remove from database) /// Destroy media (completely remove from database)
pub fn destroy_media(label_text: String, force: Option<bool>) -> Result<(), Error> { pub fn destroy_media(
label_text: Option<String>,
uuid: Option<Uuid>,
force: Option<bool>,
) -> Result<(), Error> {
let force = force.unwrap_or(false); let force = force.unwrap_or(false);
let mut inventory = Inventory::load(TAPE_STATUS_DIR)?; let mut inventory = Inventory::load(TAPE_STATUS_DIR)?;
let media_id = inventory let (media_id, text) = match (uuid, label_text) {
.find_media_by_label_text(&label_text) (Some(_), Some(_)) => {
.ok_or_else(|| format_err!("no such media '{}'", label_text))?; param_bail!(
"format-text",
format_err!("A uuid is given, no label-text is expected.")
);
}
(None, None) => {
param_bail!(
"uuid",
format_err!("No label-text is given, a uuid is required.")
);
}
(Some(uuid), None) => (
inventory
.lookup_media(&uuid)
.ok_or_else(|| format_err!("no such media '{}'", uuid))?,
uuid.to_string(),
),
(None, Some(label_text)) => (
inventory
.find_media_by_label_text(&label_text)?
.ok_or_else(|| format_err!("no such media '{}'", label_text))?,
label_text,
),
};
if !force { if !force {
if let Some(ref set) = media_id.media_set_label { if let Some(ref set) = media_id.media_set_label {
if !set.unassigned() { if !set.unassigned() {
bail!( bail!("media '{text}' contains data (please use 'force' flag to remove).");
"media '{}' contains data (please use 'force' flag to remove.",
label_text
);
} }
} }
} }
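
Illustrative sketch, not part of the changeset: move_tape and destroy_media above now take either a label-text or a uuid, never both. The selection boils down to a match on the pair of options; the names below are hypothetical and a plain anyhow error stands in for param_bail!:

use anyhow::{bail, Error};

/// Resolve a media identifier from mutually exclusive parameters;
/// exactly one of `uuid` or `label_text` may be given.
fn resolve_media_uuid(
    uuid: Option<String>,
    label_text: Option<String>,
) -> Result<String, Error> {
    match (uuid, label_text) {
        (Some(_), Some(_)) => bail!("'uuid' and 'label-text' are mutually exclusive"),
        (None, None) => bail!("either 'uuid' or 'label-text' is required"),
        (Some(uuid), None) => Ok(uuid),
        // the real code resolves the label through the tape inventory here
        (None, Some(label_text)) => Ok(format!("uuid-for-{label_text}")),
    }
}

fn main() {
    assert!(resolve_media_uuid(None, None).is_err());
    assert!(resolve_media_uuid(Some("a".into()), Some("tape1".into())).is_err());
    println!("{}", resolve_media_uuid(None, Some("tape1".into())).unwrap());
}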

View File

@ -1029,12 +1029,6 @@ fn restore_snapshots_to_tmpdir(
media_set_uuid media_set_uuid
); );
} }
let encrypt_fingerprint = set.encryption_key_fingerprint.clone().map(|fp| {
task_log!(worker, "Encryption key fingerprint: {}", fp);
(fp, set.uuid.clone())
});
drive.set_encryption(encrypt_fingerprint)?;
} }
} }
@ -1279,12 +1273,6 @@ pub fn request_and_restore_media(
media_set_uuid media_set_uuid
); );
} }
let encrypt_fingerprint = set
.encryption_key_fingerprint
.clone()
.map(|fp| (fp, set.uuid.clone()));
drive.set_encryption(encrypt_fingerprint)?;
} }
} }

View File

@ -103,8 +103,8 @@ async fn register_account(
contact: String, contact: String,
directory: Option<String>, directory: Option<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let directory = match directory { let (directory_url, custom_directory) = match directory {
Some(directory) => directory, Some(directory) => (directory, true),
None => { None => {
println!("Directory endpoints:"); println!("Directory endpoints:");
for (i, dir) in KNOWN_ACME_DIRECTORIES.iter().enumerate() { for (i, dir) in KNOWN_ACME_DIRECTORIES.iter().enumerate() {
@ -122,12 +122,12 @@ async fn register_account(
match input.trim().parse::<usize>() { match input.trim().parse::<usize>() {
Ok(n) if n < KNOWN_ACME_DIRECTORIES.len() => { Ok(n) if n < KNOWN_ACME_DIRECTORIES.len() => {
break KNOWN_ACME_DIRECTORIES[n].url.to_owned(); break (KNOWN_ACME_DIRECTORIES[n].url.to_owned(), false);
} }
Ok(n) if n == KNOWN_ACME_DIRECTORIES.len() => { Ok(n) if n == KNOWN_ACME_DIRECTORIES.len() => {
input.clear(); input.clear();
std::io::stdin().read_line(&mut input)?; std::io::stdin().read_line(&mut input)?;
break input.trim().to_owned(); break (input.trim().to_owned(), true);
} }
_ => eprintln!("Invalid selection."), _ => eprintln!("Invalid selection."),
} }
@ -140,9 +140,13 @@ async fn register_account(
} }
}; };
println!("Attempting to fetch Terms of Service from {:?}", directory); println!(
let mut client = AcmeClient::new(directory.clone()); "Attempting to fetch Terms of Service from {:?}",
let tos_agreed = if let Some(tos_url) = client.terms_of_service_url().await? { directory_url
);
let mut client = AcmeClient::new(directory_url.clone());
let directory = client.directory().await?;
let tos_agreed = if let Some(tos_url) = directory.terms_of_service_url() {
println!("Terms of Service: {}", tos_url); println!("Terms of Service: {}", tos_url);
print!("Do you agree to the above terms? [y|N]: "); print!("Do you agree to the above terms? [y|N]: ");
std::io::stdout().flush()?; std::io::stdout().flush()?;
@ -154,11 +158,46 @@ async fn register_account(
true true
}; };
println!("Attempting to register account with {:?}...", directory); let mut eab_enabled = directory.external_account_binding_required();
if !eab_enabled && custom_directory {
print!("Do you want to use external account binding? [y|N]: ");
std::io::stdout().flush()?;
let mut input = String::new();
std::io::stdin().read_line(&mut input)?;
eab_enabled = input.trim().eq_ignore_ascii_case("y");
} else if eab_enabled {
println!("The CA requires external account binding.");
}
let account = let eab_creds = if eab_enabled {
api2::config::acme::do_register_account(&mut client, &name, tos_agreed, contact, None) println!("You should have received a key id and a key from your CA.");
.await?;
print!("Enter EAB key id: ");
std::io::stdout().flush()?;
let mut eab_kid = String::new();
std::io::stdin().read_line(&mut eab_kid)?;
print!("Enter EAB key: ");
std::io::stdout().flush()?;
let mut eab_hmac_key = String::new();
std::io::stdin().read_line(&mut eab_hmac_key)?;
Some((eab_kid.trim().to_owned(), eab_hmac_key.trim().to_owned()))
} else {
None
};
println!("Attempting to register account with {:?}...", directory_url);
let account = api2::config::acme::do_register_account(
&mut client,
&name,
tos_agreed,
contact,
None,
eab_creds,
)
.await?;
println!("Registration successful, account URL: {}", account.location); println!("Registration successful, account URL: {}", account.location);

View File

@ -161,6 +161,7 @@ fn get_config(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error
let options = default_table_format_options() let options = default_table_format_options()
.column(ColumnConfig::new("name")) .column(ColumnConfig::new("name"))
.column(ColumnConfig::new("path")) .column(ColumnConfig::new("path"))
.column(ColumnConfig::new("eject-before-unload"))
.column(ColumnConfig::new("export-slots")); .column(ColumnConfig::new("export-slots"));
format_and_print_result_full(&mut data, &info.returns, &output_format, &options); format_and_print_result_full(&mut data, &info.returns, &output_format, &options);

View File

@ -6,6 +6,8 @@ use std::fs::File;
use std::os::unix::io::{AsRawFd, FromRawFd}; use std::os::unix::io::{AsRawFd, FromRawFd};
use anyhow::{bail, Error}; use anyhow::{bail, Error};
use pbs_tape::sg_tape::SgTape;
use proxmox_backup::tape::encryption_keys::load_key;
use serde_json::Value; use serde_json::Value;
use proxmox_router::{cli::*, RpcEnvironment}; use proxmox_router::{cli::*, RpcEnvironment};
@ -19,28 +21,26 @@ use pbs_api_types::{
use pbs_tape::linux_list_drives::{check_tape_is_lto_tape_device, open_lto_tape_device}; use pbs_tape::linux_list_drives::{check_tape_is_lto_tape_device, open_lto_tape_device};
use proxmox_backup::tape::drive::{open_lto_tape_drive, LtoTapeHandle, TapeDriver}; fn get_tape_handle(param: &Value) -> Result<SgTape, Error> {
fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {
let handle = if let Some(name) = param["drive"].as_str() { let handle = if let Some(name) = param["drive"].as_str() {
let (config, _digest) = pbs_config::drive::config()?; let (config, _digest) = pbs_config::drive::config()?;
let drive: LtoTapeDrive = config.lookup("lto", name)?; let drive: LtoTapeDrive = config.lookup("lto", name)?;
log::info!("using device {}", drive.path); log::info!("using device {}", drive.path);
open_lto_tape_drive(&drive)? SgTape::open_lto_drive(&drive)?
} else if let Some(device) = param["device"].as_str() { } else if let Some(device) = param["device"].as_str() {
log::info!("using device {}", device); log::info!("using device {}", device);
LtoTapeHandle::new(open_lto_tape_device(device)?)? SgTape::new(open_lto_tape_device(device)?)?
} else if let Some(true) = param["stdin"].as_bool() { } else if let Some(true) = param["stdin"].as_bool() {
log::info!("using stdin"); log::info!("using stdin");
let fd = std::io::stdin().as_raw_fd(); let fd = std::io::stdin().as_raw_fd();
let file = unsafe { File::from_raw_fd(fd) }; let file = unsafe { File::from_raw_fd(fd) };
check_tape_is_lto_tape_device(&file)?; check_tape_is_lto_tape_device(&file)?;
LtoTapeHandle::new(file)? SgTape::new(file)?
} else if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") { } else if let Ok(name) = std::env::var("PROXMOX_TAPE_DRIVE") {
let (config, _digest) = pbs_config::drive::config()?; let (config, _digest) = pbs_config::drive::config()?;
let drive: LtoTapeDrive = config.lookup("lto", &name)?; let drive: LtoTapeDrive = config.lookup("lto", &name)?;
log::info!("using device {}", drive.path); log::info!("using device {}", drive.path);
open_lto_tape_drive(&drive)? SgTape::open_lto_drive(&drive)?
} else { } else {
let (config, _digest) = pbs_config::drive::config()?; let (config, _digest) = pbs_config::drive::config()?;
@ -56,7 +56,7 @@ fn get_tape_handle(param: &Value) -> Result<LtoTapeHandle, Error> {
let name = drive_names[0]; let name = drive_names[0];
let drive: LtoTapeDrive = config.lookup("lto", name)?; let drive: LtoTapeDrive = config.lookup("lto", name)?;
log::info!("using device {}", drive.path); log::info!("using device {}", drive.path);
open_lto_tape_drive(&drive)? SgTape::open_lto_drive(&drive)?
} else { } else {
bail!("no drive/device specified"); bail!("no drive/device specified");
} }
@ -103,7 +103,8 @@ fn set_encryption(
match (fingerprint, uuid) { match (fingerprint, uuid) {
(Some(fingerprint), Some(uuid)) => { (Some(fingerprint), Some(uuid)) => {
handle.set_encryption(Some((fingerprint, uuid)))?; let key = load_key(&fingerprint)?;
handle.set_encryption(Some((key, uuid)))?;
} }
(Some(_), None) => { (Some(_), None) => {
bail!("missing media set uuid"); bail!("missing media set uuid");

View File

@ -8,7 +8,7 @@ use nix::sys::stat::Mode;
use openssl::pkey::PKey; use openssl::pkey::PKey;
use openssl::rsa::Rsa; use openssl::rsa::Rsa;
use openssl::x509::X509Builder; use openssl::x509::X509Builder;
use std::path::PathBuf; use std::path::Path;
use proxmox_lang::try_block; use proxmox_lang::try_block;
@ -84,8 +84,8 @@ pub fn create_configdir() -> Result<(), Error> {
/// Update self signed node certificate. /// Update self signed node certificate.
pub fn update_self_signed_cert(force: bool) -> Result<(), Error> { pub fn update_self_signed_cert(force: bool) -> Result<(), Error> {
let key_path = PathBuf::from(configdir!("/proxy.key")); let key_path = Path::new(configdir!("/proxy.key"));
let cert_path = PathBuf::from(configdir!("/proxy.pem")); let cert_path = Path::new(configdir!("/proxy.pem"));
if key_path.exists() && cert_path.exists() && !force { if key_path.exists() && cert_path.exists() && !force {
return Ok(()); return Ok(());
@ -183,8 +183,8 @@ pub fn update_self_signed_cert(force: bool) -> Result<(), Error> {
} }
pub(crate) fn set_proxy_certificate(cert_pem: &[u8], key_pem: &[u8]) -> Result<(), Error> { pub(crate) fn set_proxy_certificate(cert_pem: &[u8], key_pem: &[u8]) -> Result<(), Error> {
let key_path = PathBuf::from(configdir!("/proxy.key")); let key_path = Path::new(configdir!("/proxy.key"));
let cert_path = PathBuf::from(configdir!("/proxy.pem")); let cert_path = Path::new(configdir!("/proxy.pem"));
create_configdir()?; create_configdir()?;
pbs_config::replace_backup_config(key_path, key_pem) pbs_config::replace_backup_config(key_path, key_pem)

View File

@ -9,8 +9,8 @@ use std::path::Path;
use anyhow::{format_err, Error}; use anyhow::{format_err, Error};
use once_cell::sync::OnceCell; use once_cell::sync::OnceCell;
use proxmox_rrd::rrd::{CF, DST, RRD}; use proxmox_rrd::rrd::{AggregationFn, DataSourceType, Database};
use proxmox_rrd::RRDCache; use proxmox_rrd::Cache;
use proxmox_sys::fs::CreateOptions; use proxmox_sys::fs::CreateOptions;
use pbs_api_types::{RRDMode, RRDTimeFrame}; use pbs_api_types::{RRDMode, RRDTimeFrame};
@ -18,10 +18,10 @@ use pbs_buildcfg::PROXMOX_BACKUP_STATE_DIR_M;
const RRD_CACHE_BASEDIR: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/rrdb"); const RRD_CACHE_BASEDIR: &str = concat!(PROXMOX_BACKUP_STATE_DIR_M!(), "/rrdb");
static RRD_CACHE: OnceCell<RRDCache> = OnceCell::new(); static RRD_CACHE: OnceCell<Cache> = OnceCell::new();
/// Get the RRD cache instance /// Get the RRD cache instance
pub fn get_rrd_cache() -> Result<&'static RRDCache, Error> { pub fn get_rrd_cache() -> Result<&'static Cache, Error> {
RRD_CACHE RRD_CACHE
.get() .get()
.ok_or_else(|| format_err!("RRD cache not initialized!")) .ok_or_else(|| format_err!("RRD cache not initialized!"))
@ -30,7 +30,7 @@ pub fn get_rrd_cache() -> Result<&'static RRDCache, Error> {
/// Initialize the RRD cache instance /// Initialize the RRD cache instance
/// ///
/// Note: Only a single process must do this (proxmox-backup-proxy) /// Note: Only a single process must do this (proxmox-backup-proxy)
pub fn initialize_rrd_cache() -> Result<&'static RRDCache, Error> { pub fn initialize_rrd_cache() -> Result<&'static Cache, Error> {
let backup_user = pbs_config::backup_user()?; let backup_user = pbs_config::backup_user()?;
let file_options = CreateOptions::new() let file_options = CreateOptions::new()
@ -43,7 +43,7 @@ pub fn initialize_rrd_cache() -> Result<&'static RRDCache, Error> {
let apply_interval = 30.0 * 60.0; // 30 minutes let apply_interval = 30.0 * 60.0; // 30 minutes
let cache = RRDCache::new( let cache = Cache::new(
RRD_CACHE_BASEDIR, RRD_CACHE_BASEDIR,
Some(file_options), Some(file_options),
Some(dir_options), Some(dir_options),
@ -58,8 +58,8 @@ pub fn initialize_rrd_cache() -> Result<&'static RRDCache, Error> {
Ok(RRD_CACHE.get().unwrap()) Ok(RRD_CACHE.get().unwrap())
} }
fn load_callback(path: &Path, _rel_path: &str, dst: DST) -> RRD { fn load_callback(path: &Path, _rel_path: &str, dst: DataSourceType) -> Database {
match RRD::load(path, true) { match Database::load(path, true) {
Ok(rrd) => rrd, Ok(rrd) => rrd,
Err(err) => { Err(err) => {
if err.kind() != std::io::ErrorKind::NotFound { if err.kind() != std::io::ErrorKind::NotFound {
@ -69,7 +69,7 @@ fn load_callback(path: &Path, _rel_path: &str, dst: DST) -> RRD {
err err
); );
} }
RRDCache::create_proxmox_backup_default_rrd(dst) Cache::create_proxmox_backup_default_rrd(dst)
} }
} }
} }
@ -93,8 +93,8 @@ pub fn extract_rrd_data(
}; };
let cf = match mode { let cf = match mode {
RRDMode::Max => CF::Maximum, RRDMode::Max => AggregationFn::Maximum,
RRDMode::Average => CF::Average, RRDMode::Average => AggregationFn::Average,
}; };
let rrd_cache = get_rrd_cache()?; let rrd_cache = get_rrd_cache()?;
@ -114,7 +114,7 @@ pub fn rrd_sync_journal() {
pub fn rrd_update_gauge(name: &str, value: f64) { pub fn rrd_update_gauge(name: &str, value: f64) {
if let Ok(rrd_cache) = get_rrd_cache() { if let Ok(rrd_cache) = get_rrd_cache() {
let now = proxmox_time::epoch_f64(); let now = proxmox_time::epoch_f64();
if let Err(err) = rrd_cache.update_value(name, now, value, DST::Gauge) { if let Err(err) = rrd_cache.update_value(name, now, value, DataSourceType::Gauge) {
log::error!("rrd::update_value '{}' failed - {}", name, err); log::error!("rrd::update_value '{}' failed - {}", name, err);
} }
} }
@ -124,7 +124,7 @@ pub fn rrd_update_gauge(name: &str, value: f64) {
pub fn rrd_update_derive(name: &str, value: f64) { pub fn rrd_update_derive(name: &str, value: f64) {
if let Ok(rrd_cache) = get_rrd_cache() { if let Ok(rrd_cache) = get_rrd_cache() {
let now = proxmox_time::epoch_f64(); let now = proxmox_time::epoch_f64();
if let Err(err) = rrd_cache.update_value(name, now, value, DST::Derive) { if let Err(err) = rrd_cache.update_value(name, now, value, DataSourceType::Derive) {
log::error!("rrd::update_value '{}' failed - {}", name, err); log::error!("rrd::update_value '{}' failed - {}", name, err);
} }
} }

View File

@ -486,7 +486,7 @@ pub(crate) struct PullParameters {
/// How many levels of sub-namespaces to pull (0 == no recursion, None == maximum recursion) /// How many levels of sub-namespaces to pull (0 == no recursion, None == maximum recursion)
max_depth: Option<usize>, max_depth: Option<usize>,
/// Filters for reducing the pull scope /// Filters for reducing the pull scope
group_filter: Option<Vec<GroupFilter>>, group_filter: Vec<GroupFilter>,
/// How many snapshots should be transferred at most (taking the newest N snapshots) /// How many snapshots should be transferred at most (taking the newest N snapshots)
transfer_last: Option<usize>, transfer_last: Option<usize>,
} }
@ -539,6 +539,8 @@ impl PullParameters {
ns, ns,
}; };
let group_filter = group_filter.unwrap_or_default();
Ok(Self { Ok(Self {
source, source,
target, target,
@ -1358,7 +1360,6 @@ pub(crate) async fn pull_ns(
) -> Result<(StoreProgress, bool), Error> { ) -> Result<(StoreProgress, bool), Error> {
let mut list: Vec<BackupGroup> = params.source.list_groups(namespace, &params.owner).await?; let mut list: Vec<BackupGroup> = params.source.list_groups(namespace, &params.owner).await?;
let total_count = list.len();
list.sort_unstable_by(|a, b| { list.sort_unstable_by(|a, b| {
let type_order = a.ty.cmp(&b.ty); let type_order = a.ty.cmp(&b.ty);
if type_order == std::cmp::Ordering::Equal { if type_order == std::cmp::Ordering::Equal {
@ -1368,27 +1369,17 @@ pub(crate) async fn pull_ns(
} }
}); });
let apply_filters = |group: &BackupGroup, filters: &[GroupFilter]| -> bool { let unfiltered_count = list.len();
filters.iter().any(|filter| group.matches(filter)) let list: Vec<BackupGroup> = list
}; .into_iter()
.filter(|group| group.apply_filters(&params.group_filter))
let list = if let Some(ref group_filter) = &params.group_filter { .collect();
let unfiltered_count = list.len(); task_log!(
let list: Vec<BackupGroup> = list worker,
.into_iter() "found {} groups to sync (out of {} total)",
.filter(|group| apply_filters(group, group_filter)) list.len(),
.collect(); unfiltered_count
task_log!( );
worker,
"found {} groups to sync (out of {} total)",
list.len(),
unfiltered_count
);
list
} else {
task_log!(worker, "found {} groups to sync", total_count);
list
};
let mut errors = false; let mut errors = false;
@ -1457,10 +1448,8 @@ pub(crate) async fn pull_ns(
if check_backup_owner(&owner, &params.owner).is_err() { if check_backup_owner(&owner, &params.owner).is_err() {
continue; continue;
} }
if let Some(ref group_filter) = &params.group_filter { if !local_group.apply_filters(&params.group_filter) {
if !apply_filters(local_group, group_filter) { continue;
continue;
}
} }
task_log!(worker, "delete vanished group '{local_group}'",); task_log!(worker, "delete vanished group '{local_group}'",);
match params match params
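
Illustrative sketch, not part of the changeset: group filters can now carry an include/exclude behavior (see the GroupFilter UI changes further below). The real matching lives in BackupGroup::apply_filters from pbs-api-types; the semantics assumed here, keep a group when it matches any include filter (or none are defined) and no exclude filter, follow from this changeset but are not a verbatim copy, and the substring matcher merely stands in for the real type/group/regex filters:

/// Stand-in for the include/exclude group filter behavior.
enum Behavior { Include, Exclude }

struct Filter {
    behavior: Behavior,
    /// substring match stands in for the real type/group/regex matchers
    pattern: String,
}

fn apply_filters(group: &str, filters: &[Filter]) -> bool {
    let matches = |f: &Filter| group.contains(&f.pattern);
    let mut has_include = false;
    let mut included = false;
    for f in filters {
        match f.behavior {
            Behavior::Exclude if matches(f) => return false, // exclude wins
            Behavior::Include => {
                has_include = true;
                if matches(f) { included = true; }
            }
            _ => {}
        }
    }
    !has_include || included // no include filters means "include everything"
}

fn main() {
    let filters = vec![
        Filter { behavior: Behavior::Include, pattern: "vm/".into() },
        Filter { behavior: Behavior::Exclude, pattern: "vm/900".into() },
    ];
    for g in ["vm/100", "vm/900", "ct/200"] {
        println!("{g}: {}", apply_filters(g, &filters));
    }
}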

View File

@ -42,6 +42,7 @@ fn files() -> Vec<(&'static str, Vec<&'static str>)> {
"Jobs", "Jobs",
vec![ vec![
"/etc/proxmox-backup/sync.cfg", "/etc/proxmox-backup/sync.cfg",
"/etc/proxmox-backup/prune.cfg",
"/etc/proxmox-backup/verification.cfg", "/etc/proxmox-backup/verification.cfg",
], ],
), ),
@ -103,7 +104,7 @@ fn function_calls() -> Vec<FunctionMapping> {
for store in config.sections.keys() { for store in config.sections.keys() {
list.push(store.as_str()); list.push(store.as_str());
} }
list.join(", ") format!("```\n{}\n```", list.join(", "))
}), }),
("System Load & Uptime", get_top_processes), ("System Load & Uptime", get_top_processes),
] ]
@ -212,7 +213,7 @@ pub fn generate_report() -> String {
.iter() .iter()
.map(|(desc, function)| { .map(|(desc, function)| {
let output = function(); let output = function();
format!("#### {desc}\n```\n{}\n```", output.trim_end()) format!("#### {desc}\n{}\n", output.trim_end())
}) })
.collect::<Vec<String>>() .collect::<Vec<String>>()
.join("\n\n"); .join("\n\n");

View File

@ -13,7 +13,9 @@ use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
use pbs_api_types::{LtoTapeDrive, ScsiTapeChanger}; use pbs_api_types::{LtoTapeDrive, ScsiTapeChanger};
use pbs_tape::{sg_pt_changer, ElementStatus, MtxStatus}; use pbs_tape::{linux_list_drives::open_lto_tape_device, sg_pt_changer, ElementStatus, MtxStatus};
use crate::tape::drive::{LtoTapeHandle, TapeDriver};
/// Interface to SCSI changer devices /// Interface to SCSI changer devices
pub trait ScsiMediaChange { pub trait ScsiMediaChange {
@ -384,8 +386,7 @@ fn load_changer_state_cache(changer: &str) -> Result<Option<MtxStatus>, Error> {
/// Implements MediaChange using 'mtx' linux cli tool /// Implements MediaChange using 'mtx' linux cli tool
pub struct MtxMediaChanger { pub struct MtxMediaChanger {
drive_name: String, // used for error messages drive: LtoTapeDrive,
drive_number: u64,
config: ScsiTapeChanger, config: ScsiTapeChanger,
} }
@ -398,8 +399,7 @@ impl MtxMediaChanger {
}; };
Ok(Self { Ok(Self {
drive_name: drive_config.name.clone(), drive: drive_config.clone(),
drive_number: drive_config.changer_drivenum.unwrap_or(0),
config: changer_config, config: changer_config,
}) })
} }
@ -407,11 +407,11 @@ impl MtxMediaChanger {
impl MediaChange for MtxMediaChanger { impl MediaChange for MtxMediaChanger {
fn drive_number(&self) -> u64 { fn drive_number(&self) -> u64 {
self.drive_number self.drive.changer_drivenum.unwrap_or(0)
} }
fn drive_name(&self) -> &str { fn drive_name(&self) -> &str {
&self.drive_name &self.drive.name
} }
fn status(&mut self) -> Result<MtxStatus, Error> { fn status(&mut self) -> Result<MtxStatus, Error> {
@ -423,12 +423,21 @@ impl MediaChange for MtxMediaChanger {
} }
fn load_media_from_slot(&mut self, slot: u64) -> Result<MtxStatus, Error> { fn load_media_from_slot(&mut self, slot: u64) -> Result<MtxStatus, Error> {
self.config.load_slot(slot, self.drive_number) self.config.load_slot(slot, self.drive_number())
} }
fn unload_media(&mut self, target_slot: Option<u64>) -> Result<MtxStatus, Error> { fn unload_media(&mut self, target_slot: Option<u64>) -> Result<MtxStatus, Error> {
if self.config.eject_before_unload.unwrap_or(false) {
let file = open_lto_tape_device(&self.drive.path)?;
let mut handle = LtoTapeHandle::new(file)?;
if handle.medium_present() {
handle.eject_media()?;
}
}
if let Some(target_slot) = target_slot { if let Some(target_slot) = target_slot {
self.config.unload(target_slot, self.drive_number) self.config.unload(target_slot, self.drive_number())
} else { } else {
let status = self.status()?; let status = self.status()?;
self.unload_to_free_slot(status) self.unload_to_free_slot(status)

View File

@ -87,6 +87,16 @@ impl OnlineStatusMap {
} }
} }
fn insert_into_online_set(inventory: &Inventory, label_text: &str, online_set: &mut HashSet<Uuid>) {
match inventory.find_media_by_label_text(&label_text) {
Ok(Some(media_id)) => {
online_set.insert(media_id.label.uuid.clone());
}
Ok(None) => {}
Err(err) => log::warn!("error getting media by unique label: {err}"),
}
}
/// Extract the list of online media from MtxStatus /// Extract the list of online media from MtxStatus
/// ///
/// Returns a HashSet containing all found media Uuid. This only /// Returns a HashSet containing all found media Uuid. This only
@ -96,9 +106,7 @@ pub fn mtx_status_to_online_set(status: &MtxStatus, inventory: &Inventory) -> Ha
for drive_status in status.drives.iter() { for drive_status in status.drives.iter() {
if let ElementStatus::VolumeTag(ref label_text) = drive_status.status { if let ElementStatus::VolumeTag(ref label_text) = drive_status.status {
if let Some(media_id) = inventory.find_media_by_label_text(label_text) { insert_into_online_set(inventory, label_text, &mut online_set);
online_set.insert(media_id.label.uuid.clone());
}
} }
} }
@ -107,9 +115,7 @@ pub fn mtx_status_to_online_set(status: &MtxStatus, inventory: &Inventory) -> Ha
continue; continue;
} }
if let ElementStatus::VolumeTag(ref label_text) = slot_info.status { if let ElementStatus::VolumeTag(ref label_text) = slot_info.status {
if let Some(media_id) = inventory.find_media_by_label_text(label_text) { insert_into_online_set(inventory, label_text, &mut online_set);
online_set.insert(media_id.label.uuid.clone());
}
} }
} }
@ -174,9 +180,7 @@ pub fn update_online_status<P: AsRef<Path>>(
let mut online_set = HashSet::new(); let mut online_set = HashSet::new();
for label_text in media_list { for label_text in media_list {
if let Some(media_id) = inventory.find_media_by_label_text(&label_text) { insert_into_online_set(&inventory, &label_text, &mut online_set);
online_set.insert(media_id.label.uuid.clone());
}
} }
map.update_online_status(&vtape.name, online_set)?; map.update_online_status(&vtape.name, online_set)?;
} }
@ -205,9 +209,7 @@ pub fn update_changer_online_status(
let mut online_map = OnlineStatusMap::new(drive_config)?; let mut online_map = OnlineStatusMap::new(drive_config)?;
let mut online_set = HashSet::new(); let mut online_set = HashSet::new();
for label_text in label_text_list.iter() { for label_text in label_text_list.iter() {
if let Some(media_id) = inventory.find_media_by_label_text(label_text) { insert_into_online_set(inventory, label_text, &mut online_set)
online_set.insert(media_id.label.uuid.clone());
}
} }
online_map.update_online_status(changer_name, online_set)?; online_map.update_online_status(changer_name, online_set)?;
inventory.update_online_status(&online_map)?; inventory.update_online_status(&online_map)?;

View File

@ -16,6 +16,7 @@ use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use anyhow::{bail, format_err, Error}; use anyhow::{bail, format_err, Error};
use pbs_tape::sg_tape::drive_get_encryption;
use proxmox_uuid::Uuid; use proxmox_uuid::Uuid;
use pbs_api_types::{ use pbs_api_types::{
@ -23,7 +24,6 @@ use pbs_api_types::{
}; };
use pbs_key_config::KeyConfig; use pbs_key_config::KeyConfig;
use pbs_tape::{ use pbs_tape::{
linux_list_drives::open_lto_tape_device,
sg_tape::{SgTape, TapeAlertFlags}, sg_tape::{SgTape, TapeAlertFlags},
BlockReadError, MediaContentHeader, TapeRead, TapeWrite, BlockReadError, MediaContentHeader, TapeRead, TapeWrite,
}; };
@ -34,75 +34,47 @@ use crate::tape::{
file_formats::{MediaSetLabel, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0}, file_formats::{MediaSetLabel, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0},
}; };
/// Open a tape device impl Drop for LtoTapeHandle {
/// fn drop(&mut self) {
/// This does additional checks: // always unload the encryption key when the handle is dropped for security
/// // but only log an error if we set one in the first place
/// - check if it is a non-rewinding tape device if let Err(err) = self.set_encryption(None) {
/// - check if drive is ready (tape loaded) if self.encryption_key_loaded {
/// - check block size log::error!("could not unload encryption key from drive: {err}");
/// - for autoloader only, try to reload ejected tapes
pub fn open_lto_tape_drive(config: &LtoTapeDrive) -> Result<LtoTapeHandle, Error> {
proxmox_lang::try_block!({
let file = open_lto_tape_device(&config.path)?;
let mut handle = LtoTapeHandle::new(file)?;
if handle.sg_tape.test_unit_ready().is_err() {
// for autoloader only, try to reload ejected tapes
if config.changer.is_some() {
let _ = handle.sg_tape.load(); // just try, ignore error
} }
} }
}
handle.sg_tape.wait_until_ready()?;
handle.set_default_options()?;
Ok(handle)
})
.map_err(|err: Error| {
format_err!(
"open drive '{}' ({}) failed - {}",
config.name,
config.path,
err
)
})
} }
/// Lto Tape device handle /// Lto Tape device handle
pub struct LtoTapeHandle { pub struct LtoTapeHandle {
sg_tape: SgTape, sg_tape: SgTape,
encryption_key_loaded: bool,
} }
impl LtoTapeHandle { impl LtoTapeHandle {
/// Creates a new instance /// Creates a new instance
pub fn new(file: File) -> Result<Self, Error> { pub fn new(file: File) -> Result<Self, Error> {
let sg_tape = SgTape::new(file)?; let sg_tape = SgTape::new(file)?;
Ok(Self { sg_tape }) Ok(Self {
sg_tape,
encryption_key_loaded: false,
})
} }
/// Set all options we need/want /// Open a tape device
pub fn set_default_options(&mut self) -> Result<(), Error> { ///
self.sg_tape.set_default_options()?; /// since this calls [SgTape::open_lto_drive], it does some internal checks.
Ok(()) /// See [SgTape] docs for details.
} pub fn open_lto_drive(config: &LtoTapeDrive) -> Result<Self, Error> {
let sg_tape = SgTape::open_lto_drive(config)?;
/// Set driver options let handle = Self {
pub fn set_drive_options( sg_tape,
&mut self, encryption_key_loaded: false,
compression: Option<bool>, };
block_length: Option<u32>,
buffer_mode: Option<bool>,
) -> Result<(), Error> {
self.sg_tape
.set_drive_options(compression, block_length, buffer_mode)
}
/// Write a single EOF mark without flushing buffers Ok(handle)
pub fn write_filemarks(&mut self, count: usize) -> Result<(), std::io::Error> {
self.sg_tape.write_filemarks(count, false)
} }
/// Get Tape and Media status /// Get Tape and Media status
@ -118,27 +90,11 @@ impl LtoTapeHandle {
self.sg_tape.space_filemarks(-count.try_into()?) self.sg_tape.space_filemarks(-count.try_into()?)
} }
pub fn forward_space_count_records(&mut self, count: usize) -> Result<(), Error> {
self.sg_tape.space_blocks(count.try_into()?)
}
pub fn backward_space_count_records(&mut self, count: usize) -> Result<(), Error> {
self.sg_tape.space_blocks(-count.try_into()?)
}
/// Position the tape after filemark count. Count 0 means BOT. /// Position the tape after filemark count. Count 0 means BOT.
pub fn locate_file(&mut self, position: u64) -> Result<(), Error> { pub fn locate_file(&mut self, position: u64) -> Result<(), Error> {
self.sg_tape.locate_file(position) self.sg_tape.locate_file(position)
} }
pub fn erase_media(&mut self, fast: bool) -> Result<(), Error> {
self.sg_tape.erase_media(fast)
}
pub fn load(&mut self) -> Result<(), Error> {
self.sg_tape.load()
}
/// Read Cartridge Memory (MAM Attributes) /// Read Cartridge Memory (MAM Attributes)
pub fn cartridge_memory(&mut self) -> Result<Vec<MamAttribute>, Error> { pub fn cartridge_memory(&mut self) -> Result<Vec<MamAttribute>, Error> {
self.sg_tape.cartridge_memory() self.sg_tape.cartridge_memory()
@ -149,18 +105,9 @@ impl LtoTapeHandle {
self.sg_tape.volume_statistics() self.sg_tape.volume_statistics()
} }
/// Lock the drive door /// Returns if a medium is present
pub fn lock(&mut self) -> Result<(), Error> { pub fn medium_present(&mut self) -> bool {
self.sg_tape self.sg_tape.test_unit_ready().is_ok()
.set_medium_removal(false)
.map_err(|err| format_err!("lock door failed - {}", err))
}
/// Unlock the drive door
pub fn unlock(&mut self) -> Result<(), Error> {
self.sg_tape
.set_medium_removal(true)
.map_err(|err| format_err!("unlock door failed - {}", err))
} }
} }
@ -271,6 +218,13 @@ impl TapeDriver for LtoTapeHandle {
self.sync()?; // sync data to tape self.sync()?; // sync data to tape
let encrypt_fingerprint = media_set_label
.encryption_key_fingerprint
.clone()
.map(|fp| (fp, media_set_label.uuid.clone()));
self.set_encryption(encrypt_fingerprint)?;
Ok(()) Ok(())
} }
@ -292,46 +246,27 @@ impl TapeDriver for LtoTapeHandle {
&mut self, &mut self,
key_fingerprint: Option<(Fingerprint, Uuid)>, key_fingerprint: Option<(Fingerprint, Uuid)>,
) -> Result<(), Error> { ) -> Result<(), Error> {
if nix::unistd::Uid::effective().is_root() { if let Some((fingerprint, uuid)) = key_fingerprint {
if let Some((ref key_fingerprint, ref uuid)) = key_fingerprint {
let (key_map, _digest) = crate::tape::encryption_keys::load_keys()?;
match key_map.get(key_fingerprint) {
Some(item) => {
// derive specialized key for each media-set
let mut tape_key = [0u8; 32];
let uuid_bytes: [u8; 16] = *uuid.as_bytes();
openssl::pkcs5::pbkdf2_hmac(
&item.key,
&uuid_bytes,
10,
openssl::hash::MessageDigest::sha256(),
&mut tape_key,
)?;
return self.sg_tape.set_encryption(Some(tape_key));
}
None => bail!("unknown tape encryption key '{}'", key_fingerprint),
}
} else {
return self.sg_tape.set_encryption(None);
}
}
let output = if let Some((fingerprint, uuid)) = key_fingerprint {
let fingerprint = fingerprint.signature(); let fingerprint = fingerprint.signature();
run_sg_tape_cmd( let output = run_sg_tape_cmd(
"encryption", "encryption",
&["--fingerprint", &fingerprint, "--uuid", &uuid.to_string()], &["--fingerprint", &fingerprint, "--uuid", &uuid.to_string()],
self.sg_tape.file_mut().as_raw_fd(), self.sg_tape.file_mut().as_raw_fd(),
)? )?;
self.encryption_key_loaded = true;
let result: Result<(), String> = serde_json::from_str(&output)?;
result.map_err(|err| format_err!("{}", err))
} else { } else {
run_sg_tape_cmd("encryption", &[], self.sg_tape.file_mut().as_raw_fd())? self.sg_tape.set_encryption(None)
}; }
let result: Result<(), String> = serde_json::from_str(&output)?; }
result.map_err(|err| format_err!("{}", err))
fn assert_encryption_mode(&mut self, encryption_wanted: bool) -> Result<(), Error> {
let encryption_set = drive_get_encryption(self.sg_tape.file_mut())?;
if encryption_wanted != encryption_set {
bail!("Set encryption mode not what was desired (set: {encryption_set}, wanted: {encryption_wanted})");
}
Ok(())
} }
} }
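
Illustrative sketch, not part of the changeset: the new Drop impl above clears the drive's encryption key when the handle goes away and only logs an error if a key had actually been loaded. The same guard pattern on a stand-in type (all names hypothetical):

/// Stand-in for a handle that may hold a hardware encryption key.
struct SecretSlot {
    key_loaded: bool,
}

impl SecretSlot {
    fn set_key(&mut self, key: Option<&[u8]>) -> Result<(), String> {
        // a real implementation would program the hardware here
        self.key_loaded = key.is_some();
        Ok(())
    }
}

impl Drop for SecretSlot {
    fn drop(&mut self) {
        // best effort: always try to clear, but stay quiet if nothing was loaded
        if let Err(err) = self.set_key(None) {
            if self.key_loaded {
                eprintln!("could not clear encryption key: {err}");
            }
        }
    }
}

fn main() {
    let mut slot = SecretSlot { key_loaded: false };
    slot.set_key(Some(b"deadbeef".as_slice())).unwrap();
    // the key is cleared automatically when `slot` goes out of scope
}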

View File

@ -105,11 +105,13 @@ pub trait TapeDriver {
key_config: Option<&KeyConfig>, key_config: Option<&KeyConfig>,
) -> Result<(), Error>; ) -> Result<(), Error>;
/// Read the media label /// Read the media label without setting the encryption key
/// ///
/// This tries to read both media labels (label and /// This is used internally by 'read_label' and when restoring the encryption
/// media_set_label). Also returns the optional encryption key configuration. /// key from the drive. Should not be used or overwritten otherwise!
fn read_label(&mut self) -> Result<(Option<MediaId>, Option<KeyConfig>), Error> { fn read_label_without_loading_key(
&mut self,
) -> Result<(Option<MediaId>, Option<KeyConfig>), Error> {
self.rewind()?; self.rewind()?;
let label = { let label = {
@ -187,6 +189,22 @@ pub trait TapeDriver {
Ok((Some(media_id), key_config)) Ok((Some(media_id), key_config))
} }
/// Read the media label
///
/// This tries to read both media labels (label and
/// media_set_label). Also returns the optional encryption key configuration.
///
/// Automatically sets the encryption key on the drive
fn read_label(&mut self) -> Result<(Option<MediaId>, Option<KeyConfig>), Error> {
let (media_id, key_config) = self.read_label_without_loading_key()?;
let encrypt_fingerprint = media_id.as_ref().and_then(|id| id.get_encryption_fp());
self.set_encryption(encrypt_fingerprint)?;
Ok((media_id, key_config))
}
/// Eject media /// Eject media
fn eject_media(&mut self) -> Result<(), Error>; fn eject_media(&mut self) -> Result<(), Error>;
@ -203,6 +221,9 @@ pub trait TapeDriver {
/// We use the media_set_uuid to XOR the secret key with the /// We use the media_set_uuid to XOR the secret key with the
/// uuid (first 16 bytes), so that each media set uses an unique /// uuid (first 16 bytes), so that each media set uses an unique
/// key for encryption. /// key for encryption.
///
/// Should be called as part of write_media_set_label or read_label,
/// so this should not be called manually.
fn set_encryption( fn set_encryption(
&mut self, &mut self,
key_fingerprint: Option<(Fingerprint, Uuid)>, key_fingerprint: Option<(Fingerprint, Uuid)>,
@ -212,6 +233,14 @@ pub trait TapeDriver {
} }
Ok(()) Ok(())
} }
/// Asserts that the encryption mode is set to the given value
fn assert_encryption_mode(&mut self, encryption_wanted: bool) -> Result<(), Error> {
if encryption_wanted {
bail!("drive does not support encryption");
}
Ok(())
}
} }
/// A boxed implementor of [`MediaChange`]. /// A boxed implementor of [`MediaChange`].
@ -280,7 +309,7 @@ pub fn open_drive(config: &SectionConfigData, drive: &str) -> Result<Box<dyn Tap
} }
"lto" => { "lto" => {
let tape = LtoTapeDrive::deserialize(config)?; let tape = LtoTapeDrive::deserialize(config)?;
let handle = open_lto_tape_drive(&tape)?; let handle = LtoTapeHandle::open_lto_drive(&tape)?;
Ok(Box::new(handle)) Ok(Box::new(handle))
} }
ty => bail!("unknown drive type '{}' - internal error", ty), ty => bail!("unknown drive type '{}' - internal error", ty),
@ -449,7 +478,7 @@ pub fn request_and_load_media(
} }
} }
let mut handle = match open_lto_tape_drive(&drive_config) { let mut handle = match LtoTapeHandle::open_lto_drive(&drive_config) {
Ok(handle) => handle, Ok(handle) => handle,
Err(err) => { Err(err) => {
update_and_log_request_error( update_and_log_request_error(
@ -572,7 +601,9 @@ fn tape_device_path(config: &SectionConfigData, drive: &str) -> Result<String, E
} }
} }
pub struct DeviceLockGuard(std::fs::File); pub struct DeviceLockGuard {
_file: std::fs::File,
}
// Uses systemd escape_unit to compute a file name from `device_path`, the try // Uses systemd escape_unit to compute a file name from `device_path`, the try
// to lock `/var/lock/<name>`. // to lock `/var/lock/<name>`.
@ -610,7 +641,7 @@ fn lock_device_path(device_path: &str) -> Result<DeviceLockGuard, TapeLockError>
} }
} }
Ok(DeviceLockGuard(file)) Ok(DeviceLockGuard { _file: file })
} }
// Same logic as lock_device_path, but uses a timeout of 0, making it // Same logic as lock_device_path, but uses a timeout of 0, making it
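
Illustrative sketch, not part of the changeset: the set_encryption doc comment above talks about mixing the secret key with the media-set uuid so each media set encrypts with its own key; the removed in-process code path (see the lto module above) actually derived that key with PBKDF2-HMAC-SHA256, using the uuid bytes as salt and 10 iterations rather than a plain XOR. A standalone version of that derivation, assuming the openssl crate as a dependency:

use openssl::hash::MessageDigest;
use openssl::pkcs5::pbkdf2_hmac;

/// Derive the media-set specific 256-bit key from the configured tape key
/// and the media-set uuid, as the removed in-process code path did.
fn derive_media_set_key(
    configured_key: &[u8; 32],
    media_set_uuid: &[u8; 16],
) -> Result<[u8; 32], openssl::error::ErrorStack> {
    let mut tape_key = [0u8; 32];
    pbkdf2_hmac(
        configured_key,
        media_set_uuid, // the uuid acts as the salt
        10,             // iteration count used by the old code
        MessageDigest::sha256(),
        &mut tape_key,
    )?;
    Ok(tape_key)
}

fn main() -> Result<(), openssl::error::ErrorStack> {
    let configured_key = [0x42u8; 32];
    let media_set_uuid = [0x11u8; 16];
    let derived = derive_media_set_key(&configured_key, &media_set_uuid)?;
    println!("derived media-set key: {:02x?}", derived);
    Ok(())
}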

View File

@ -12,7 +12,7 @@
use std::collections::HashMap; use std::collections::HashMap;
use anyhow::{bail, Error}; use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use proxmox_sys::fs::file_read_optional_string; use proxmox_sys::fs::file_read_optional_string;
@ -92,6 +92,14 @@ pub fn load_keys() -> Result<(HashMap<Fingerprint, EncryptionKeyInfo>, [u8; 32])
Ok((map, digest)) Ok((map, digest))
} }
pub fn load_key(fingerprint: &Fingerprint) -> Result<[u8; 32], Error> {
let (key_map, _digest) = crate::tape::encryption_keys::load_keys()?;
key_map
.get(fingerprint)
.map(|data| data.key)
.ok_or_else(|| format_err!("unknown tape encryption key '{fingerprint}'"))
}
/// Load tape encryption key configurations (password protected keys) /// Load tape encryption key configurations (password protected keys)
pub fn load_key_configs() -> Result<(HashMap<Fingerprint, KeyConfig>, [u8; 32]), Error> { pub fn load_key_configs() -> Result<(HashMap<Fingerprint, KeyConfig>, [u8; 32]), Error> {
let content = file_read_optional_string(TAPE_KEY_CONFIG_FILENAME)?; let content = file_read_optional_string(TAPE_KEY_CONFIG_FILENAME)?;

View File

@ -33,7 +33,7 @@ use serde_json::json;
use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions}; use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions};
use proxmox_uuid::Uuid; use proxmox_uuid::Uuid;
use pbs_api_types::{MediaLocation, MediaSetPolicy, MediaStatus, RetentionPolicy}; use pbs_api_types::{Fingerprint, MediaLocation, MediaSetPolicy, MediaStatus, RetentionPolicy};
use pbs_config::BackupLockGuard; use pbs_config::BackupLockGuard;
#[cfg(not(test))] #[cfg(not(test))]
@ -71,6 +71,10 @@ impl MediaId {
} }
self.label.pool.to_owned() self.label.pool.to_owned()
} }
pub(crate) fn get_encryption_fp(&self) -> Option<(Fingerprint, Uuid)> {
let label = self.clone().media_set_label?;
label.encryption_key_fingerprint.map(|fp| (fp, label.uuid))
}
} }
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
@ -244,14 +248,24 @@ impl Inventory {
} }
/// find media by label_text /// find media by label_text
pub fn find_media_by_label_text(&self, label_text: &str) -> Option<&MediaId> { pub fn find_media_by_label_text(&self, label_text: &str) -> Result<Option<&MediaId>, Error> {
self.map.values().find_map(|entry| { let ids: Vec<_> = self
if entry.id.label.label_text == label_text { .map
Some(&entry.id) .values()
} else { .filter_map(|entry| {
None if entry.id.label.label_text == label_text {
} Some(&entry.id)
}) } else {
None
}
})
.collect();
match ids.len() {
0 => Ok(None),
1 => Ok(Some(ids[0])),
count => bail!("There are '{count}' tapes with the label '{label_text}'"),
}
} }
/// Lookup media pool /// Lookup media pool
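
Illustrative sketch, not part of the changeset: find_media_by_label_text now distinguishes 'not found' from 'ambiguous', returning Result<Option<_>> and failing when the same label text occurs on more than one tape, which is why the callers above all grew a three-way match. The same idea on a stand-in inventory type (names are hypothetical):

use std::collections::HashMap;

use anyhow::{bail, Error};

struct MediaEntry {
    uuid: String,
    label_text: String,
}

struct Inventory {
    map: HashMap<String, MediaEntry>, // keyed by uuid
}

impl Inventory {
    /// Find a tape by its label text, erroring out if the label is ambiguous.
    fn find_by_label(&self, label_text: &str) -> Result<Option<&MediaEntry>, Error> {
        let hits: Vec<_> = self
            .map
            .values()
            .filter(|entry| entry.label_text == label_text)
            .collect();
        match hits.len() {
            0 => Ok(None),
            1 => Ok(Some(hits[0])),
            count => bail!("there are {count} tapes with the label '{label_text}'"),
        }
    }
}

fn main() -> Result<(), Error> {
    let mut map = HashMap::new();
    for (uuid, label) in [("u1", "tape1"), ("u2", "tape2"), ("u3", "tape2")] {
        map.insert(
            uuid.to_string(),
            MediaEntry { uuid: uuid.to_string(), label_text: label.to_string() },
        );
    }
    let inventory = Inventory { map };

    // unique label resolves, unknown label is None, duplicate label is an error
    println!("tape1 -> {}", inventory.find_by_label("tape1")?.unwrap().uuid);
    assert!(inventory.find_by_label("missing")?.is_none());
    assert!(inventory.find_by_label("tape2").is_err());
    Ok(())
}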

View File

@ -272,12 +272,7 @@ impl PoolWriter {
let media_set = media.media_set_label().unwrap(); let media_set = media.media_set_label().unwrap();
let encrypt_fingerprint = media_set drive.assert_encryption_mode(media_set.encryption_key_fingerprint.is_some())?;
.encryption_key_fingerprint
.clone()
.map(|fp| (fp, media_set.uuid.clone()));
drive.set_encryption(encrypt_fingerprint)?;
self.status = Some(PoolWriterState { self.status = Some(PoolWriterState {
drive, drive,

View File

@ -214,6 +214,12 @@ struct DiskInfo {
serial: OnceCell<Option<OsString>>, serial: OnceCell<Option<OsString>>,
// for perl: #[serde(skip_serializing)] // for perl: #[serde(skip_serializing)]
partition_table_type: OnceCell<Option<OsString>>, partition_table_type: OnceCell<Option<OsString>>,
// for perl: #[serde(skip_serializing)]
partition_entry_scheme: OnceCell<Option<OsString>>,
// for perl: #[serde(skip_serializing)]
partition_entry_uuid: OnceCell<Option<OsString>>,
// for perl: #[serde(skip_serializing)]
partition_entry_type: OnceCell<Option<OsString>>,
gpt: OnceCell<bool>, gpt: OnceCell<bool>,
// ??? // ???
bus: OnceCell<Option<OsString>>, bus: OnceCell<Option<OsString>>,
@ -412,6 +418,50 @@ impl Disk {
}) })
} }
/// Get the partitioning scheme of which this device is a partition.
pub fn partition_entry_scheme(&self) -> Option<&OsStr> {
self.info
.partition_entry_scheme
.get_or_init(|| {
self.device
.property_value("ID_PART_ENTRY_SCHEME")
.map(|v| v.to_owned())
})
.as_ref()
.map(OsString::as_os_str)
}
/// Check if this is a partition.
pub fn is_partition(&self) -> bool {
self.partition_entry_scheme().is_some()
}
/// Get the type of partition entry (ie. type UUID from the entry in the GPT partition table).
pub fn partition_entry_type(&self) -> Option<&OsStr> {
self.info
.partition_entry_type
.get_or_init(|| {
self.device
.property_value("ID_PART_ENTRY_TYPE")
.map(|v| v.to_owned())
})
.as_ref()
.map(OsString::as_os_str)
}
/// Get the partition entry UUID (ie. the UUID from the entry in the GPT partition table).
pub fn partition_entry_uuid(&self) -> Option<&OsStr> {
self.info
.partition_entry_uuid
.get_or_init(|| {
self.device
.property_value("ID_PART_ENTRY_UUID")
.map(|v| v.to_owned())
})
.as_ref()
.map(OsString::as_os_str)
}
/// Get the bus type used for this disk. /// Get the bus type used for this disk.
pub fn bus(&self) -> Option<&OsStr> { pub fn bus(&self) -> Option<&OsStr> {
self.info self.info
@ -1071,17 +1121,8 @@ pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error>
Some(path) => path, Some(path) => path,
None => bail!("disk {:?} has no node in /dev", disk.syspath()), None => bail!("disk {:?} has no node in /dev", disk.syspath()),
}; };
let disk_path_str = match disk_path.to_str() {
Some(path) => path,
None => bail!("disk {:?} could not transform into a str", disk.syspath()),
};
let mut is_partition = false; let is_partition = disk.is_partition();
for disk_info in get_lsblk_info()?.iter() {
if disk_info.path == disk_path_str && disk_info.partition_type.is_some() {
is_partition = true;
}
}
let mut to_wipe: Vec<PathBuf> = Vec::new(); let mut to_wipe: Vec<PathBuf> = Vec::new();
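
Illustrative sketch, not part of the changeset: the new partition_entry_scheme/uuid/type helpers simply read the ID_PART_ENTRY_* properties that udev attaches to partition devices, which is what lets wipe_blockdev above drop the lsblk round-trip. Roughly the same lookup done directly with the udev crate (assumed as a dependency, outside the Disk wrapper used here):

use std::ffi::OsStr;

fn main() -> std::io::Result<()> {
    let mut enumerator = udev::Enumerator::new()?;
    enumerator.match_subsystem("block")?;

    for device in enumerator.scan_devices()? {
        // ID_PART_ENTRY_* is only set on partition entries, not on whole disks
        let scheme = device.property_value("ID_PART_ENTRY_SCHEME");
        let uuid = device.property_value("ID_PART_ENTRY_UUID");
        println!(
            "{:?}: partition={} scheme={:?} uuid={:?}",
            device.sysname(),
            scheme.is_some(),
            scheme.unwrap_or(OsStr::new("-")),
            uuid.unwrap_or(OsStr::new("-")),
        );
    }
    Ok(())
}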

View File

@ -21,6 +21,7 @@ TAPE_UI_FILES= \
tape/window/Erase.js \ tape/window/Erase.js \
tape/window/EncryptionEdit.js \ tape/window/EncryptionEdit.js \
tape/window/LabelMedia.js \ tape/window/LabelMedia.js \
tape/window/MediaRemoveWindow.js \
tape/window/PoolEdit.js \ tape/window/PoolEdit.js \
tape/window/TapeBackup.js \ tape/window/TapeBackup.js \
tape/window/TapeBackupJob.js \ tape/window/TapeBackupJob.js \

View File

@ -52,12 +52,20 @@ Ext.define('PBS.datastore.DataStoreListSummary', {
vm.set('maintenance', ''); vm.set('maintenance', '');
} }
let total = statusData.avail + statusData.used; let usagetext;
let usage = statusData.used / total; let usage;
let usagetext = Ext.String.format(gettext('{0} of {1}'),
Proxmox.Utils.format_size(statusData.used, true), if (Object.hasOwn(statusData, 'avail') && Object.hasOwn(statusData, 'used')) {
Proxmox.Utils.format_size(total, true), let total = statusData.avail + statusData.used;
); usage = statusData.used / total;
usagetext = Ext.String.format(gettext('{0} of {1}'),
Proxmox.Utils.format_size(statusData.used, true),
Proxmox.Utils.format_size(total, true),
);
} else {
usagetext = Ext.String.format(gettext('{0} of {1}'), 0, 0);
usage = 0;
}
let usagePanel = me.lookup('usage'); let usagePanel = me.lookup('usage');
usagePanel.updateValue(usage, usagetext); usagePanel.updateValue(usage, usagetext);

View File

@ -35,13 +35,36 @@ Ext.define('PBS.form.GroupFilter', {
// break cyclic reference // break cyclic reference
me.removeReferences(record); me.removeReferences(record);
me.lookup('grid').getStore().remove(record); me.lookup('grid-include').getStore().remove(record);
me.lookup('grid-exclude').getStore().remove(record);
me.updateRealField(); me.updateRealField();
}, },
addFilter: function() { addIncludeFilter: function() {
let me = this; let me = this;
me.lookup('grid').getStore().add({}); me.lookup('grid-include').getStore().add({ behavior: 'include' });
me.updateRealField();
},
addExcludeFilter: function() {
let me = this;
me.lookup('grid-exclude').getStore().add({ behavior: 'exclude' });
me.updateRealField();
},
onBehaviorChange: function(field, value) {
let me = this;
let record = field.getWidgetRecord();
if (record === undefined) {
return;
}
record.set('behavior', value);
record.commit();
if (record.widgets) {
me.setInputValue(record.widgets, record);
}
me.updateRealField(); me.updateRealField();
}, },
@@ -77,8 +100,12 @@ Ext.define('PBS.form.GroupFilter', {
        },
        parseGroupFilter: function(filter) {
-            let [, type, input] = filter.match(/^(type|group|regex):(.*)$/);
+            let [, behavior, type, input] = filter.match(/^(?:(exclude|include):)?(type|group|regex):(.*)$/);
+            if (behavior === undefined) {
+                behavior = "include";
+            }
            return {
+                behavior,
                type,
                input,
            };
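
For reference, the extended group-filter syntax simply prepends an optional behavior to the existing expressions, so 'type:vm' still parses as an include filter while 'exclude:regex:^test-.*' becomes an exclude filter. A small stand-alone sketch of the same parsing (same regular expression as above; the example strings are illustrative):

// Optional 'include:'/'exclude:' prefix in front of 'type:'/'group:'/'regex:'.
function parseGroupFilter(filter) {
    let [, behavior, type, input] =
        filter.match(/^(?:(exclude|include):)?(type|group|regex):(.*)$/);
    return { behavior: behavior ?? 'include', type, input };
}

console.log(parseGroupFilter('type:vm'));
// -> { behavior: 'include', type: 'type', input: 'vm' }   (old syntax still parses)
console.log(parseGroupFilter('exclude:regex:^test-.*'));
// -> { behavior: 'exclude', type: 'regex', input: '^test-.*' }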
@@ -86,13 +113,16 @@ Ext.define('PBS.form.GroupFilter', {
        onValueChange: function(field, values) {
            let me = this;
-            let grid = me.lookup('grid');
+            let grid_include = me.lookup('grid-include');
+            let grid_exclude = me.lookup('grid-exclude');
            if (!values || values.length === 0) {
-                grid.getStore().removeAll();
+                grid_include.getStore().removeAll();
+                grid_exclude.getStore().removeAll();
                return;
            }
            let records = values.map((filter) => me.parseGroupFilter(filter));
-            grid.getStore().setData(records);
+            grid_include.getStore().setData(records);
+            grid_exclude.getStore().setData(records);
        },
        setInputValue: function(widgets, rec) {
@@ -162,9 +192,18 @@ Ext.define('PBS.form.GroupFilter', {
            let me = this;
            let filter = [];
-            me.lookup('grid').getStore().each((rec) => {
+            me.lookup('grid-include').getStore().each((rec) => {
                if (rec.data.type && rec.data.input) {
                    filter.push(`${rec.data.type}:${rec.data.input}`);
+                }
+            });
+            me.lookup('grid-exclude').getStore().each((rec) => {
+                if (rec.data.type && rec.data.input && rec.data.behavior) {
+                    let behavior_string = '';
+                    if (rec.data.behavior === 'exclude') {
+                        behavior_string = 'exclude:';
+                    }
+                    filter.push(`${behavior_string}${rec.data.type}:${rec.data.input}`);
                }
            });
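
Conversely, when the hidden field is rebuilt, include rows keep the bare syntax and exclude rows get an 'exclude:' prefix. A simplified, framework-free sketch of that assembly; the plain objects and the buildFilter name are stand-ins for the grid records and the updateRealField logic above:

// Include rows keep the bare syntax, exclude rows are prefixed with 'exclude:'.
function buildFilter(includeRows, excludeRows) {
    let filter = [];
    for (const rec of includeRows) {
        if (rec.type && rec.input) {
            filter.push(`${rec.type}:${rec.input}`);
        }
    }
    for (const rec of excludeRows) {
        if (rec.type && rec.input && rec.behavior) {
            let prefix = rec.behavior === 'exclude' ? 'exclude:' : '';
            filter.push(`${prefix}${rec.type}:${rec.input}`);
        }
    }
    return filter;
}

console.log(buildFilter(
    [{ type: 'type', input: 'vm' }],
    [{ behavior: 'exclude', type: 'group', input: 'vm/100' }],
));
// -> [ 'type:vm', 'exclude:group:vm/100' ]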
@@ -175,6 +214,9 @@ Ext.define('PBS.form.GroupFilter', {
        },
        control: {
+            'grid pbsGroupBehaviorSelector': {
+                change: 'onBehaviorChange',
+            },
            'grid pbsGroupFilterTypeSelector': {
                change: 'onTypeChange',
            },
@@ -264,72 +306,59 @@ Ext.define('PBS.form.GroupFilter', {
    items: [
        {
-            xtype: 'grid',
-            reference: 'grid',
+            xtype: 'pbsGroupFilterGrid',
+            title: 'Include filters',
            margin: '0 0 5 0',
-            scrollable: true,
-            height: 300,
+            reference: 'grid-include',
            store: {
-                fields: ['type', 'input'],
+                filters: [
+                    function(item) {
+                        return item.data.behavior === "include";
+                    },
+                ],
            },
            emptyText: gettext('Include all groups'),
            viewConfig: {
                deferEmptyText: false,
            },
-            columns: [
-                {
-                    text: gettext('Filter Type'),
-                    xtype: 'widgetcolumn',
-                    dataIndex: 'type',
-                    flex: 1,
-                    widget: {
-                        xtype: 'pbsGroupFilterTypeSelector',
-                        isFormField: false,
-                    },
-                },
-                {
-                    text: gettext('Filter Value'),
-                    xtype: 'widgetcolumn',
-                    flex: 1,
-                    onWidgetAttach: 'newInputColumn',
-                    widget: {
-                        padding: 0,
-                        bodyPadding: 0,
-                        xtype: 'fieldcontainer',
-                        layout: 'fit',
-                        defaults: {
-                            margin: 0,
-                        },
-                        items: [
-                            {
-                                hidden: true,
-                                xtype: 'pbsGroupTypeSelector',
-                                isFormField: false,
-                            },
-                            {
-                                hidden: true,
-                                xtype: 'textfield',
-                                type: 'regex',
-                                isFormField: false,
-                            },
-                            {
-                                hidden: true,
-                                xtype: 'pbsGroupSelector',
-                                isFormField: false,
-                            },
-                        ],
-                    },
-                },
-                {
-                    xtype: 'widgetcolumn',
-                    width: 40,
-                    widget: {
-                        xtype: 'button',
-                        iconCls: 'fa fa-trash-o',
-                    },
-                },
-            ],
-        },
+        },
+        {
+            xtype: 'container',
+            layout: {
+                type: 'hbox',
+            },
+            items: [
+                {
+                    xtype: 'button',
+                    text: gettext('Add include'),
+                    iconCls: 'fa fa-plus-circle',
+                    handler: 'addIncludeFilter',
+                },
+                {
+                    xtype: 'box',
+                    flex: 1,
+                },
+                {
+                    xtype: 'box',
+                    style: 'margin: 3px 0px;',
+                    html: `<span class="pmx-hint">${gettext('Note')}</span>: `
+                        + gettext('Filters are additive'),
+                },
+            ],
+        },
+        {
+            xtype: 'pbsGroupFilterGrid',
+            title: 'Exclude filters',
+            margin: '10 0 5 0',
+            reference: 'grid-exclude',
+            store: {
+                filters: [
+                    function(item) {
+                        return item.data.behavior === "exclude";
+                    },
+                ],
+            },
+        },
        {
            xtype: 'hiddenfield',
            reference: 'realfield',
@@ -356,9 +385,9 @@ Ext.define('PBS.form.GroupFilter', {
            items: [
                {
                    xtype: 'button',
-                    text: gettext('Add'),
+                    text: gettext('Add exclude'),
                    iconCls: 'fa fa-plus-circle',
-                    handler: 'addFilter',
+                    handler: 'addExcludeFilter',
                },
                {
                    xtype: 'box',
@@ -368,7 +397,7 @@ Ext.define('PBS.form.GroupFilter', {
                    xtype: 'box',
                    style: 'margin: 3px 0px;',
                    html: `<span class="pmx-hint">${gettext('Note')}</span>: `
-                        + gettext('Filters are additive (OR-like)'),
+                        + gettext('Exclude filters will be applied after include filters'),
                },
            ],
        },
@ -384,6 +413,82 @@ Ext.define('PBS.form.GroupFilter', {
}, },
}); });
Ext.define('PBS.form.pbsGroupBehaviorSelector', {
extend: 'Proxmox.form.KVComboBox',
alias: 'widget.pbsGroupBehaviorSelector',
allowBlank: false,
comboItems: [
['include', gettext('Include')],
['exclude', gettext('Exclude')],
],
});
Ext.define('PBS.form.GroupFilterGrid', {
extend: 'Ext.grid.Panel',
alias: 'widget.pbsGroupFilterGrid',
scrollable: true,
height: 200,
store: {
fields: ['type', 'input'],
},
columns: [
{
text: gettext('Filter Type'),
xtype: 'widgetcolumn',
dataIndex: 'type',
flex: 1,
widget: {
xtype: 'pbsGroupFilterTypeSelector',
isFormField: false,
},
},
{
text: gettext('Filter Value'),
xtype: 'widgetcolumn',
flex: 1,
onWidgetAttach: 'newInputColumn',
widget: {
padding: 0,
bodyPadding: 0,
xtype: 'fieldcontainer',
layout: 'fit',
defaults:
{
margin: 0,
},
items: [
{
hidden: true,
xtype: 'pbsGroupTypeSelector',
isFormField: false,
},
{
hidden: true,
xtype: 'textfield',
type: 'regex',
isFormField: false,
},
{
hidden: true,
xtype: 'pbsGroupSelector',
isFormField: false,
},
],
},
},
{
xtype: 'widgetcolumn',
width: 40,
widget: {
xtype: 'button',
iconCls: 'fa fa-trash-o',
},
},
],
});
Ext.define('PBS.form.GroupFilterTypeSelector', { Ext.define('PBS.form.GroupFilterTypeSelector', {
extend: 'Proxmox.form.KVComboBox', extend: 'Proxmox.form.KVComboBox',
alias: 'widget.pbsGroupFilterTypeSelector', alias: 'widget.pbsGroupFilterTypeSelector',

View File

@@ -151,7 +151,7 @@ Ext.define('PBS.NodeInfoPanel', {
                return data.kversion;
            }
            let kernel = data['current-kernel'];
-            let buildDate = kernel.version.match(/\((.+)\)\s*$/)[1] ?? 'unknown';
+            let buildDate = kernel.version.match(/\((.+)\)\s*$/)?.[1] ?? 'unknown';
            return `${kernel.sysname} ${kernel.release} (${buildDate})`;
        },
        value: '',
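
The added optional chaining avoids a TypeError when the reported kernel version string carries no parenthesized build date, since String.prototype.match() returns null on a miss. A small illustration; the version strings below are made up:

const buildDate = (version) => version.match(/\((.+)\)\s*$/)?.[1] ?? 'unknown';

console.log(buildDate('#1 SMP PREEMPT_DYNAMIC PMX 6.5.11-7 (2023-12-05T09:44Z)'));
// -> '2023-12-05T09:44Z'
console.log(buildDate('#1 SMP custom build'));
// -> 'unknown'   (previously a TypeError, because null[1] throws)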

View File

@@ -17,7 +17,7 @@ Ext.define('pbs-model-tapes', {
        'status',
        'uuid',
    ],
-    idProperty: 'label-text',
+    idProperty: 'uuid',
    proxy: {
        type: 'proxmox',
        url: '/api2/json/tape/media/list',
@ -60,6 +60,27 @@ Ext.define('PBS.TapeManagement.TapeInventory', {
}).show(); }).show();
}, },
remove: function() {
let me = this;
let view = me.getView();
let selection = view.getSelection();
if (!selection || selection.length < 1) {
return;
}
let uuid = selection[0].data.uuid;
let label = selection[0].data['label-text'];
Ext.create('PBS.TapeManagement.MediaRemoveWindow', {
uuid,
label,
autoShow: true,
listeners: {
destroy: function() {
me.reload();
},
},
});
},
moveToVault: function() { moveToVault: function() {
let me = this; let me = this;
let view = me.getView(); let view = me.getView();
@ -206,6 +227,12 @@ Ext.define('PBS.TapeManagement.TapeInventory', {
disabled: true, disabled: true,
handler: 'format', handler: 'format',
}, },
{
xtype: 'proxmoxButton',
text: gettext('Remove'),
disabled: true,
handler: 'remove',
},
], ],
features: [ features: [
@ -293,5 +320,11 @@ Ext.define('PBS.TapeManagement.TapeInventory', {
}, },
flex: 1, flex: 1,
}, },
{
text: gettext('UUID'),
dataIndex: 'uuid',
flex: 1,
hidden: true,
},
], ],
}); });

View File

@ -0,0 +1,66 @@
Ext.define('PBS.TapeManagement.MediaRemoveWindow', {
extend: 'Proxmox.window.Edit',
mixins: ['Proxmox.Mixin.CBind'],
uuid: undefined,
label: undefined,
cbindData: function(config) {
let me = this;
return {
uuid: me.uuid,
warning: Ext.String.format(gettext("Are you sure you want to remove tape '{0}' ?"), me.label),
};
},
title: gettext('Remove Media'),
url: `/api2/extjs/tape/media/destroy`,
layout: 'hbox',
width: 400,
method: 'GET',
isCreate: true,
submitText: gettext('Ok'),
items: [
{
xtype: 'container',
padding: 0,
layout: {
type: 'hbox',
align: 'stretch',
},
items: [
{
xtype: 'component',
cls: [Ext.baseCSSPrefix + 'message-box-icon',
Ext.baseCSSPrefix + 'message-box-warning',
Ext.baseCSSPrefix + 'dlg-icon'],
},
{
xtype: 'container',
flex: 1,
items: [
{
xtype: 'displayfield',
cbind: {
value: '{warning}',
},
},
{
xtype: 'hidden',
name: 'uuid',
cbind: {
value: '{uuid}',
},
},
{
xtype: 'proxmoxcheckbox',
fieldLabel: gettext('Force'),
name: 'force',
},
],
},
],
},
],
});
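
On submit this dialog issues a GET request against /api2/extjs/tape/media/destroy carrying the uuid and the optional force flag. A rough, framework-free sketch of the equivalent call; the destroyMedia helper name, the parameter encoding, and the session handling are assumptions for illustration, since the real UI submits through Proxmox.window.Edit:

// Rough equivalent of what the dialog submits (illustration only).
async function destroyMedia(uuid, force = false) {
    const params = new URLSearchParams({ uuid, force: force ? 1 : 0 });
    const response = await fetch(`/api2/extjs/tape/media/destroy?${params}`, {
        method: 'GET',          // matches the window's method above
        credentials: 'include', // assumes an existing login session cookie
    });
    return response.json();
}

// destroyMedia('<media-uuid>', true).then(console.log);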

View File

@ -66,7 +66,6 @@ Ext.define('PBS.window.UserEdit', {
fieldLabel: gettext('User name'), fieldLabel: gettext('User name'),
renderer: Ext.htmlEncode, renderer: Ext.htmlEncode,
allowBlank: false, allowBlank: false,
minLength: 4,
cbind: { cbind: {
editable: '{isCreate}', editable: '{isCreate}',
}, },