Note:
Weeks start on Monday and end on Sunday. The software
@@ -98,10 +98,10 @@ the end of the year correctly.
keep-monthly <N>
Keep backups for the last <N>
months. If there is more than one
-backup for a single month, only the latest is kept.
+backup for a single month, only the latest is kept. Months without backups do not count.
keep-yearly <N>
Keep backups for the last <N>
years. If there is more than one
-backup for a single year, only the latest is kept.
+backup for a single year, only the latest is kept. Years without backups do not count.
The retention options are processed in the order given above. Each option
only covers backups within its time period. The next option does not take care
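
A worked illustration of the clarified wording (a standalone sketch, not part of this series; the helper below is hypothetical):

    use std::collections::BTreeMap;

    /// Keep the newest backup of each of the last `n` months that actually
    /// contain backups; months without any backup do not count toward `n`.
    fn keep_monthly(snapshots: &[(i32, u32, u32)], n: usize) -> Vec<(i32, u32, u32)> {
        // Map (year, month) -> newest (year, month, day) seen for that month.
        let mut latest: BTreeMap<(i32, u32), (i32, u32, u32)> = BTreeMap::new();
        for &(year, month, day) in snapshots {
            let entry = latest.entry((year, month)).or_insert((year, month, day));
            if day > entry.2 {
                *entry = (year, month, day);
            }
        }
        latest.into_values().rev().take(n).collect()
    }

    fn main() {
        // Backups exist only in January and March; with keep-monthly = 2 both
        // are kept, because empty February does not consume one of the slots.
        let snaps = [(2023, 1, 10), (2023, 1, 20), (2023, 3, 5)];
        assert_eq!(keep_monthly(&snaps, 2), vec![(2023, 3, 5), (2023, 1, 20)]);
    }
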
diff --git a/docs/pxar/man1.rst b/docs/pxar/man1.rst
index 39b2861c1..be8a1604f 100644
--- a/docs/pxar/man1.rst
+++ b/docs/pxar/man1.rst
@@ -1,3 +1,5 @@
+:orphan:
+
====
pxar
====
diff --git a/pbs-api-types/Cargo.toml b/pbs-api-types/Cargo.toml
index 31b69f625..94ab583b1 100644
--- a/pbs-api-types/Cargo.toml
+++ b/pbs-api-types/Cargo.toml
@@ -7,6 +7,7 @@ description = "general API type helpers for PBS"
[dependencies]
anyhow.workspace = true
+const_format.workspace = true
hex.workspace = true
lazy_static.workspace = true
percent-encoding.workspace = true
diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs
index 8bbd29588..ef6398629 100644
--- a/pbs-api-types/src/acl.rs
+++ b/pbs-api-types/src/acl.rs
@@ -1,5 +1,6 @@
use std::str::FromStr;
+use const_format::concatcp;
use serde::de::{value, IntoDeserializer};
use serde::{Deserialize, Serialize};
@@ -8,8 +9,10 @@ use proxmox_schema::{
api, const_regex, ApiStringFormat, BooleanSchema, EnumEntry, Schema, StringSchema,
};
+use crate::PROXMOX_SAFE_ID_REGEX_STR;
+
const_regex! {
- pub ACL_PATH_REGEX = concat!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR!(), ")+", r")$");
+ pub ACL_PATH_REGEX = concatcp!(r"^(?:/|", r"(?:/", PROXMOX_SAFE_ID_REGEX_STR, ")+", r")$");
}
// define Privilege bitfield
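
For context on the concat! to concatcp! switch used throughout this series: std's concat! only accepts literals, which is why these shared patterns had to be wrapped in macros expanding to literals. const_format::concatcp! concatenates named constants at compile time, so plain `pub const` strings compose directly. A minimal standalone sketch (SAFE_ID mirrors the PROXMOX_SAFE_ID_REGEX_STR value defined further down in this patch):

    use const_format::concatcp;

    const SAFE_ID: &str = r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)";
    // Joined into a single &'static str at compile time:
    const ACL_PATH: &str = concatcp!(r"^(?:/|", r"(?:/", SAFE_ID, ")+", r")$");

    fn main() {
        assert_eq!(ACL_PATH, r"^(?:/|(?:/(?:[A-Za-z0-9_][A-Za-z0-9._\-]*))+)$");
    }
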
diff --git a/pbs-api-types/src/common_regex.rs b/pbs-api-types/src/common_regex.rs
deleted file mode 100644
index 8fe306737..000000000
--- a/pbs-api-types/src/common_regex.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-//! Predefined Regular Expressions
-//!
-//! This is a collection of useful regular expressions
-
-use lazy_static::lazy_static;
-use regex::Regex;
-
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! IPV4OCTET { () => (r"(?:25[0-5]|(?:2[0-4]|1[0-9]|[1-9])?[0-9])") }
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! IPV6H16 { () => (r"(?:[0-9a-fA-F]{1,4})") }
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! IPV6LS32 { () => (concat!(r"(?:(?:", IPV4RE!(), "|", IPV6H16!(), ":", IPV6H16!(), "))" )) }
-
-/// Returns the regular expression string to match IPv4 addresses
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! IPV4RE { () => (concat!(r"(?:(?:", IPV4OCTET!(), r"\.){3}", IPV4OCTET!(), ")")) }
-
-/// Returns the regular expression string to match IPv6 addresses
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! IPV6RE { () => (concat!(r"(?:",
- r"(?:(?:", r"(?:", IPV6H16!(), r":){6})", IPV6LS32!(), r")|",
- r"(?:(?:", r"::(?:", IPV6H16!(), r":){5})", IPV6LS32!(), r")|",
- r"(?:(?:(?:", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){4})", IPV6LS32!(), r")|",
- r"(?:(?:(?:(?:", IPV6H16!(), r":){0,1}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){3})", IPV6LS32!(), r")|",
- r"(?:(?:(?:(?:", IPV6H16!(), r":){0,2}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){2})", IPV6LS32!(), r")|",
- r"(?:(?:(?:(?:", IPV6H16!(), r":){0,3}", IPV6H16!(), r")?::(?:", IPV6H16!(), r":){1})", IPV6LS32!(), r")|",
- r"(?:(?:(?:(?:", IPV6H16!(), r":){0,4}", IPV6H16!(), r")?::", ")", IPV6LS32!(), r")|",
- r"(?:(?:(?:(?:", IPV6H16!(), r":){0,5}", IPV6H16!(), r")?::", ")", IPV6H16!(), r")|",
- r"(?:(?:(?:(?:", IPV6H16!(), r":){0,6}", IPV6H16!(), r")?::", ")))"))
-}
-
-/// Returns the regular expression string to match IP addresses (v4 or v6)
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! IPRE { () => (concat!(r"(?:", IPV4RE!(), "|", IPV6RE!(), ")")) }
-
-/// Regular expression string to match IP addresses where IPv6 addresses require brackets around
-/// them, while for IPv4 they are forbidden.
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! IPRE_BRACKET { () => (
- concat!(r"(?:",
- IPV4RE!(),
- r"|\[(?:",
- IPV6RE!(),
- r")\]",
- r")"))
-}
-
-lazy_static! {
- pub static ref IP_REGEX: Regex = Regex::new(concat!(r"^", IPRE!(), r"$")).unwrap();
- pub static ref IP_BRACKET_REGEX: Regex =
- Regex::new(concat!(r"^", IPRE_BRACKET!(), r"$")).unwrap();
- pub static ref SHA256_HEX_REGEX: Regex = Regex::new(r"^[a-f0-9]{64}$").unwrap();
- pub static ref SYSTEMD_DATETIME_REGEX: Regex =
- Regex::new(r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$").unwrap();
-}
-
-#[test]
-fn test_regexes() {
- assert!(IP_REGEX.is_match("127.0.0.1"));
- assert!(IP_REGEX.is_match("::1"));
- assert!(IP_REGEX.is_match("2014:b3a::27"));
- assert!(IP_REGEX.is_match("2014:b3a::192.168.0.1"));
- assert!(IP_REGEX.is_match("2014:b3a:0102:adf1:1234:4321:4afA:BCDF"));
-
- assert!(IP_BRACKET_REGEX.is_match("127.0.0.1"));
- assert!(IP_BRACKET_REGEX.is_match("[::1]"));
- assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::27]"));
- assert!(IP_BRACKET_REGEX.is_match("[2014:b3a::192.168.0.1]"));
- assert!(IP_BRACKET_REGEX.is_match("[2014:b3a:0102:adf1:1234:4321:4afA:BCDF]"));
-}
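
Deleting this module loses no functionality: as the pbs-api-types/src/lib.rs hunks below show, the same regexes (IP_REGEX, SHA256_HEX_REGEX, SYSTEMD_DATETIME_REGEX, ...) are now re-exported from proxmox_schema::api_types. The caller-side effect, sketched:

    // Before this patch: pbs_api_types::common_regex::IP_REGEX
    // After: the crate-root re-export (see the network/parser.rs hunk below).
    use pbs_api_types::IP_REGEX;

    fn main() {
        assert!(IP_REGEX.is_match("127.0.0.1"));
        assert!(IP_REGEX.is_match("::1"));
    }
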
diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index cce9888b5..5e13c157e 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -2,6 +2,7 @@ use std::fmt;
use std::path::PathBuf;
use anyhow::{bail, format_err, Error};
+use const_format::concatcp;
use serde::{Deserialize, Serialize};
use proxmox_schema::{
@@ -10,31 +11,33 @@ use proxmox_schema::{
};
use crate::{
- Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid,
- DATASTORE_NOTIFY_STRING_SCHEMA, GC_SCHEDULE_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
- PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA, UPID,
+ Authid, CryptMode, Fingerprint, GroupFilter, MaintenanceMode, Userid, BACKUP_ID_RE,
+ BACKUP_NS_RE, BACKUP_TIME_RE, BACKUP_TYPE_RE, DATASTORE_NOTIFY_STRING_SCHEMA,
+ GC_SCHEDULE_SCHEMA, GROUP_OR_SNAPSHOT_PATH_REGEX_STR, PROXMOX_SAFE_ID_FORMAT,
+ PROXMOX_SAFE_ID_REGEX_STR, PRUNE_SCHEDULE_SCHEMA, SHA256_HEX_REGEX, SINGLE_LINE_COMMENT_SCHEMA,
+ SNAPSHOT_PATH_REGEX_STR, UPID,
};
const_regex! {
- pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$");
+ pub BACKUP_NAMESPACE_REGEX = concatcp!(r"^", BACKUP_NS_RE, r"$");
- pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");
+ pub BACKUP_TYPE_REGEX = concatcp!(r"^(", BACKUP_TYPE_RE, r")$");
- pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");
+ pub BACKUP_ID_REGEX = concatcp!(r"^", BACKUP_ID_RE, r"$");
- pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
+ pub BACKUP_DATE_REGEX = concatcp!(r"^", BACKUP_TIME_RE ,r"$");
- pub GROUP_PATH_REGEX = concat!(
- r"^(", BACKUP_TYPE_RE!(), ")/",
- r"(", BACKUP_ID_RE!(), r")$",
+ pub GROUP_PATH_REGEX = concatcp!(
+ r"^(", BACKUP_TYPE_RE, ")/",
+ r"(", BACKUP_ID_RE, r")$",
);
pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";
- pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
- pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$");
+ pub SNAPSHOT_PATH_REGEX = concatcp!(r"^", SNAPSHOT_PATH_REGEX_STR, r"$");
+ pub GROUP_OR_SNAPSHOT_PATH_REGEX = concatcp!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR, r"$");
- pub DATASTORE_MAP_REGEX = concat!(r"^(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
+ pub DATASTORE_MAP_REGEX = concatcp!(r"^(?:", PROXMOX_SAFE_ID_REGEX_STR, r"=)?", PROXMOX_SAFE_ID_REGEX_STR, r"$");
}
pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
index 80578d80c..6fb9b187d 100644
--- a/pbs-api-types/src/jobs.rs
+++ b/pbs-api-types/src/jobs.rs
@@ -1,6 +1,7 @@
use std::str::FromStr;
use anyhow::bail;
+use const_format::concatcp;
use regex::Regex;
use serde::{Deserialize, Serialize};
@@ -8,17 +9,17 @@ use proxmox_schema::*;
use crate::{
Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA,
- BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
- NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA,
- SINGLE_LINE_COMMENT_SCHEMA,
+ BACKUP_NAMESPACE_SCHEMA, BACKUP_NS_RE, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA,
+ MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
+ PROXMOX_SAFE_ID_REGEX_STR, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
};
const_regex! {
/// Regex for verification jobs 'DATASTORE:ACTUAL_JOB_ID'
- pub VERIFICATION_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):");
+ pub VERIFICATION_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"):");
/// Regex for sync jobs '(REMOTE|\-):REMOTE_DATASTORE:LOCAL_DATASTORE:(?:LOCAL_NS_ANCHOR:)ACTUAL_JOB_ID'
- pub SYNC_JOB_WORKER_ID_REGEX = concat!(r"^(", PROXMOX_SAFE_ID_REGEX_STR!(), r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR!(), r"):(", PROXMOX_SAFE_ID_REGEX_STR!(), r")(?::(", BACKUP_NS_RE!(), r"))?:");
+ pub SYNC_JOB_WORKER_ID_REGEX = concatcp!(r"^(", PROXMOX_SAFE_ID_REGEX_STR, r"|\-):(", PROXMOX_SAFE_ID_REGEX_STR, r"):(", PROXMOX_SAFE_ID_REGEX_STR, r")(?::(", BACKUP_NS_RE, r"))?:");
}
pub const JOB_ID_SCHEMA: Schema = StringSchema::new("Job ID.")
diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs
index 795ff2a6d..7eb836ed8 100644
--- a/pbs-api-types/src/lib.rs
+++ b/pbs-api-types/src/lib.rs
@@ -1,10 +1,8 @@
//! Basic API types used by most of the PBS code.
+use const_format::concatcp;
use serde::{Deserialize, Serialize};
-use proxmox_auth_api::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR};
-
-pub mod common_regex;
pub mod percent_encoding;
use proxmox_schema::{
@@ -12,59 +10,78 @@ use proxmox_schema::{
};
use proxmox_time::parse_daily_duration;
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! PROXMOX_SAFE_ID_REGEX_STR { () => { r"(?:[A-Za-z0-9_][A-Za-z0-9._\-]*)" }; }
+use proxmox_auth_api::types::{APITOKEN_ID_REGEX_STR, USER_ID_REGEX_STR};
+
+pub use proxmox_schema::api_types::SAFE_ID_FORMAT as PROXMOX_SAFE_ID_FORMAT;
+pub use proxmox_schema::api_types::SAFE_ID_REGEX as PROXMOX_SAFE_ID_REGEX;
+pub use proxmox_schema::api_types::SAFE_ID_REGEX_STR as PROXMOX_SAFE_ID_REGEX_STR;
+pub use proxmox_schema::api_types::{
+ BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX,
+};
+pub use proxmox_schema::api_types::{DNS_ALIAS_REGEX, DNS_NAME_OR_IP_REGEX, DNS_NAME_REGEX};
+pub use proxmox_schema::api_types::{FINGERPRINT_SHA256_REGEX, SHA256_HEX_REGEX};
+pub use proxmox_schema::api_types::{
+ GENERIC_URI_REGEX, HOSTNAME_REGEX, HOST_PORT_REGEX, HTTP_URL_REGEX,
+};
+pub use proxmox_schema::api_types::{MULTI_LINE_COMMENT_REGEX, SINGLE_LINE_COMMENT_REGEX};
+pub use proxmox_schema::api_types::{PASSWORD_REGEX, SYSTEMD_DATETIME_REGEX, UUID_REGEX};
+
+pub use proxmox_schema::api_types::{CIDR_FORMAT, CIDR_REGEX};
+pub use proxmox_schema::api_types::{CIDR_V4_FORMAT, CIDR_V4_REGEX};
+pub use proxmox_schema::api_types::{CIDR_V6_FORMAT, CIDR_V6_REGEX};
+pub use proxmox_schema::api_types::{IPRE_STR, IP_FORMAT, IP_REGEX};
+pub use proxmox_schema::api_types::{IPV4RE_STR, IP_V4_FORMAT, IP_V4_REGEX};
+pub use proxmox_schema::api_types::{IPV6RE_STR, IP_V6_FORMAT, IP_V6_REGEX};
+
+pub use proxmox_schema::api_types::COMMENT_SCHEMA as SINGLE_LINE_COMMENT_SCHEMA;
+pub use proxmox_schema::api_types::HOSTNAME_SCHEMA;
+pub use proxmox_schema::api_types::HOST_PORT_SCHEMA;
+pub use proxmox_schema::api_types::HTTP_URL_SCHEMA;
+pub use proxmox_schema::api_types::MULTI_LINE_COMMENT_SCHEMA;
+pub use proxmox_schema::api_types::NODE_SCHEMA;
+pub use proxmox_schema::api_types::SINGLE_LINE_COMMENT_FORMAT;
+pub use proxmox_schema::api_types::{
+ BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA,
+};
+pub use proxmox_schema::api_types::{CERT_FINGERPRINT_SHA256_SCHEMA, FINGERPRINT_SHA256_FORMAT};
+pub use proxmox_schema::api_types::{DISK_ARRAY_SCHEMA, DISK_LIST_SCHEMA};
+pub use proxmox_schema::api_types::{DNS_ALIAS_FORMAT, DNS_NAME_FORMAT, DNS_NAME_OR_IP_SCHEMA};
+pub use proxmox_schema::api_types::{PASSWORD_FORMAT, PASSWORD_SCHEMA};
+pub use proxmox_schema::api_types::{SERVICE_ID_SCHEMA, UUID_FORMAT};
+pub use proxmox_schema::api_types::{SYSTEMD_DATETIME_FORMAT, TIME_ZONE_SCHEMA};
+
+use proxmox_schema::api_types::{DNS_NAME_STR, IPRE_BRACKET_STR};
#[rustfmt::skip]
-#[macro_export]
-macro_rules! BACKUP_ID_RE { () => (r"[A-Za-z0-9_][A-Za-z0-9._\-]*") }
+pub const BACKUP_ID_RE: &str = r"[A-Za-z0-9_][A-Za-z0-9._\-]*";
#[rustfmt::skip]
-#[macro_export]
-macro_rules! BACKUP_TYPE_RE { () => (r"(?:host|vm|ct)") }
+pub const BACKUP_TYPE_RE: &str = r"(?:host|vm|ct)";
#[rustfmt::skip]
-#[macro_export]
-macro_rules! BACKUP_TIME_RE { () => (r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z") }
+pub const BACKUP_TIME_RE: &str = r"[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z";
#[rustfmt::skip]
-#[macro_export]
-macro_rules! BACKUP_NS_RE {
- () => (
- concat!("(?:",
- "(?:", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR!(),
- ")?")
+pub const BACKUP_NS_RE: &str =
+ concatcp!("(?:",
+ "(?:", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}", PROXMOX_SAFE_ID_REGEX_STR,
+ ")?");
+
+#[rustfmt::skip]
+pub const BACKUP_NS_PATH_RE: &str =
+ concatcp!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR, r"/");
+
+#[rustfmt::skip]
+pub const SNAPSHOT_PATH_REGEX_STR: &str =
+ concatcp!(
+ r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")/(", BACKUP_TIME_RE, r")",
);
-}
#[rustfmt::skip]
-#[macro_export]
-macro_rules! BACKUP_NS_PATH_RE {
- () => (
- concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/")
+pub const GROUP_OR_SNAPSHOT_PATH_REGEX_STR: &str =
+ concatcp!(
+ r"(", BACKUP_TYPE_RE, ")/(", BACKUP_ID_RE, ")(?:/(", BACKUP_TIME_RE, r"))?",
);
-}
-
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! SNAPSHOT_PATH_REGEX_STR {
- () => (
- concat!(
- r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")",
- )
- );
-}
-
-#[rustfmt::skip]
-#[macro_export]
-macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR {
- () => {
- concat!(
- r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")(?:/(", BACKUP_TIME_RE!(), r"))?",
- )
- };
-}
mod acl;
pub use acl::*;
@@ -128,97 +145,28 @@ pub use zfs::*;
mod metrics;
pub use metrics::*;
-#[rustfmt::skip]
-#[macro_use]
-mod local_macros {
- macro_rules! DNS_LABEL { () => (r"(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
- macro_rules! DNS_NAME { () => (concat!(r"(?:(?:", DNS_LABEL!() , r"\.)*", DNS_LABEL!(), ")")) }
- macro_rules! CIDR_V4_REGEX_STR { () => (concat!(r"(?:", IPV4RE!(), r"/\d{1,2})$")) }
- macro_rules! CIDR_V6_REGEX_STR { () => (concat!(r"(?:", IPV6RE!(), r"/\d{1,3})$")) }
- macro_rules! DNS_ALIAS_LABEL { () => (r"(?:[a-zA-Z0-9_](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)") }
- macro_rules! DNS_ALIAS_NAME {
- () => (concat!(r"(?:(?:", DNS_ALIAS_LABEL!() , r"\.)*", DNS_ALIAS_LABEL!(), ")"))
- }
- macro_rules! PORT_REGEX_STR { () => (r"(?:[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])") }
-}
-
const_regex! {
- pub IP_V4_REGEX = concat!(r"^", IPV4RE!(), r"$");
- pub IP_V6_REGEX = concat!(r"^", IPV6RE!(), r"$");
- pub IP_REGEX = concat!(r"^", IPRE!(), r"$");
- pub CIDR_V4_REGEX = concat!(r"^", CIDR_V4_REGEX_STR!(), r"$");
- pub CIDR_V6_REGEX = concat!(r"^", CIDR_V6_REGEX_STR!(), r"$");
- pub CIDR_REGEX = concat!(r"^(?:", CIDR_V4_REGEX_STR!(), "|", CIDR_V6_REGEX_STR!(), r")$");
- pub HOSTNAME_REGEX = r"^(?:[a-zA-Z0-9](?:[a-zA-Z0-9\-]*[a-zA-Z0-9])?)$";
- pub DNS_NAME_REGEX = concat!(r"^", DNS_NAME!(), r"$");
- pub DNS_ALIAS_REGEX = concat!(r"^", DNS_ALIAS_NAME!(), r"$");
- pub DNS_NAME_OR_IP_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE!(), r")$");
- pub HOST_PORT_REGEX = concat!(r"^(?:", DNS_NAME!(), "|", IPRE_BRACKET!(), "):", PORT_REGEX_STR!() ,"$");
- pub HTTP_URL_REGEX = concat!(r"^https?://(?:(?:(?:", DNS_NAME!(), "|", IPRE_BRACKET!(), ")(?::", PORT_REGEX_STR!() ,")?)|", IPV6RE!(),")(?:/[^\x00-\x1F\x7F]*)?$");
-
- pub SHA256_HEX_REGEX = r"^[a-f0-9]{64}$"; // fixme: define in common_regex ?
-
- pub PASSWORD_REGEX = r"^[[:^cntrl:]]*$"; // everything but control characters
-
- pub UUID_REGEX = r"^[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}$";
-
- pub SYSTEMD_DATETIME_REGEX = r"^\d{4}-\d{2}-\d{2}( \d{2}:\d{2}(:\d{2})?)?$"; // fixme: define in common_regex ?
-
- pub FINGERPRINT_SHA256_REGEX = r"^(?:[0-9a-fA-F][0-9a-fA-F])(?::[0-9a-fA-F][0-9a-fA-F]){31}$";
-
// just a rough check - dummy acceptor is used before persisting
pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$";
- /// Regex for safe identifiers.
- ///
- /// This
- /// [article](https://dwheeler.com/essays/fixing-unix-linux-filenames.html)
- /// contains further information why it is reasonable to restict
- /// names this way. This is not only useful for filenames, but for
- /// any identifier command line tools work with.
- pub PROXMOX_SAFE_ID_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r"$");
-
- pub SINGLE_LINE_COMMENT_REGEX = r"^[[:^cntrl:]]*$";
-
- pub MULTI_LINE_COMMENT_REGEX = r"(?m)^([[:^cntrl:]]*)$";
-
- pub BACKUP_REPO_URL_REGEX = concat!(
+ pub BACKUP_REPO_URL_REGEX = concatcp!(
r"^^(?:(?:(",
- USER_ID_REGEX_STR!(), "|", APITOKEN_ID_REGEX_STR!(),
+ USER_ID_REGEX_STR, "|", APITOKEN_ID_REGEX_STR,
")@)?(",
- DNS_NAME!(), "|", IPRE_BRACKET!(),
- "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR!(), r")$"
+ DNS_NAME_STR, "|", IPRE_BRACKET_STR,
+ "):)?(?:([0-9]{1,5}):)?(", PROXMOX_SAFE_ID_REGEX_STR, r")$"
);
- pub BLOCKDEVICE_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+)|(?:nvme\d+n\d+)$";
- pub BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX = r"^(?:(?:h|s|x?v)d[a-z]+\d*)|(?:nvme\d+n\d+(p\d+)?)$";
- pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
+ pub SUBSCRIPTION_KEY_REGEX = concat!(r"^pbs(?:[cbsp])-[0-9a-f]{10}$");
}
-pub const IP_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V4_REGEX);
-pub const IP_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_V6_REGEX);
-pub const IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&IP_REGEX);
-pub const CIDR_V4_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V4_REGEX);
-pub const CIDR_V6_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_V6_REGEX);
-pub const CIDR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&CIDR_REGEX);
pub const PVE_CONFIG_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX);
-pub const PASSWORD_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PASSWORD_REGEX);
-pub const UUID_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&UUID_REGEX);
-pub const BLOCKDEVICE_NAME_FORMAT: ApiStringFormat =
- ApiStringFormat::Pattern(&BLOCKDEVICE_NAME_REGEX);
-pub const BLOCKDEVICE_DISK_AND_PARTITION_NAME_FORMAT: ApiStringFormat =
- ApiStringFormat::Pattern(&BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX);
+
pub const SUBSCRIPTION_KEY_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&SUBSCRIPTION_KEY_REGEX);
-pub const SYSTEMD_DATETIME_FORMAT: ApiStringFormat =
- ApiStringFormat::Pattern(&SYSTEMD_DATETIME_REGEX);
-pub const HOSTNAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOSTNAME_REGEX);
+
pub const OPENSSL_CIPHERS_TLS_FORMAT: ApiStringFormat =
ApiStringFormat::Pattern(&OPENSSL_CIPHERS_REGEX);
-pub const HOST_PORT_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HOST_PORT_REGEX);
-pub const HTTP_URL_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&HTTP_URL_REGEX);
-
-pub const DNS_ALIAS_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_ALIAS_REGEX);
pub const DAILY_DURATION_FORMAT: ApiStringFormat =
ApiStringFormat::VerifyFn(|s| parse_daily_duration(s).map(drop));
@@ -238,10 +186,6 @@ pub const THIRD_DNS_SERVER_SCHEMA: Schema = StringSchema::new("Third name server
.format(&IP_FORMAT)
.schema();
-pub const HOSTNAME_SCHEMA: Schema = StringSchema::new("Hostname (as defined in RFC1123).")
- .format(&HOSTNAME_FORMAT)
- .schema();
-
pub const OPENSSL_CIPHERS_TLS_1_2_SCHEMA: Schema =
StringSchema::new("OpenSSL cipher list used by the proxy for TLS <= 1.2")
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
@@ -252,62 +196,6 @@ pub const OPENSSL_CIPHERS_TLS_1_3_SCHEMA: Schema =
.format(&OPENSSL_CIPHERS_TLS_FORMAT)
.schema();
-pub const DNS_NAME_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_REGEX);
-
-pub const DNS_NAME_OR_IP_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&DNS_NAME_OR_IP_REGEX);
-
-pub const DNS_NAME_OR_IP_SCHEMA: Schema = StringSchema::new("DNS name or IP address.")
- .format(&DNS_NAME_OR_IP_FORMAT)
- .schema();
-
-pub const HOST_PORT_SCHEMA: Schema =
- StringSchema::new("host:port combination (Host can be DNS name or IP address).")
- .format(&HOST_PORT_FORMAT)
- .schema();
-
-pub const HTTP_URL_SCHEMA: Schema = StringSchema::new("HTTP(s) url with optional port.")
- .format(&HTTP_URL_FORMAT)
- .schema();
-
-pub const NODE_SCHEMA: Schema = StringSchema::new("Node name (or 'localhost')")
- .format(&HOSTNAME_FORMAT)
- .schema();
-
-pub const TIME_ZONE_SCHEMA: Schema = StringSchema::new(
- "Time zone. The file '/usr/share/zoneinfo/zone.tab' contains the list of valid names.",
-)
-.format(&SINGLE_LINE_COMMENT_FORMAT)
-.min_length(2)
-.max_length(64)
-.schema();
-
-pub const BLOCKDEVICE_NAME_SCHEMA: Schema =
- StringSchema::new("Block device name (/sys/block/).")
- .format(&BLOCKDEVICE_NAME_FORMAT)
- .min_length(3)
- .max_length(64)
- .schema();
-
-pub const BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA: Schema =
- StringSchema::new("(Partition) block device name (/sys/class/block/).")
- .format(&BLOCKDEVICE_DISK_AND_PARTITION_NAME_FORMAT)
- .min_length(3)
- .max_length(64)
- .schema();
-
-pub const DISK_ARRAY_SCHEMA: Schema =
- ArraySchema::new("Disk name list.", &BLOCKDEVICE_NAME_SCHEMA).schema();
-
-pub const DISK_LIST_SCHEMA: Schema = StringSchema::new("A list of disk names, comma separated.")
- .format(&ApiStringFormat::PropertyString(&DISK_ARRAY_SCHEMA))
- .schema();
-
-pub const PASSWORD_SCHEMA: Schema = StringSchema::new("Password.")
- .format(&PASSWORD_FORMAT)
- .min_length(1)
- .max_length(1024)
- .schema();
-
pub const PBS_PASSWORD_SCHEMA: Schema = StringSchema::new("User Password.")
.format(&PASSWORD_FORMAT)
.min_length(5)
@@ -320,31 +208,6 @@ pub const REALM_ID_SCHEMA: Schema = StringSchema::new("Realm name.")
.max_length(32)
.schema();
-pub const FINGERPRINT_SHA256_FORMAT: ApiStringFormat =
- ApiStringFormat::Pattern(&FINGERPRINT_SHA256_REGEX);
-
-pub const CERT_FINGERPRINT_SHA256_SCHEMA: Schema =
- StringSchema::new("X509 certificate fingerprint (sha256).")
- .format(&FINGERPRINT_SHA256_FORMAT)
- .schema();
-
-pub const PROXMOX_SAFE_ID_FORMAT: ApiStringFormat =
- ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
-
-pub const SINGLE_LINE_COMMENT_FORMAT: ApiStringFormat =
- ApiStringFormat::Pattern(&SINGLE_LINE_COMMENT_REGEX);
-
-pub const SINGLE_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (single line).")
- .format(&SINGLE_LINE_COMMENT_FORMAT)
- .schema();
-
-pub const MULTI_LINE_COMMENT_FORMAT: ApiStringFormat =
- ApiStringFormat::Pattern(&MULTI_LINE_COMMENT_REGEX);
-
-pub const MULTI_LINE_COMMENT_SCHEMA: Schema = StringSchema::new("Comment (multiple lines).")
- .format(&MULTI_LINE_COMMENT_FORMAT)
- .schema();
-
pub const SUBSCRIPTION_KEY_SCHEMA: Schema =
StringSchema::new("Proxmox Backup Server subscription key.")
.format(&SUBSCRIPTION_KEY_FORMAT)
@@ -352,8 +215,6 @@ pub const SUBSCRIPTION_KEY_SCHEMA: Schema =
.max_length(16)
.schema();
-pub const SERVICE_ID_SCHEMA: Schema = StringSchema::new("Service ID.").max_length(256).schema();
-
pub const PROXMOX_CONFIG_DIGEST_SCHEMA: Schema = StringSchema::new(
"Prevent changes if current configuration file has different \
SHA256 digest. This can be used to prevent concurrent \
diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs
index 1b03ca942..a605cc17a 100644
--- a/pbs-api-types/src/maintenance.rs
+++ b/pbs-api-types/src/maintenance.rs
@@ -77,6 +77,12 @@ pub struct MaintenanceMode {
}
impl MaintenanceMode {
+ /// Used for deciding whether the datastore is cleared from the internal cache after the last
+ /// task finishes, so all open files are closed.
+ pub fn is_offline(&self) -> bool {
+ self.ty == MaintenanceType::Offline
+ }
+
pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> {
if self.ty == MaintenanceType::Delete {
bail!("datastore is being deleted");
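
A reduced model of how the new helper is meant to be read (types abbreviated here; the real definitions live in this file):

    #[derive(PartialEq)]
    enum MaintenanceType { ReadOnly, Offline, Delete }

    struct MaintenanceMode { ty: MaintenanceType }

    impl MaintenanceMode {
        /// Offline is the one mode that should evict the datastore from the
        /// in-memory cache once its last task ends, closing all open files.
        fn is_offline(&self) -> bool {
            self.ty == MaintenanceType::Offline
        }
    }

    fn main() {
        assert!(MaintenanceMode { ty: MaintenanceType::Offline }.is_offline());
        assert!(!MaintenanceMode { ty: MaintenanceType::ReadOnly }.is_offline());
        assert!(!MaintenanceMode { ty: MaintenanceType::Delete }.is_offline());
    }
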
diff --git a/pbs-api-types/src/openid.rs b/pbs-api-types/src/openid.rs
index 2c7646a3b..2c95c5c6e 100644
--- a/pbs-api-types/src/openid.rs
+++ b/pbs-api-types/src/openid.rs
@@ -3,7 +3,8 @@ use serde::{Deserialize, Serialize};
use proxmox_schema::{api, ApiStringFormat, ArraySchema, Schema, StringSchema, Updater};
use super::{
- PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
+ GENERIC_URI_REGEX, PROXMOX_SAFE_ID_FORMAT, PROXMOX_SAFE_ID_REGEX, REALM_ID_SCHEMA,
+ SINGLE_LINE_COMMENT_SCHEMA,
};
pub const OPENID_SCOPE_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
@@ -24,11 +25,11 @@ pub const OPENID_SCOPE_LIST_SCHEMA: Schema = StringSchema::new("OpenID Scope Lis
.default(OPENID_DEFAILT_SCOPE_LIST)
.schema();
-pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PROXMOX_SAFE_ID_REGEX);
+pub const OPENID_ACR_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&GENERIC_URI_REGEX);
pub const OPENID_ACR_SCHEMA: Schema =
StringSchema::new("OpenID Authentication Context Class Reference.")
- .format(&OPENID_SCOPE_FORMAT)
+ .format(&OPENID_ACR_FORMAT)
.schema();
pub const OPENID_ACR_ARRAY_SCHEMA: Schema =
diff --git a/pbs-api-types/src/tape/mod.rs b/pbs-api-types/src/tape/mod.rs
index 99d7cb741..6a9d56bc9 100644
--- a/pbs-api-types/src/tape/mod.rs
+++ b/pbs-api-types/src/tape/mod.rs
@@ -22,15 +22,19 @@ pub use media_location::*;
mod media;
pub use media::*;
+use const_format::concatcp;
use serde::{Deserialize, Serialize};
use proxmox_schema::{api, const_regex, ApiStringFormat, Schema, StringSchema};
use proxmox_uuid::Uuid;
-use crate::{BackupType, BACKUP_ID_SCHEMA, FINGERPRINT_SHA256_FORMAT};
+use crate::{
+ BackupType, BACKUP_ID_SCHEMA, BACKUP_NS_PATH_RE, FINGERPRINT_SHA256_FORMAT,
+ PROXMOX_SAFE_ID_REGEX_STR, SNAPSHOT_PATH_REGEX_STR,
+};
const_regex! {
- pub TAPE_RESTORE_SNAPSHOT_REGEX = concat!(r"^", PROXMOX_SAFE_ID_REGEX_STR!(), r":(?:", BACKUP_NS_PATH_RE!(),")?", SNAPSHOT_PATH_REGEX_STR!(), r"$");
+ pub TAPE_RESTORE_SNAPSHOT_REGEX = concatcp!(r"^", PROXMOX_SAFE_ID_REGEX_STR, r":(?:", BACKUP_NS_PATH_RE,")?", SNAPSHOT_PATH_REGEX_STR, r"$");
}
pub const TAPE_RESTORE_SNAPSHOT_FORMAT: ApiStringFormat =
diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs
index e7053d9e3..60efb0ce5 100644
--- a/pbs-client/src/pxar/create.rs
+++ b/pbs-client/src/pxar/create.rs
@@ -41,6 +41,8 @@ pub struct PxarCreateOptions {
pub entries_max: usize,
/// Skip lost+found directory
pub skip_lost_and_found: bool,
+ /// Skip xattrs of files that return E2BIG error
+ pub skip_e2big_xattr: bool,
}
fn detect_fs_type(fd: RawFd) -> Result<i64, Error> {
@@ -128,6 +130,7 @@ struct Archiver {
device_set: Option<HashSet<u64>>,
hardlinks: HashMap<HardLinkInfo, (PathBuf, LinkOffset)>,
file_copy_buffer: Vec<u8>,
+ skip_e2big_xattr: bool,
}
type Encoder<'a, T> = pxar::encoder::aio::Encoder<'a, T>;
@@ -158,6 +161,7 @@ where
feature_flags & fs_feature_flags,
fs_magic,
&mut fs_feature_flags,
+ options.skip_e2big_xattr,
)
.context("failed to get metadata for source directory")?;
@@ -192,6 +196,7 @@ where
device_set,
hardlinks: HashMap::new(),
file_copy_buffer: vec::undefined(4 * 1024 * 1024),
+ skip_e2big_xattr: options.skip_e2big_xattr,
};
archiver
@@ -222,9 +227,9 @@ impl Archiver {
}
}
- fn archive_dir_contents<'a, 'b, T: SeqWrite + Send>(
+ fn archive_dir_contents<'a, T: SeqWrite + Send>(
&'a mut self,
- encoder: &'a mut Encoder<'b, T>,
+ encoder: &'a mut Encoder<'_, T>,
mut dir: Dir,
is_root: bool,
) -> BoxFuture<'a, Result<(), Error>> {
@@ -534,14 +539,6 @@ impl Archiver {
None => return Ok(()),
};
- let metadata = get_metadata(
- fd.as_raw_fd(),
- stat,
- self.flags(),
- self.fs_magic,
- &mut self.fs_feature_flags,
- )?;
-
let match_path = PathBuf::from("/").join(self.path.clone());
if self
.patterns
@@ -551,6 +548,15 @@ impl Archiver {
return Ok(());
}
+ let metadata = get_metadata(
+ fd.as_raw_fd(),
+ stat,
+ self.flags(),
+ self.fs_magic,
+ &mut self.fs_feature_flags,
+ self.skip_e2big_xattr,
+ )?;
+
let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
match metadata.file_type() {
mode::IFREG => {
@@ -765,6 +771,7 @@ fn get_metadata(
flags: Flags,
fs_magic: i64,
fs_feature_flags: &mut Flags,
+ skip_e2big_xattr: bool,
) -> Result<Metadata, Error> {
// required for some of these
let proc_path = Path::new("/proc/self/fd/").join(fd.to_string());
@@ -780,7 +787,14 @@ fn get_metadata(
..Default::default()
};
- get_xattr_fcaps_acl(&mut meta, fd, &proc_path, flags, fs_feature_flags)?;
+ get_xattr_fcaps_acl(
+ &mut meta,
+ fd,
+ &proc_path,
+ flags,
+ fs_feature_flags,
+ skip_e2big_xattr,
+ )?;
get_chattr(&mut meta, fd)?;
get_fat_attr(&mut meta, fd, fs_magic)?;
get_quota_project_id(&mut meta, fd, flags, fs_magic)?;
@@ -818,6 +832,7 @@ fn get_xattr_fcaps_acl(
proc_path: &Path,
flags: Flags,
fs_feature_flags: &mut Flags,
+ skip_e2big_xattr: bool,
) -> Result<(), Error> {
if !flags.contains(Flags::WITH_XATTRS) {
return Ok(());
@@ -829,6 +844,14 @@ fn get_xattr_fcaps_acl(
fs_feature_flags.remove(Flags::WITH_XATTRS);
return Ok(());
}
+ Err(Errno::E2BIG) => {
+ match skip_e2big_xattr {
+ true => return Ok(()),
+ false => {
+ bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string());
+ }
+ };
+ }
Err(Errno::EBADF) => return Ok(()), // symlinks
Err(err) => return Err(err).context("failed to read xattrs"),
};
@@ -855,6 +878,14 @@ fn get_xattr_fcaps_acl(
Err(Errno::ENODATA) => (), // it got removed while we were iterating...
Err(Errno::EOPNOTSUPP) => (), // shouldn't be possible so just ignore this
Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either
+ Err(Errno::E2BIG) => {
+ match skip_e2big_xattr {
+ true => return Ok(()),
+ false => {
+ bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string());
+ }
+ };
+ }
Err(err) => {
return Err(err).context(format!("error reading extended attribute {attr:?}"))
}
diff --git a/pbs-client/src/pxar/dir_stack.rs b/pbs-client/src/pxar/dir_stack.rs
index 43cbee1d5..616d7545b 100644
--- a/pbs-client/src/pxar/dir_stack.rs
+++ b/pbs-client/src/pxar/dir_stack.rs
@@ -40,16 +40,13 @@ impl PxarDir {
parent: RawFd,
allow_existing_dirs: bool,
) -> Result<RawFd, Error> {
- match mkdirat(
+ if let Err(err) = mkdirat(
parent,
self.file_name.as_os_str(),
perms_from_metadata(&self.metadata)?,
) {
- Ok(()) => (),
- Err(err) => {
- if !(allow_existing_dirs && err.already_exists()) {
- return Err(err.into());
- }
+ if !(allow_existing_dirs && err.already_exists()) {
+ return Err(err.into());
}
}
diff --git a/pbs-client/src/pxar/extract.rs b/pbs-client/src/pxar/extract.rs
index af18ecfc3..5f5ac6188 100644
--- a/pbs-client/src/pxar/extract.rs
+++ b/pbs-client/src/pxar/extract.rs
@@ -699,7 +699,7 @@ impl Extractor {
if result.seeked_last {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
- Err(errno) if errno == nix::errno::Errno::EINTR => true,
+ Err(nix::errno::Errno::EINTR) => true,
Err(err) => return Err(err).context("error setting file size"),
} {}
}
@@ -758,7 +758,7 @@ impl Extractor {
if result.seeked_last {
while match nix::unistd::ftruncate(file.as_raw_fd(), size as i64) {
Ok(_) => false,
- Err(errno) if errno == nix::errno::Errno::EINTR => true,
+ Err(nix::errno::Errno::EINTR) => true,
Err(err) => return Err(err).context("error setting file size"),
} {}
}
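
The `while match … { … } {}` construct being touched here is a retry loop. Extracted into a standalone sketch (assuming the nix version used by this codebase, where ftruncate takes a RawFd):

    use std::os::fd::RawFd;

    fn truncate_retrying(fd: RawFd, size: i64) -> Result<(), nix::errno::Errno> {
        // ftruncate may be interrupted by a signal: EINTR means "try again",
        // Ok ends the loop, and any other error propagates to the caller.
        while match nix::unistd::ftruncate(fd, size) {
            Ok(_) => false,
            Err(nix::errno::Errno::EINTR) => true,
            Err(err) => return Err(err),
        } {}
        Ok(())
    }
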
diff --git a/pbs-config/Cargo.toml b/pbs-config/Cargo.toml
index 9daa6ef42..d11cd41ee 100644
--- a/pbs-config/Cargo.toml
+++ b/pbs-config/Cargo.toml
@@ -7,7 +7,7 @@ description = "Configuration file management for PBS"
[dependencies]
anyhow.workspace = true
-hex.workspace = true
+const_format.workspace = true
lazy_static.workspace = true
libc.workspace = true
nix.workspace = true
diff --git a/pbs-config/src/acl.rs b/pbs-config/src/acl.rs
index 20269f5db..a0354a053 100644
--- a/pbs-config/src/acl.rs
+++ b/pbs-config/src/acl.rs
@@ -198,7 +198,7 @@ impl AclTreeNode {
}
/// Returns applicable [Role] and their propagation status for a given
- /// [Authid](pbs_api_types::Authid).
+ /// [Authid].
///
/// If the `Authid` is a [User](pbs_api_types::User) that has no specific `Roles` configured on
/// this node, applicable `Group` roles will be returned instead.
@@ -447,8 +447,8 @@ impl AclTree {
}
fn write_node_config(node: &AclTreeNode, path: &str, w: &mut dyn Write) -> Result<(), Error> {
- let mut role_ug_map0 = HashMap::new();
- let mut role_ug_map1 = HashMap::new();
+ let mut role_ug_map0: HashMap<_, BTreeSet<_>> = HashMap::new();
+ let mut role_ug_map1: HashMap<_, BTreeSet<_>> = HashMap::new();
for (auth_id, roles) in &node.users {
// no need to save, because root is always 'Administrator'
@@ -459,15 +459,9 @@ impl AclTree {
let role = role.as_str();
let auth_id = auth_id.to_string();
if *propagate {
- role_ug_map1
- .entry(role)
- .or_insert_with(BTreeSet::new)
- .insert(auth_id);
+ role_ug_map1.entry(role).or_default().insert(auth_id);
} else {
- role_ug_map0
- .entry(role)
- .or_insert_with(BTreeSet::new)
- .insert(auth_id);
+ role_ug_map0.entry(role).or_default().insert(auth_id);
}
}
}
@@ -476,15 +470,9 @@ impl AclTree {
for (role, propagate) in roles {
let group = format!("@{}", group);
if *propagate {
- role_ug_map1
- .entry(role)
- .or_insert_with(BTreeSet::new)
- .insert(group);
+ role_ug_map1.entry(role).or_default().insert(group);
} else {
- role_ug_map0
- .entry(role)
- .or_insert_with(BTreeSet::new)
- .insert(group);
+ role_ug_map0.entry(role).or_default().insert(group);
}
}
}
@@ -492,7 +480,7 @@ impl AclTree {
fn group_by_property_list(
item_property_map: &HashMap<&str, BTreeSet<String>>,
) -> BTreeMap<String, BTreeSet<String>> {
- let mut result_map = BTreeMap::new();
+ let mut result_map: BTreeMap<_, BTreeSet<_>> = BTreeMap::new();
for (item, property_map) in item_property_map {
let item_list = property_map.iter().fold(String::new(), |mut acc, v| {
if !acc.is_empty() {
@@ -503,7 +491,7 @@ impl AclTree {
});
result_map
.entry(item_list)
- .or_insert_with(BTreeSet::new)
+ .or_default()
.insert(item.to_string());
}
result_map
diff --git a/pbs-config/src/network/helper.rs b/pbs-config/src/network/helper.rs
index 7180aaaaf..9e195d713 100644
--- a/pbs-config/src/network/helper.rs
+++ b/pbs-config/src/network/helper.rs
@@ -4,6 +4,7 @@ use std::path::Path;
use std::process::Command;
use anyhow::{bail, format_err, Error};
+use const_format::concatcp;
use lazy_static::lazy_static;
use nix::ioctl_read_bad;
use nix::sys::socket::{socket, AddressFamily, SockFlag, SockType};
@@ -89,11 +90,13 @@ pub fn check_netmask(mask: u8, is_v6: bool) -> Result<(), Error> {
// parse ip address with optional cidr mask
pub fn parse_address_or_cidr(cidr: &str) -> Result<(String, Option<u8>, bool), Error> {
+ // NOTE: This is NOT the same regex as in proxmox-schema as this one has capture groups for
+ // the addresses vs cidr portions!
lazy_static! {
pub static ref CIDR_V4_REGEX: Regex =
- Regex::new(concat!(r"^(", IPV4RE!(), r")(?:/(\d{1,2}))?$")).unwrap();
+ Regex::new(concatcp!(r"^(", IPV4RE_STR, r")(?:/(\d{1,2}))?$")).unwrap();
pub static ref CIDR_V6_REGEX: Regex =
- Regex::new(concat!(r"^(", IPV6RE!(), r")(?:/(\d{1,3}))?$")).unwrap();
+ Regex::new(concatcp!(r"^(", IPV6RE_STR, r")(?:/(\d{1,3}))?$")).unwrap();
}
if let Some(caps) = CIDR_V4_REGEX.captures(cidr) {
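
The NOTE above is the point of keeping these local regexes: the proxmox-schema patterns only validate, while these add capture groups so the address and the optional mask can be pulled apart. Illustrated with a simplified stand-in for IPV4RE_STR:

    use const_format::concatcp;
    use regex::Regex;

    // Simplified stand-in; the real IPV4RE_STR is stricter about octet ranges.
    const IPV4RE_STR: &str = r"(?:\d{1,3}(?:\.\d{1,3}){3})";

    fn main() {
        let re = Regex::new(concatcp!(r"^(", IPV4RE_STR, r")(?:/(\d{1,2}))?$")).unwrap();
        let caps = re.captures("192.168.0.1/24").unwrap();
        assert_eq!(&caps[1], "192.168.0.1");                      // address
        assert_eq!(caps.get(2).map(|m| m.as_str()), Some("24"));  // mask
    }
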
diff --git a/pbs-config/src/network/parser.rs b/pbs-config/src/network/parser.rs
index 2cff6587e..ec2c64eb9 100644
--- a/pbs-config/src/network/parser.rs
+++ b/pbs-config/src/network/parser.rs
@@ -192,7 +192,7 @@ impl<R: BufRead> NetworkParser<R> {
self.eat(Token::Gateway)?;
let gateway = self.next_text()?;
- if pbs_api_types::common_regex::IP_REGEX.is_match(&gateway) {
+ if pbs_api_types::IP_REGEX.is_match(&gateway) {
if gateway.contains(':') {
set_gateway_v6(interface, gateway)?;
} else {
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 128315ba3..bdfaeabc1 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -17,6 +17,36 @@ use crate::manifest::{
};
use crate::{DataBlob, DataStore};
+#[derive(Default)]
+pub struct BackupGroupDeleteStats {
+ // Count of protected snapshots, therefore not removed
+ unremoved_protected: usize,
+ // Count of deleted snapshots
+ removed_snapshots: usize,
+}
+
+impl BackupGroupDeleteStats {
+ pub fn all_removed(&self) -> bool {
+ self.unremoved_protected == 0
+ }
+
+ pub fn removed_snapshots(&self) -> usize {
+ self.removed_snapshots
+ }
+
+ pub fn protected_snapshots(&self) -> usize {
+ self.unremoved_protected
+ }
+
+ fn increment_removed_snapshots(&mut self) {
+ self.removed_snapshots += 1;
+ }
+
+ fn increment_protected_snapshots(&mut self) {
+ self.unremoved_protected += 1;
+ }
+}
+
/// BackupGroup is a directory containing a list of BackupDir
#[derive(Clone)]
pub struct BackupGroup {
@@ -197,30 +227,32 @@ impl BackupGroup {
/// Destroy the group inclusive all its backup snapshots (BackupDir's)
///
- /// Returns true if all snapshots were removed, and false if some were protected
- pub fn destroy(&self) -> Result<bool, Error> {
+ /// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
+ /// and the number of protected snapshots, which therefore were not removed.
+ pub fn destroy(&self) -> Result<BackupGroupDeleteStats, Error> {
let path = self.full_group_path();
let _guard =
proxmox_sys::fs::lock_dir_noblock(&path, "backup group", "possible running backup")?;
log::info!("removing backup group {:?}", path);
- let mut removed_all_snaps = true;
+ let mut delete_stats = BackupGroupDeleteStats::default();
for snap in self.iter_snapshots()? {
let snap = snap?;
if snap.is_protected() {
- removed_all_snaps = false;
+ delete_stats.increment_protected_snapshots();
continue;
}
snap.destroy(false)?;
+ delete_stats.increment_removed_snapshots();
}
- if removed_all_snaps {
+ if delete_stats.all_removed() {
std::fs::remove_dir_all(&path).map_err(|err| {
format_err!("removing group directory {:?} failed - {}", path, err)
})?;
}
- Ok(removed_all_snaps)
+ Ok(delete_stats)
}
/// Returns the backup owner.
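
How callers are expected to consume the richer return value (a sketch only; the admin/datastore.rs hunk further down keeps the old all-or-nothing behavior by checking all_removed()):

    use pbs_datastore::backup_info::BackupGroupDeleteStats;

    fn report(stats: &BackupGroupDeleteStats) {
        if stats.all_removed() {
            println!("removed all {} snapshots", stats.removed_snapshots());
        } else {
            println!(
                "removed {} snapshots, kept {} protected snapshots",
                stats.removed_snapshots(),
                stats.protected_snapshots()
            );
        }
    }
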
diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
index fb282749c..9f6289c9f 100644
--- a/pbs-datastore/src/chunk_store.rs
+++ b/pbs-datastore/src/chunk_store.rs
@@ -322,7 +322,7 @@ impl ChunkStore {
// start reading:
continue;
}
- Err(ref err) if err == &nix::errno::Errno::ENOENT => {
+ Err(nix::errno::Errno::ENOENT) => {
// non-existing directories are okay, just keep going:
continue;
}
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index de9488217..0685cc845 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -23,7 +23,7 @@ use pbs_api_types::{
DatastoreTuning, GarbageCollectionStatus, Operation, UPID,
};
-use crate::backup_info::{BackupDir, BackupGroup};
+use crate::backup_info::{BackupDir, BackupGroup, BackupGroupDeleteStats};
use crate::chunk_store::ChunkStore;
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
@@ -104,8 +104,26 @@ impl Clone for DataStore {
impl Drop for DataStore {
fn drop(&mut self) {
if let Some(operation) = self.operation {
- if let Err(e) = update_active_operations(self.name(), operation, -1) {
- log::error!("could not update active operations - {}", e);
+ let mut last_task = false;
+ match update_active_operations(self.name(), operation, -1) {
+ Err(e) => log::error!("could not update active operations - {}", e),
+ Ok(updated_operations) => {
+ last_task = updated_operations.read + updated_operations.write == 0;
+ }
+ }
+
+ // remove datastore from cache iff
+ // - last task finished, and
+ // - datastore is in a maintenance mode that mandates it
+ let remove_from_cache = last_task
+ && pbs_config::datastore::config()
+ .and_then(|(s, _)| s.lookup::<DataStoreConfig>("datastore", self.name()))
+ .map_or(false, |c| {
+ c.get_maintenance_mode().map_or(false, |m| m.is_offline())
+ });
+
+ if remove_from_cache {
+ DATASTORE_MAP.lock().unwrap().remove(self.name());
}
}
}
@@ -193,6 +211,24 @@ impl DataStore {
Ok(())
}
+ /// Trigger clearing the cache entry based on maintenance mode. The entry is
+ /// only cleared if no other task is running; if one is, the end of the last
+ /// running task will trigger the clearing of the cache entry.
+ pub fn update_datastore_cache(name: &str) -> Result<(), Error> {
+ let (config, _digest) = pbs_config::datastore::config()?;
+ let datastore: DataStoreConfig = config.lookup("datastore", name)?;
+ if datastore
+ .get_maintenance_mode()
+ .map_or(false, |m| m.is_offline())
+ {
+ // the datastore drop handler does the checking if tasks are running and clears the
+ // cache entry, so we just have to trigger it here
+ let _ = DataStore::lookup_datastore(name, Some(Operation::Lookup));
+ }
+
+ Ok(())
+ }
+
/// Open a raw database given a name and a path.
///
/// # Safety
@@ -464,8 +500,8 @@ impl DataStore {
let mut removed_all_groups = true;
for group in self.iter_backup_groups(ns.to_owned())? {
- let removed_group = group?.destroy()?;
- removed_all_groups = removed_all_groups && removed_group;
+ let delete_stats = group?.destroy()?;
+ removed_all_groups = removed_all_groups && delete_stats.all_removed();
}
let base_file = std::fs::File::open(self.base_path())?;
@@ -545,12 +581,13 @@ impl DataStore {
/// Remove a complete backup group including all snapshots.
///
- /// Returns true if all snapshots were removed, and false if some were protected
+ /// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
+ /// and the number of protected snapshots, which therefore were not removed.
pub fn remove_backup_group(
self: &Arc<Self>,
ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup,
- ) -> Result<bool, Error> {
+ ) -> Result<BackupGroupDeleteStats, Error> {
let backup_group = self.backup_group(ns.clone(), backup_group.clone());
backup_group.destroy()
@@ -1022,7 +1059,7 @@ impl DataStore {
}
pub fn garbage_collection_running(&self) -> bool {
- !matches!(self.inner.gc_mutex.try_lock(), Ok(_))
+ self.inner.gc_mutex.try_lock().is_err()
}
pub fn garbage_collection(
diff --git a/pbs-datastore/src/task_tracking.rs b/pbs-datastore/src/task_tracking.rs
index 18fbd4ba8..ec06a0bcc 100644
--- a/pbs-datastore/src/task_tracking.rs
+++ b/pbs-datastore/src/task_tracking.rs
@@ -91,15 +91,23 @@ pub fn get_active_operations_locked(
Ok((data, lock.unwrap()))
}
-pub fn update_active_operations(name: &str, operation: Operation, count: i64) -> Result<(), Error> {
+pub fn update_active_operations(
+ name: &str,
+ operation: Operation,
+ count: i64,
+) -> Result<ActiveOperationStats, Error> {
let path = PathBuf::from(format!("{}/{}", crate::ACTIVE_OPERATIONS_DIR, name));
let (_lock, options) = open_lock_file(name)?;
let pid = std::process::id();
let starttime = procfs::PidStat::read_from_pid(Pid::from_raw(pid as pid_t))?.starttime;
- let mut updated = false;
+ let mut updated_active_operations = match operation {
+ Operation::Read => ActiveOperationStats { read: 1, write: 0 },
+ Operation::Write => ActiveOperationStats { read: 0, write: 1 },
+ Operation::Lookup => ActiveOperationStats { read: 0, write: 0 },
+ };
let mut updated_tasks: Vec<TaskOperations> = match file_read_optional_string(&path)? {
Some(data) => serde_json::from_str::<Vec<TaskOperations>>(&data)?
.iter_mut()
@@ -108,12 +116,12 @@ pub fn update_active_operations(name: &str, operation: Operation, count: i64) ->
Some(stat) if pid == task.pid && stat.starttime != task.starttime => None,
Some(_) => {
if pid == task.pid {
- updated = true;
match operation {
Operation::Read => task.active_operations.read += count,
Operation::Write => task.active_operations.write += count,
Operation::Lookup => (), // no IO must happen there
};
+ updated_active_operations = task.active_operations;
}
Some(task.clone())
}
@@ -124,15 +132,11 @@ pub fn update_active_operations(name: &str, operation: Operation, count: i64) ->
None => Vec::new(),
};
- if !updated {
+ if updated_tasks.is_empty() {
updated_tasks.push(TaskOperations {
pid,
starttime,
- active_operations: match operation {
- Operation::Read => ActiveOperationStats { read: 1, write: 0 },
- Operation::Write => ActiveOperationStats { read: 0, write: 1 },
- Operation::Lookup => ActiveOperationStats { read: 0, write: 0 },
- },
+ active_operations: updated_active_operations,
})
}
replace_file(
@@ -141,4 +145,5 @@ pub fn update_active_operations(name: &str, operation: Operation, count: i64) ->
options,
false,
)
+ .map(|_| updated_active_operations)
}
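
The new return value is what lets the DataStore Drop impl (earlier in this patch) detect the last finishing task. A sketch of that check, assuming ActiveOperationStats keeps its public read/write counters:

    use pbs_datastore::task_tracking::ActiveOperationStats;

    fn finished_last_task(stats: &ActiveOperationStats) -> bool {
        // Mirrors the Drop impl: nothing is reading or writing anymore.
        stats.read + stats.write == 0
    }
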
diff --git a/pbs-pxar-fuse/src/lib.rs b/pbs-pxar-fuse/src/lib.rs
index 98d28c579..bf196b6c4 100644
--- a/pbs-pxar-fuse/src/lib.rs
+++ b/pbs-pxar-fuse/src/lib.rs
@@ -525,9 +525,11 @@ impl SessionImpl {
let file = file?.decode_entry().await?;
let stat = to_stat(to_inode(&file), &file)?;
let name = file.file_name();
- match request.add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)? {
- ReplyBufState::Ok => (),
- ReplyBufState::Full => return Ok(lookups),
+ if request
+ .add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)?
+ .is_full()
+ {
+ return Ok(lookups);
}
lookups.push(self.make_lookup(request.inode, stat.st_ino, &file)?);
}
@@ -537,9 +539,11 @@ impl SessionImpl {
let file = dir.lookup_self().await?;
let stat = to_stat(to_inode(&file), &file)?;
let name = OsStr::new(".");
- match request.add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)? {
- ReplyBufState::Ok => (),
- ReplyBufState::Full => return Ok(lookups),
+ if request
+ .add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)?
+ .is_full()
+ {
+ return Ok(lookups);
}
lookups.push(LookupRef::clone(&dir_lookup));
}
@@ -551,9 +555,11 @@ impl SessionImpl {
let file = parent_dir.lookup_self().await?;
let stat = to_stat(to_inode(&file), &file)?;
let name = OsStr::new("..");
- match request.add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)? {
- ReplyBufState::Ok => (),
- ReplyBufState::Full => return Ok(lookups),
+ if request
+ .add_entry(name, &stat, next, 1, f64::MAX, f64::MAX)?
+ .is_full()
+ {
+ return Ok(lookups);
}
lookups.push(lookup);
}
@@ -619,9 +625,8 @@ impl SessionImpl {
let xattrs = self.listxattrs(request.inode).await?;
for entry in xattrs {
- match request.add_c_string(entry.name()) {
- ReplyBufState::Ok => (),
- ReplyBufState::Full => return Ok(ReplyBufState::Full),
+ if request.add_c_string(entry.name()).is_full() {
+ return Ok(ReplyBufState::Full);
}
}
diff --git a/pbs-tape/src/lib.rs b/pbs-tape/src/lib.rs
index 1de2bc543..8d408b70a 100644
--- a/pbs-tape/src/lib.rs
+++ b/pbs-tape/src/lib.rs
@@ -281,7 +281,7 @@ impl MtxStatus {
// (are there changers exposing more than one?)
// defaults to 0 for changer that do not report transports
self.transports
- .get(0)
+ .first()
.map(|t| t.element_address)
.unwrap_or(0u16)
}
diff --git a/pbs-tape/src/sg_pt_changer.rs b/pbs-tape/src/sg_pt_changer.rs
index 5b8596f0d..3945d18f3 100644
--- a/pbs-tape/src/sg_pt_changer.rs
+++ b/pbs-tape/src/sg_pt_changer.rs
@@ -850,7 +850,7 @@ mod test {
.map(|desc| build_storage_descriptor(desc, trailing))
.collect();
- let (desc_len, address) = if let Some(el) = descs.get(0) {
+ let (desc_len, address) = if let Some(el) = descs.first() {
(el.len() as u16, descriptors[0].address)
} else {
(0u16, 0u16)
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index e5caf87de..546275cb1 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -665,6 +665,12 @@ fn spawn_catalog_upload(
optional: true,
default: false,
},
+ "skip-e2big-xattr": {
+ type: Boolean,
+ description: "Ignore the E2BIG error when retrieving xattrs. This includes the file, but discards the metadata.",
+ optional: true,
+ default: false,
+ },
}
}
)]
@@ -674,6 +680,7 @@ async fn create_backup(
all_file_systems: bool,
skip_lost_and_found: bool,
dry_run: bool,
+ skip_e2big_xattr: bool,
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
@@ -993,6 +1000,7 @@ async fn create_backup(
patterns: pattern_list.clone(),
entries_max: entries_max as usize,
skip_lost_and_found,
+ skip_e2big_xattr,
};
let upload_options = UploadOptions {
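
Illustrative invocation (the flag name comes from the hunk above; the rest of the command line is the client's usual syntax and should be double-checked against the man page): `proxmox-backup-client backup root.pxar:/ --skip-e2big-xattr true`. Files whose xattr listing fails with E2BIG are then archived without their extended attributes instead of aborting the backup.
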
diff --git a/proxmox-backup-client/src/namespace.rs b/proxmox-backup-client/src/namespace.rs
index ce3f113bf..2929e394b 100644
--- a/proxmox-backup-client/src/namespace.rs
+++ b/proxmox-backup-client/src/namespace.rs
@@ -132,11 +132,15 @@ async fn create_namespace(param: Value) -> Result<(), Error> {
type: BackupNamespace,
optional: true,
},
+ "delete-groups": {
+ description: "Destroys all groups in the hierarchy.",
+ optional: true,
+ },
}
},
)]
/// Delete an existing namespace.
-async fn delete_namespace(param: Value) -> Result<(), Error> {
+async fn delete_namespace(param: Value, delete_groups: Option<bool>) -> Result<(), Error> {
let repo = extract_repository_from_value(¶m)?;
let backup_ns = optional_ns_param(¶m)?;
@@ -145,7 +149,11 @@ async fn delete_namespace(param: Value) -> Result<(), Error> {
}
let path = format!("api2/json/admin/datastore/{}/namespace", repo.store());
- let param = json!({ "ns": backup_ns });
+ let mut param = json!({ "ns": backup_ns });
+
+ if let Some(value) = delete_groups {
+ param["delete-groups"] = serde_json::to_value(value)?;
+ }
let client = connect(&repo)?;
diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
index c4e97d33e..c20552225 100644
--- a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
+++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
@@ -352,6 +352,7 @@ fn extract(
device_set: None,
patterns,
skip_lost_and_found: false,
+ skip_e2big_xattr: false,
};
let pxar_writer = TokioWriter::new(writer);
diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs
index bc0440351..2bbe90e34 100644
--- a/pxar-bin/src/main.rs
+++ b/pxar-bin/src/main.rs
@@ -335,6 +335,7 @@ async fn create_archive(
device_set,
patterns,
skip_lost_and_found: false,
+ skip_e2big_xattr: false,
};
let source = PathBuf::from(source);
diff --git a/src/api2/access/mod.rs b/src/api2/access/mod.rs
index 673e76bbc..15509fd9d 100644
--- a/src/api2/access/mod.rs
+++ b/src/api2/access/mod.rs
@@ -6,13 +6,15 @@ use serde_json::Value;
use std::collections::HashMap;
use std::collections::HashSet;
-use proxmox_router::{list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap};
+use proxmox_router::{
+ http_bail, http_err, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
+};
use proxmox_schema::api;
use proxmox_sortable_macro::sortable;
use pbs_api_types::{
- Authid, Userid, ACL_PATH_SCHEMA, PASSWORD_SCHEMA, PRIVILEGES, PRIV_PERMISSIONS_MODIFY,
- PRIV_SYS_AUDIT,
+ Authid, User, Userid, ACL_PATH_SCHEMA, PASSWORD_FORMAT, PASSWORD_SCHEMA, PRIVILEGES,
+ PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT,
};
use pbs_config::acl::AclTreeNode;
use pbs_config::CachedUserInfo;
@@ -24,6 +26,47 @@ pub mod role;
pub mod tfa;
pub mod user;
+/// Perform first-factor (password) authentication only. Ignore password for the root user.
+/// Otherwise check the current user's password.
+///
+/// This means that user admins need to type in their own password while editing a user, and
+/// regular users, which can only change their own settings (checked at the API level), can change
+/// their own settings using their own password.
+pub(self) async fn user_update_auth<S: AsRef<str>>(
+ rpcenv: &mut dyn RpcEnvironment,
+ userid: &Userid,
+ password: Option,
+ must_exist: bool,
+) -> Result<(), Error> {
+ let authid: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+
+ if authid.user() != Userid::root_userid() {
+ let client_ip = rpcenv.get_client_ip().map(|sa| sa.ip());
+ let password = password.ok_or_else(|| http_err!(UNAUTHORIZED, "missing password"))?;
+ #[allow(clippy::let_unit_value)]
+ {
+ let _: () = crate::auth::authenticate_user(
+ authid.user(),
+ password.as_ref(),
+ client_ip.as_ref(),
+ )
+ .await
+ .map_err(|err| http_err!(UNAUTHORIZED, "{}", err))?;
+ }
+ }
+
+ // After authentication, verify that the to-be-modified user actually exists:
+ if must_exist && authid.user() != userid {
+ let (config, _digest) = pbs_config::user::config()?;
+
+ if config.lookup::<User>("user", userid.as_str()).is_err() {
+ http_bail!(UNAUTHORIZED, "user '{}' does not exist.", userid);
+ }
+ }
+
+ Ok(())
+}
+
#[api(
protected: true,
input: {
@@ -34,6 +77,14 @@ pub mod user;
password: {
schema: PASSWORD_SCHEMA,
},
+ "confirmation-password": {
+ type: String,
+ description: "The current password for confirmation, unless logged in as root@pam",
+ min_length: 1,
+ max_length: 1024,
+ format: &PASSWORD_FORMAT,
+ optional: true,
+ },
},
},
access: {
@@ -45,11 +96,14 @@ pub mod user;
///
/// Each user is allowed to change his own password. Superuser
/// can change all passwords.
-pub fn change_password(
+pub async fn change_password(
userid: Userid,
password: String,
+ confirmation_password: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
+ user_update_auth(rpcenv, &userid, confirmation_password, true).await?;
+
let current_auth: Authid = rpcenv
.get_auth_id()
.ok_or_else(|| format_err!("no authid available"))?
diff --git a/src/api2/access/tfa.rs b/src/api2/access/tfa.rs
index 589535a6e..e6b26b336 100644
--- a/src/api2/access/tfa.rs
+++ b/src/api2/access/tfa.rs
@@ -2,55 +2,15 @@
use anyhow::Error;
-use proxmox_router::{http_bail, http_err, Permission, Router, RpcEnvironment};
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use proxmox_tfa::api::methods;
-use pbs_api_types::{
- Authid, User, Userid, PASSWORD_SCHEMA, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT,
-};
+use pbs_api_types::{Authid, Userid, PASSWORD_SCHEMA, PRIV_PERMISSIONS_MODIFY, PRIV_SYS_AUDIT};
use pbs_config::CachedUserInfo;
use crate::config::tfa::UserAccess;
-/// Perform first-factor (password) authentication only. Ignore password for the root user.
-/// Otherwise check the current user's password.
-///
-/// This means that user admins need to type in their own password while editing a user, and
-/// regular users, which can only change their own TFA settings (checked at the API level), can
-/// change their own settings using their own password.
-async fn tfa_update_auth(
- rpcenv: &mut dyn RpcEnvironment,
- userid: &Userid,
- password: Option<String>,
- must_exist: bool,
-) -> Result<(), Error> {
- let authid: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-
- if authid.user() != Userid::root_userid() {
- let client_ip = rpcenv.get_client_ip().map(|sa| sa.ip());
- let password = password.ok_or_else(|| http_err!(UNAUTHORIZED, "missing password"))?;
- #[allow(clippy::let_unit_value)]
- {
- let _: () =
- crate::auth::authenticate_user(authid.user(), &password, client_ip.as_ref())
- .await
- .map_err(|err| http_err!(UNAUTHORIZED, "{}", err))?;
- }
- }
-
- // After authentication, verify that the to-be-modified user actually exists:
- if must_exist && authid.user() != userid {
- let (config, _digest) = pbs_config::user::config()?;
-
- if config.lookup::("user", userid.as_str()).is_err() {
- http_bail!(UNAUTHORIZED, "user '{}' does not exists.", userid);
- }
- }
-
- Ok(())
-}
-
#[api(
protected: true,
input: {
@@ -128,7 +88,7 @@ pub async fn delete_tfa(
password: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
- tfa_update_auth(rpcenv, &userid, password, false).await?;
+ super::user_update_auth(rpcenv, &userid, password, false).await?;
let _lock = crate::config::tfa::write_lock()?;
@@ -225,7 +185,7 @@ async fn add_tfa_entry(
r#type: methods::TfaType,
rpcenv: &mut dyn RpcEnvironment,
) -> Result {
- tfa_update_auth(rpcenv, &userid, password, true).await?;
+ super::user_update_auth(rpcenv, &userid, password, true).await?;
let _lock = crate::config::tfa::write_lock()?;
@@ -285,7 +245,7 @@ async fn update_tfa_entry(
password: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
- tfa_update_auth(rpcenv, &userid, password, true).await?;
+ super::user_update_auth(rpcenv, &userid, password, true).await?;
let _lock = crate::config::tfa::write_lock()?;
diff --git a/src/api2/access/user.rs b/src/api2/access/user.rs
index 118838ce8..e2b74237b 100644
--- a/src/api2/access/user.rs
+++ b/src/api2/access/user.rs
@@ -253,7 +253,7 @@ pub enum DeletableProperty {
)]
/// Update user configuration.
#[allow(clippy::too_many_arguments)]
-pub fn update_user(
+pub async fn update_user(
userid: Userid,
update: UserUpdater,
password: Option<String>,
@@ -261,6 +261,10 @@ pub fn update_user(
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
+ if password.is_some() {
+ super::user_update_auth(rpcenv, &userid, password.as_deref(), false).await?;
+ }
+
let _lock = pbs_config::user::lock_config()?;
let (mut config, expected_digest) = pbs_config::user::config()?;
@@ -381,28 +385,16 @@ pub fn delete_user(userid: Userid, digest: Option<String>) -> Result<(), Error>
pbs_config::user::save_config(&config)?;
let authenticator = crate::auth::lookup_authenticator(userid.realm())?;
- match authenticator.remove_password(userid.name()) {
- Ok(()) => {}
- Err(err) => {
- eprintln!(
- "error removing password after deleting user {:?}: {}",
- userid, err
- );
- }
+ if let Err(err) = authenticator.remove_password(userid.name()) {
+ eprintln!("error removing password after deleting user {userid:?}: {err}",);
}
- match crate::config::tfa::read().and_then(|mut cfg| {
+ if let Err(err) = crate::config::tfa::read().and_then(|mut cfg| {
let _: proxmox_tfa::api::NeedsSaving =
cfg.remove_user(&crate::config::tfa::UserAccess, userid.as_str())?;
crate::config::tfa::write(&cfg)
}) {
- Ok(()) => (),
- Err(err) => {
- eprintln!(
- "error updating TFA config after deleting user {:?}: {}",
- userid, err
- );
- }
+ eprintln!("error updating TFA config after deleting user {userid:?} {err}",);
}
Ok(())
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index a95031e71..f7164b877 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -294,7 +294,8 @@ pub async fn delete_group(
&group,
)?;
- if !datastore.remove_backup_group(&ns, &group)? {
+ let delete_stats = datastore.remove_backup_group(&ns, &group)?;
+ if !delete_stats.all_removed() {
bail!("group only partially deleted due to protected snapshots");
}
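
Note: remove_backup_group now reports per-snapshot delete statistics instead
of a bare bool. The methods used here and in src/server/pull.rs below imply
roughly this interface (the field layout is an assumption):

    // Sketch of the assumed stats type returned by remove_backup_group:
    pub struct BackupGroupDeleteStats {
        removed_snapshots: usize,
        protected_snapshots: usize,
    }

    impl BackupGroupDeleteStats {
        pub fn all_removed(&self) -> bool {
            self.protected_snapshots == 0
        }
        pub fn removed_snapshots(&self) -> usize {
            self.removed_snapshots
        }
    }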
diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs
index 18fad7455..013043dd0 100644
--- a/src/api2/backup/mod.rs
+++ b/src/api2/backup/mod.rs
@@ -3,7 +3,7 @@
use anyhow::{bail, format_err, Error};
use futures::*;
use hex::FromHex;
-use hyper::header::{HeaderValue, UPGRADE};
+use hyper::header::{HeaderValue, CONNECTION, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use serde::Deserialize;
@@ -318,6 +318,7 @@ fn upgrade_to_backup_protocol(
let response = Response::builder()
.status(StatusCode::SWITCHING_PROTOCOLS)
+ .header(CONNECTION, HeaderValue::from_static("upgrade"))
.header(
UPGRADE,
HeaderValue::from_static(PROXMOX_BACKUP_PROTOCOL_ID_V1!()),
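
Note: HTTP/1.1 only honors an Upgrade header when the Connection header also
lists "upgrade" (RFC 9110, section 7.8), so strict clients and intermediaries
need the extra header added here. The resulting handshake response looks like
this (protocol id illustrative):

    HTTP/1.1 101 Switching Protocols
    connection: upgrade
    upgrade: proxmox-backup-protocol-v1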
diff --git a/src/api2/config/access/ldap.rs b/src/api2/config/access/ldap.rs
index 911142a08..e60dc9c16 100644
--- a/src/api2/config/access/ldap.rs
+++ b/src/api2/config/access/ldap.rs
@@ -337,7 +337,7 @@ pub fn update_ldap_realm(
config.user_classes = Some(user_classes);
}
- let ldap_config = if let Some(_) = password {
+ let ldap_config = if password.is_some() {
LdapAuthenticator::api_type_to_config_with_password(&config, password.clone())?
} else {
LdapAuthenticator::api_type_to_config(&config)?
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index d571334c4..3081e1f48 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -389,7 +389,9 @@ pub fn update_datastore(
data.tuning = update.tuning;
}
+ let mut maintenance_mode_changed = false;
if update.maintenance_mode.is_some() {
+ maintenance_mode_changed = data.maintenance_mode != update.maintenance_mode;
data.maintenance_mode = update.maintenance_mode;
}
@@ -403,6 +405,25 @@ pub fn update_datastore(
jobstate::update_job_last_run_time("garbage_collection", &name)?;
}
+ // tell the proxy it might have to clear a cache entry
+ if maintenance_mode_changed {
+ tokio::spawn(async move {
+ if let Ok(proxy_pid) =
+ proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)
+ {
+ let sock = proxmox_rest_server::ctrl_sock_from_pid(proxy_pid);
+ let _ = proxmox_rest_server::send_raw_command(
+ sock,
+ &format!(
+ "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n",
+ &name
+ ),
+ )
+ .await;
+ }
+ });
+ }
+
Ok(())
}
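
Note: the proxy's control socket accepts newline-delimited JSON commands, so
the spawned task above effectively writes one line like the following
(datastore name illustrative):

    {"command":"update-datastore-cache","args":"store1"}

The matching handler is registered in src/bin/proxmox-backup-proxy.rs further
below.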
diff --git a/src/api2/node/dns.rs b/src/api2/node/dns.rs
index 4f6822d87..87a117382 100644
--- a/src/api2/node/dns.rs
+++ b/src/api2/node/dns.rs
@@ -2,12 +2,14 @@ use std::sync::{Arc, Mutex};
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
+use const_format::concatcp;
use lazy_static::lazy_static;
use openssl::sha;
use regex::Regex;
use serde_json::{json, Value};
-use pbs_api_types::{IPRE, IPV4OCTET, IPV4RE, IPV6H16, IPV6LS32, IPV6RE};
+use pbs_api_types::IPRE_STR;
+
use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::api;
use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
@@ -47,7 +49,7 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
lazy_static! {
static ref DOMAIN_REGEX: Regex = Regex::new(r"^\s*(?:search|domain)\s+(\S+)\s*").unwrap();
static ref SERVER_REGEX: Regex =
- Regex::new(concat!(r"^\s*nameserver\s+(", IPRE!(), r")\s*")).unwrap();
+ Regex::new(concatcp!(r"^\s*nameserver\s+(", IPRE_STR, r")\s*")).unwrap();
}
let mut options = String::new();
diff --git a/src/api2/pull.rs b/src/api2/pull.rs
index eb9a21993..59db36603 100644
--- a/src/api2/pull.rs
+++ b/src/api2/pull.rs
@@ -13,6 +13,7 @@ use pbs_api_types::{
TRANSFER_LAST_SCHEMA,
};
use pbs_config::CachedUserInfo;
+use proxmox_human_byte::HumanByte;
use proxmox_rest_server::WorkerTask;
use crate::server::jobstate::Job;
@@ -144,7 +145,31 @@ pub fn do_sync_job(
sync_job.remote_store,
);
- pull_store(&worker, pull_params).await?;
+ let pull_stats = pull_store(&worker, pull_params).await?;
+
+ if pull_stats.bytes != 0 {
+ let amount = HumanByte::from(pull_stats.bytes);
+ let rate = HumanByte::new_binary(
+ pull_stats.bytes as f64 / pull_stats.elapsed.as_secs_f64(),
+ );
+ task_log!(
+ worker,
+ "Summary: sync job pulled {amount} in {} chunks (average rate: {rate}/s)",
+ pull_stats.chunk_count,
+ );
+ } else {
+ task_log!(worker, "Summary: sync job found no new data to pull");
+ }
+
+ if let Some(removed) = pull_stats.removed {
+ task_log!(
+ worker,
+ "Summary: removed vanished: snapshots: {}, groups: {}, namespaces: {}",
+ removed.snapshots,
+ removed.groups,
+ removed.namespaces,
+ );
+ }
task_log!(worker, "sync job '{}' end", &job_id);
diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs
index b1a5612b0..42b428385 100644
--- a/src/api2/reader/mod.rs
+++ b/src/api2/reader/mod.rs
@@ -3,7 +3,7 @@
use anyhow::{bail, format_err, Error};
use futures::*;
use hex::FromHex;
-use hyper::header::{self, HeaderValue, UPGRADE};
+use hyper::header::{self, HeaderValue, CONNECTION, UPGRADE};
use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use serde::Deserialize;
@@ -209,6 +209,7 @@ fn upgrade_to_backup_reader_protocol(
let response = Response::builder()
.status(StatusCode::SWITCHING_PROTOCOLS)
+ .header(CONNECTION, HeaderValue::from_static("upgrade"))
.header(
UPGRADE,
HeaderValue::from_static(PROXMOX_BACKUP_READER_PROTOCOL_ID_V1!()),
diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs
index 47e0586bb..8273c867a 100644
--- a/src/api2/tape/restore.rs
+++ b/src/api2/tape/restore.rs
@@ -75,7 +75,7 @@ impl TryFrom<Vec<String>> for NamespaceMap {
let max_depth = mapping.max_depth.unwrap_or(MAX_NAMESPACE_DEPTH);
let ns_map: &mut HashMap<BackupNamespace, (BackupNamespace, usize)> =
- map.entry(mapping.store).or_insert_with(HashMap::new);
+ map.entry(mapping.store).or_default();
if ns_map.insert(source, (target, max_depth)).is_some() {
bail!("duplicate mapping found");
@@ -747,7 +747,7 @@ fn restore_list_worker(
let file_list = snapshot_file_hash
.entry(media_id.label.uuid.clone())
- .or_insert_with(Vec::new);
+ .or_default();
file_list.push(file_num);
task_log!(
@@ -808,10 +808,8 @@ fn restore_list_worker(
// we only want to restore chunks that we do not have yet
if !datastore.cond_touch_chunk(&digest, false)? {
if let Some((uuid, nr)) = catalog.lookup_chunk(&source_datastore, &digest) {
- let file = media_file_chunk_map
- .entry(uuid.clone())
- .or_insert_with(BTreeMap::new);
- let chunks = file.entry(nr).or_insert_with(HashSet::new);
+ let file = media_file_chunk_map.entry(uuid.clone()).or_default();
+ let chunks = file.entry(nr).or_default();
chunks.insert(digest);
}
}
@@ -1089,9 +1087,7 @@ fn restore_snapshots_to_tmpdir(
);
std::fs::create_dir_all(&tmp_path)?;
- let chunks = chunks_list
- .entry(source_datastore)
- .or_insert_with(HashSet::new);
+ let chunks = chunks_list.entry(source_datastore).or_default();
let manifest =
try_restore_snapshot_archive(worker.clone(), &mut decoder, &tmp_path)?;
diff --git a/src/auth.rs b/src/auth.rs
index ec0bc41f2..04fb3a1d7 100644
--- a/src/auth.rs
+++ b/src/auth.rs
@@ -185,22 +185,7 @@ impl LdapAuthenticator {
servers.push(server.clone());
}
- let tls_mode = match config.mode.unwrap_or_default() {
- LdapMode::Ldap => ConnectionMode::Ldap,
- LdapMode::StartTls => ConnectionMode::StartTls,
- LdapMode::Ldaps => ConnectionMode::Ldaps,
- };
-
- let (ca_store, trusted_cert) = if let Some(capath) = config.capath.as_deref() {
- let path = PathBuf::from(capath);
- if path.is_dir() {
- (Some(path), None)
- } else {
- (None, Some(vec![path]))
- }
- } else {
- (None, None)
- };
+ let (ca_store, trusted_cert) = lookup_ca_store_or_cert_path(config.capath.as_deref());
Ok(Config {
servers,
@@ -209,7 +194,7 @@ impl LdapAuthenticator {
base_dn: config.base_dn.clone(),
bind_dn: config.bind_dn.clone(),
bind_password: password,
- tls_mode,
+ tls_mode: ldap_to_conn_mode(config.mode.unwrap_or_default()),
verify_certificate: config.verify.unwrap_or_default(),
additional_trusted_certificates: trusted_cert,
certificate_store_path: ca_store,
@@ -217,6 +202,27 @@ impl LdapAuthenticator {
}
}
+fn ldap_to_conn_mode(mode: LdapMode) -> ConnectionMode {
+ match mode {
+ LdapMode::Ldap => ConnectionMode::Ldap,
+ LdapMode::StartTls => ConnectionMode::StartTls,
+ LdapMode::Ldaps => ConnectionMode::Ldaps,
+ }
+}
+
+fn lookup_ca_store_or_cert_path(capath: Option<&str>) -> (Option<PathBuf>, Option<Vec<PathBuf>>) {
+ if let Some(capath) = capath {
+ let path = PathBuf::from(capath);
+ if path.is_dir() {
+ (Some(path), None)
+ } else {
+ (None, Some(vec![path]))
+ }
+ } else {
+ (None, None)
+ }
+}
+
/// Lookup the authenticator for the specified realm
pub(crate) fn lookup_authenticator(
realm: &RealmRef,
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 9c49026b6..f79ec2f52 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -289,6 +289,16 @@ async fn run() -> Result<(), Error> {
Ok(Value::Null)
})?;
+ // clear cache entry for datastore that is in a specific maintenance mode
+ command_sock.register_command("update-datastore-cache".to_string(), |value| {
+ if let Some(name) = value.and_then(Value::as_str) {
+ if let Err(err) = DataStore::update_datastore_cache(name) {
+ log::error!("could not trigger update datastore cache: {err}");
+ }
+ }
+ Ok(Value::Null)
+ })?;
+
let connections = proxmox_rest_server::connection::AcceptBuilder::new()
.debug(debug)
.rate_limiter_lookup(Arc::new(lookup_rate_limiter))
@@ -871,13 +881,12 @@ async fn run_stat_generator() {
loop {
let delay_target = Instant::now() + Duration::from_secs(10);
- let stats = match tokio::task::spawn_blocking(|| {
+ let stats_future = tokio::task::spawn_blocking(|| {
let hoststats = collect_host_stats_sync();
let (hostdisk, datastores) = collect_disk_stats_sync();
Arc::new((hoststats, hostdisk, datastores))
- })
- .await
- {
+ });
+ let stats = match stats_future.await {
Ok(res) => res,
Err(err) => {
log::error!("collecting host stats panicked: {err}");
diff --git a/src/bin/proxmox-daily-update.rs b/src/bin/proxmox-daily-update.rs
index 6148a6935..4e2fc85b8 100644
--- a/src/bin/proxmox-daily-update.rs
+++ b/src/bin/proxmox-daily-update.rs
@@ -29,7 +29,7 @@ async fn do_update(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
match method.handler {
ApiHandler::Sync(handler) => {
if let Err(err) = (handler)(param.clone(), method, rpcenv) {
- log::error!("Error checking subscription - {}", err);
+ log::error!("Error checking subscription - {err}");
}
}
_ => unreachable!(),
@@ -37,7 +37,7 @@ async fn do_update(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
let notify = match api2::node::subscription::get_subscription(param, rpcenv) {
Ok(info) => info.status == SubscriptionStatus::Active,
Err(err) => {
- log::error!("Error reading subscription - {}", err);
+ log::error!("Error reading subscription - {err}");
false
}
};
@@ -49,7 +49,7 @@ async fn do_update(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
match method.handler {
ApiHandler::Sync(handler) => match (handler)(param, method, rpcenv) {
Err(err) => {
- log::error!("Error triggering apt database update - {}", err);
+ log::error!("Error triggering apt database update - {err}");
}
Ok(upid) => wait_for_local_worker(upid.as_str().unwrap()).await?,
},
@@ -57,12 +57,8 @@ async fn do_update(rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
};
*/
-
- match check_acme_certificates(rpcenv).await {
- Ok(()) => (),
- Err(err) => {
- log::error!("error checking certificates: {}", err);
- }
+ if let Err(err) = check_acme_certificates(rpcenv).await {
+ log::error!("error checking certificates: {err}");
}
// TODO: cleanup tasks like in PVE?
@@ -120,14 +116,14 @@ fn main() {
log::LevelFilter::Info,
Some("proxmox-daily-update"),
) {
- eprintln!("unable to inititialize syslog - {}", err);
+ eprintln!("unable to initialize syslog - {err}");
}
let mut rpcenv = CliEnvironment::new();
rpcenv.set_auth_id(Some(String::from("root@pam")));
if let Err(err) = proxmox_async::runtime::main(run(&mut rpcenv)) {
- log::error!("error during update: {}", err);
+ log::error!("error during update: {err}");
std::process::exit(1);
}
}
diff --git a/src/bin/proxmox_backup_manager/user.rs b/src/bin/proxmox_backup_manager/user.rs
index 743c5d16f..96b83fcc6 100644
--- a/src/bin/proxmox_backup_manager/user.rs
+++ b/src/bin/proxmox_backup_manager/user.rs
@@ -16,7 +16,7 @@ fn render_expire(value: &Value, _record: &Value) -> Result<String, Error> {
return Ok(never);
}
let text = match value.as_i64() {
- Some(epoch) if epoch == 0 => never,
+ Some(0) => never,
Some(epoch) => {
if let Ok(epoch_string) = proxmox_time::strftime_local("%c", epoch) {
epoch_string
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 5f235b0ac..14744e9c8 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -5,10 +5,11 @@ use std::io::{Seek, Write};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
-use std::time::SystemTime;
+use std::time::{Duration, SystemTime};
use anyhow::{bail, format_err, Error};
use http::StatusCode;
+use proxmox_human_byte::HumanByte;
use proxmox_rest_server::WorkerTask;
use proxmox_router::HttpError;
use proxmox_sys::{task_log, task_warn};
@@ -64,6 +65,54 @@ pub(crate) struct LocalSource {
ns: BackupNamespace,
}
+#[derive(Default)]
+pub(crate) struct RemovedVanishedStats {
+ pub(crate) groups: usize,
+ pub(crate) snapshots: usize,
+ pub(crate) namespaces: usize,
+}
+
+impl RemovedVanishedStats {
+ fn add(&mut self, rhs: RemovedVanishedStats) {
+ self.groups += rhs.groups;
+ self.snapshots += rhs.snapshots;
+ self.namespaces += rhs.namespaces;
+ }
+}
+
+#[derive(Default)]
+pub(crate) struct PullStats {
+ pub(crate) chunk_count: usize,
+ pub(crate) bytes: usize,
+ pub(crate) elapsed: Duration,
+ pub(crate) removed: Option<RemovedVanishedStats>,
+}
+
+impl From<RemovedVanishedStats> for PullStats {
+ fn from(removed: RemovedVanishedStats) -> Self {
+ Self {
+ removed: Some(removed),
+ ..Default::default()
+ }
+ }
+}
+
+impl PullStats {
+ fn add(&mut self, rhs: PullStats) {
+ self.chunk_count += rhs.chunk_count;
+ self.bytes += rhs.bytes;
+ self.elapsed += rhs.elapsed;
+
+ if let Some(rhs_removed) = rhs.removed {
+ if let Some(ref mut removed) = self.removed {
+ removed.add(rhs_removed);
+ } else {
+ self.removed = Some(rhs_removed);
+ }
+ }
+ }
+}
+
#[async_trait::async_trait]
/// `PullSource` is a trait that provides an interface for pulling data/information from a source.
/// The trait includes methods for listing namespaces, groups, and backup directories,
@@ -199,7 +248,7 @@ impl PullSource for RemoteSource {
});
if !namespace.is_root() {
- args["ns"] = serde_json::to_value(&namespace)?;
+ args["ns"] = serde_json::to_value(namespace)?;
}
self.client.login().await?;
@@ -230,7 +279,7 @@ impl PullSource for RemoteSource {
}
fn get_store(&self) -> &str {
- &self.repo.store()
+ self.repo.store()
}
async fn reader(
@@ -559,7 +608,7 @@ async fn pull_index_chunks(
target: Arc<DataStore>,
index: I,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-) -> Result<(), Error> {
+) -> Result<PullStats, Error> {
use futures::stream::{self, StreamExt, TryStreamExt};
let start_time = SystemTime::now();
@@ -594,12 +643,14 @@ async fn pull_index_chunks(
let verify_and_write_channel = verify_pool.channel();
let bytes = Arc::new(AtomicUsize::new(0));
+ let chunk_count = Arc::new(AtomicUsize::new(0));
stream
.map(|info| {
let target = Arc::clone(&target);
let chunk_reader = chunk_reader.clone();
let bytes = Arc::clone(&bytes);
+ let chunk_count = Arc::clone(&chunk_count);
let verify_and_write_channel = verify_and_write_channel.clone();
Ok::<_, Error>(async move {
@@ -620,6 +671,7 @@ async fn pull_index_chunks(
})?;
bytes.fetch_add(raw_size, Ordering::SeqCst);
+ chunk_count.fetch_add(1, Ordering::SeqCst);
Ok(())
})
@@ -632,18 +684,24 @@ async fn pull_index_chunks(
verify_pool.complete()?;
- let elapsed = start_time.elapsed()?.as_secs_f64();
+ let elapsed = start_time.elapsed()?;
let bytes = bytes.load(Ordering::SeqCst);
+ let chunk_count = chunk_count.load(Ordering::SeqCst);
task_log!(
worker,
- "downloaded {} bytes ({:.2} MiB/s)",
- bytes,
- (bytes as f64) / (1024.0 * 1024.0 * elapsed)
+ "downloaded {} ({}/s)",
+ HumanByte::from(bytes),
+ HumanByte::new_binary(bytes as f64 / elapsed.as_secs_f64()),
);
- Ok(())
+ Ok(PullStats {
+ chunk_count,
+ bytes,
+ elapsed,
+ removed: None,
+ })
}
fn verify_archive(info: &FileInfo, csum: &[u8; 32], size: u64) -> Result<(), Error> {
@@ -677,7 +735,7 @@ async fn pull_single_archive<'a>(
snapshot: &'a pbs_datastore::BackupDir,
archive_info: &'a FileInfo,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-) -> Result<(), Error> {
+) -> Result<PullStats, Error> {
let archive_name = &archive_info.filename;
let mut path = snapshot.full_path();
path.push(archive_name);
@@ -685,6 +743,8 @@ async fn pull_single_archive<'a>(
let mut tmp_path = path.clone();
tmp_path.set_extension("tmp");
+ let mut pull_stats = PullStats::default();
+
task_log!(worker, "sync archive {}", archive_name);
reader
@@ -704,7 +764,7 @@ async fn pull_single_archive<'a>(
if reader.skip_chunk_sync(snapshot.datastore().name()) {
task_log!(worker, "skipping chunk sync for same datastore");
} else {
- pull_index_chunks(
+ let stats = pull_index_chunks(
worker,
reader.chunk_reader(archive_info.crypt_mode),
snapshot.datastore().clone(),
@@ -712,6 +772,7 @@ async fn pull_single_archive<'a>(
downloaded_chunks,
)
.await?;
+ pull_stats.add(stats);
}
}
ArchiveType::FixedIndex => {
@@ -724,7 +785,7 @@ async fn pull_single_archive<'a>(
if reader.skip_chunk_sync(snapshot.datastore().name()) {
task_log!(worker, "skipping chunk sync for same datastore");
} else {
- pull_index_chunks(
+ let stats = pull_index_chunks(
worker,
reader.chunk_reader(archive_info.crypt_mode),
snapshot.datastore().clone(),
@@ -732,6 +793,7 @@ async fn pull_single_archive<'a>(
downloaded_chunks,
)
.await?;
+ pull_stats.add(stats);
}
}
ArchiveType::Blob => {
@@ -743,7 +805,7 @@ async fn pull_single_archive<'a>(
if let Err(err) = std::fs::rename(&tmp_path, &path) {
bail!("Atomic rename file {:?} failed - {}", path, err);
}
- Ok(())
+ Ok(pull_stats)
}
/// Actual implementation of pulling a snapshot.
@@ -760,7 +822,8 @@ async fn pull_snapshot<'a>(
reader: Arc,
snapshot: &'a pbs_datastore::BackupDir,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-) -> Result<(), Error> {
+) -> Result<PullStats, Error> {
+ let mut pull_stats = PullStats::default();
let mut manifest_name = snapshot.full_path();
manifest_name.push(MANIFEST_BLOB_NAME);
@@ -776,7 +839,7 @@ async fn pull_snapshot<'a>(
{
tmp_manifest_blob = data;
} else {
- return Ok(());
+ return Ok(pull_stats);
}
if manifest_name.exists() {
@@ -800,7 +863,7 @@ async fn pull_snapshot<'a>(
};
task_log!(worker, "no data changes");
let _ = std::fs::remove_file(&tmp_manifest_name);
- return Ok(()); // nothing changed
+ return Ok(pull_stats); // nothing changed
}
}
@@ -845,7 +908,7 @@ async fn pull_snapshot<'a>(
}
}
- pull_single_archive(
+ let stats = pull_single_archive(
worker,
reader.clone(),
snapshot,
@@ -853,6 +916,7 @@ async fn pull_snapshot<'a>(
downloaded_chunks.clone(),
)
.await?;
+ pull_stats.add(stats);
}
if let Err(err) = std::fs::rename(&tmp_manifest_name, &manifest_name) {
@@ -868,7 +932,7 @@ async fn pull_snapshot<'a>(
.cleanup_unreferenced_files(&manifest)
.map_err(|err| format_err!("failed to cleanup unreferenced files - {err}"))?;
- Ok(())
+ Ok(pull_stats)
}
/// Pulls a `snapshot`, removing newly created ones on error, but keeping existing ones in any case.
@@ -880,31 +944,36 @@ async fn pull_snapshot_from<'a>(
reader: Arc,
snapshot: &'a pbs_datastore::BackupDir,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
-) -> Result<(), Error> {
+) -> Result<PullStats, Error> {
let (_path, is_new, _snap_lock) = snapshot
.datastore()
.create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;
- if is_new {
+ let pull_stats = if is_new {
task_log!(worker, "sync snapshot {}", snapshot.dir());
- if let Err(err) = pull_snapshot(worker, reader, snapshot, downloaded_chunks).await {
- if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
- snapshot.backup_ns(),
- snapshot.as_ref(),
- true,
- ) {
- task_log!(worker, "cleanup error - {}", cleanup_err);
+ match pull_snapshot(worker, reader, snapshot, downloaded_chunks).await {
+ Err(err) => {
+ if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
+ snapshot.backup_ns(),
+ snapshot.as_ref(),
+ true,
+ ) {
+ task_log!(worker, "cleanup error - {}", cleanup_err);
+ }
+ return Err(err);
+ }
+ Ok(pull_stats) => {
+ task_log!(worker, "sync snapshot {} done", snapshot.dir());
+ pull_stats
}
- return Err(err);
}
- task_log!(worker, "sync snapshot {} done", snapshot.dir());
} else {
task_log!(worker, "re-sync snapshot {}", snapshot.dir());
- pull_snapshot(worker, reader, snapshot, downloaded_chunks).await?;
- }
+ pull_snapshot(worker, reader, snapshot, downloaded_chunks).await?
+ };
- Ok(())
+ Ok(pull_stats)
}
#[derive(PartialEq, Eq)]
@@ -1009,7 +1078,7 @@ async fn pull_group(
source_namespace: &BackupNamespace,
group: &BackupGroup,
progress: &mut StoreProgress,
-) -> Result<(), Error> {
+) -> Result<PullStats, Error> {
let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced);
let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast);
@@ -1066,6 +1135,8 @@ async fn pull_group(
progress.group_snapshots = list.len() as u64;
+ let mut pull_stats = PullStats::default();
+
for (pos, from_snapshot) in list.into_iter().enumerate() {
let to_snapshot = params
.target
@@ -1082,7 +1153,8 @@ async fn pull_group(
progress.done_snapshots = pos as u64 + 1;
task_log!(worker, "percentage done: {}", progress);
- result?; // stop on error
+ let stats = result?; // stop on error
+ pull_stats.add(stats);
}
if params.remove_vanished {
@@ -1109,10 +1181,15 @@ async fn pull_group(
.target
.store
.remove_backup_dir(&target_ns, snapshot.as_ref(), false)?;
+ pull_stats.add(PullStats::from(RemovedVanishedStats {
+ snapshots: 1,
+ groups: 0,
+ namespaces: 0,
+ }));
}
}
- Ok(())
+ Ok(pull_stats)
}
fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result<bool, Error> {
@@ -1161,8 +1238,9 @@ fn check_and_remove_vanished_ns(
worker: &WorkerTask,
params: &PullParameters,
synced_ns: HashSet<BackupNamespace>,
-) -> Result<bool, Error> {
+) -> Result<(bool, RemovedVanishedStats), Error> {
let mut errors = false;
+ let mut removed_stats = RemovedVanishedStats::default();
let user_info = CachedUserInfo::new()?;
// clamp like remote does so that we don't list more than we can ever have synced.
@@ -1197,7 +1275,10 @@ fn check_and_remove_vanished_ns(
continue;
}
match check_and_remove_ns(params, &local_ns) {
- Ok(true) => task_log!(worker, "Removed namespace {}", local_ns),
+ Ok(true) => {
+ task_log!(worker, "Removed namespace {local_ns}");
+ removed_stats.namespaces += 1;
+ }
Ok(false) => task_log!(
worker,
"Did not remove namespace {} - protected snapshots remain",
@@ -1210,7 +1291,7 @@ fn check_and_remove_vanished_ns(
}
}
- Ok(errors)
+ Ok((errors, removed_stats))
}
/// Pulls a store according to `params`.
@@ -1233,7 +1314,7 @@ fn check_and_remove_vanished_ns(
pub(crate) async fn pull_store(
worker: &WorkerTask,
mut params: PullParameters,
-) -> Result<(), Error> {
+) -> Result<PullStats, Error> {
// explicit create shared lock to prevent GC on newly created chunks
let _shared_store_lock = params.target.store.try_shared_chunk_store_lock()?;
let mut errors = false;
@@ -1269,6 +1350,7 @@ pub(crate) async fn pull_store(
let (mut groups, mut snapshots) = (0, 0);
let mut synced_ns = HashSet::with_capacity(namespaces.len());
+ let mut pull_stats = PullStats::default();
for namespace in namespaces {
let source_store_ns_str = print_store_and_ns(params.source.get_store(), &namespace);
@@ -1303,9 +1385,11 @@ pub(crate) async fn pull_store(
}
match pull_ns(worker, &namespace, &mut params).await {
- Ok((ns_progress, ns_errors)) => {
+ Ok((ns_progress, ns_pull_stats, ns_errors)) => {
errors |= ns_errors;
+ pull_stats.add(ns_pull_stats);
+
if params.max_depth != Some(0) {
groups += ns_progress.done_groups;
snapshots += ns_progress.done_snapshots;
@@ -1331,14 +1415,16 @@ pub(crate) async fn pull_store(
}
if params.remove_vanished {
- errors |= check_and_remove_vanished_ns(worker, &params, synced_ns)?;
+ let (has_errors, stats) = check_and_remove_vanished_ns(worker, &params, synced_ns)?;
+ errors |= has_errors;
+ pull_stats.add(PullStats::from(stats));
}
if errors {
bail!("sync failed with some errors.");
}
- Ok(())
+ Ok(pull_stats)
}
/// Pulls a namespace according to `params`.
@@ -1357,7 +1443,7 @@ pub(crate) async fn pull_ns(
worker: &WorkerTask,
namespace: &BackupNamespace,
params: &mut PullParameters,
-) -> Result<(StoreProgress, bool), Error> {
+) -> Result<(StoreProgress, PullStats, bool), Error> {
let mut list: Vec<BackupGroup> = params.source.list_groups(namespace, &params.owner).await?;
list.sort_unstable_by(|a, b| {
@@ -1389,6 +1475,7 @@ pub(crate) async fn pull_ns(
}
let mut progress = StoreProgress::new(list.len() as u64);
+ let mut pull_stats = PullStats::default();
let target_ns = namespace.map_prefix(&params.source.get_ns(), &params.target.ns)?;
@@ -1429,10 +1516,14 @@ pub(crate) async fn pull_ns(
owner
);
errors = true; // do not stop here, instead continue
- } else if let Err(err) = pull_group(worker, params, namespace, &group, &mut progress).await
- {
- task_log!(worker, "sync group {} failed - {}", &group, err,);
- errors = true; // do not stop here, instead continue
+ } else {
+ match pull_group(worker, params, namespace, &group, &mut progress).await {
+ Ok(stats) => pull_stats.add(stats),
+ Err(err) => {
+ task_log!(worker, "sync group {} failed - {}", &group, err,);
+ errors = true; // do not stop here, instead continue
+ }
+ }
}
}
@@ -1452,18 +1543,30 @@ pub(crate) async fn pull_ns(
continue;
}
task_log!(worker, "delete vanished group '{local_group}'",);
- match params
+ let delete_stats_result = params
.target
.store
- .remove_backup_group(&target_ns, local_group)
- {
- Ok(true) => {}
- Ok(false) => {
- task_log!(
- worker,
- "kept some protected snapshots of group '{}'",
- local_group
- );
+ .remove_backup_group(&target_ns, local_group);
+
+ match delete_stats_result {
+ Ok(stats) => {
+ if !stats.all_removed() {
+ task_log!(
+ worker,
+ "kept some protected snapshots of group '{local_group}'",
+ );
+ pull_stats.add(PullStats::from(RemovedVanishedStats {
+ snapshots: stats.removed_snapshots(),
+ groups: 0,
+ namespaces: 0,
+ }));
+ } else {
+ pull_stats.add(PullStats::from(RemovedVanishedStats {
+ snapshots: stats.removed_snapshots(),
+ groups: 1,
+ namespaces: 0,
+ }));
+ }
}
Err(err) => {
task_log!(worker, "{}", err);
@@ -1479,5 +1582,5 @@ pub(crate) async fn pull_ns(
};
}
- Ok((progress, errors))
+ Ok((progress, pull_stats, errors))
}
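
Note: the PullStats/RemovedVanishedStats types defined above compose via
add(), which is how pull_group, pull_ns and pull_store fold their per-item
results. Illustrative usage:

    use std::time::Duration;

    let mut total = PullStats::default();
    total.add(PullStats {
        chunk_count: 10,
        bytes: 4 << 20, // 4 MiB
        elapsed: Duration::from_secs(2),
        removed: None,
    });
    total.add(PullStats::from(RemovedVanishedStats {
        snapshots: 1,
        groups: 0,
        namespaces: 0,
    }));
    // total.chunk_count == 10, and total.removed now counts 1 snapshot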
diff --git a/src/server/realm_sync_job.rs b/src/server/realm_sync_job.rs
index 9094c2fa8..972e9a6b5 100644
--- a/src/server/realm_sync_job.rs
+++ b/src/server/realm_sync_job.rs
@@ -81,9 +81,14 @@ impl LdapRealmSyncJob {
};
let sync_settings = GeneralSyncSettings::default()
- .apply_config(&config)?
+ .apply_config(config.sync_defaults_options.as_deref())?
.apply_override(override_settings)?;
- let sync_attributes = LdapSyncSettings::from_config(&config)?;
+ let sync_attributes = LdapSyncSettings::new(
+ &config.user_attr,
+ config.sync_attributes.as_deref(),
+ config.user_classes.as_deref(),
+ config.filter.as_deref(),
+ )?;
let ldap_config = auth::LdapAuthenticator::api_type_to_config(&config)?;
@@ -170,7 +175,7 @@ impl LdapRealmSyncJob {
"userid attribute `{user_id_attribute}` not in LDAP search result"
)
})?
- .get(0)
+ .first()
.context("userid attribute array is empty")?
.clone();
@@ -233,7 +238,7 @@ impl LdapRealmSyncJob {
existing_user: Option<&User>,
) -> User {
let lookup = |attribute: &str, ldap_attribute: Option<&String>, schema: &'static Schema| {
- let value = result.attributes.get(ldap_attribute?)?.get(0)?;
+ let value = result.attributes.get(ldap_attribute?)?.first()?;
let schema = schema.unwrap_string_schema();
if let Err(e) = schema.check_constraints(value) {
@@ -385,14 +390,19 @@ struct LdapSyncSettings {
}
impl LdapSyncSettings {
- fn from_config(config: &LdapRealmConfig) -> Result<Self, Error> {
- let mut attributes = vec![config.user_attr.clone()];
+ fn new(
+ user_attr: &str,
+ sync_attributes: Option<&str>,
+ user_classes: Option<&str>,
+ user_filter: Option<&str>,
+ ) -> Result<Self, Error> {
+ let mut attributes = vec![user_attr.to_owned()];
let mut email = None;
let mut firstname = None;
let mut lastname = None;
- if let Some(sync_attributes) = &config.sync_attributes {
+ if let Some(sync_attributes) = &sync_attributes {
let value = LdapSyncAttributes::API_SCHEMA.parse_property_string(sync_attributes)?;
let sync_attributes: LdapSyncAttributes = serde_json::from_value(value)?;
@@ -400,20 +410,20 @@ impl LdapSyncSettings {
firstname = sync_attributes.firstname.clone();
lastname = sync_attributes.lastname.clone();
- if let Some(email_attr) = sync_attributes.email {
- attributes.push(email_attr);
+ if let Some(email_attr) = &sync_attributes.email {
+ attributes.push(email_attr.clone());
}
- if let Some(firstname_attr) = sync_attributes.firstname {
- attributes.push(firstname_attr);
+ if let Some(firstname_attr) = &sync_attributes.firstname {
+ attributes.push(firstname_attr.clone());
}
- if let Some(lastname_attr) = sync_attributes.lastname {
- attributes.push(lastname_attr);
+ if let Some(lastname_attr) = &sync_attributes.lastname {
+ attributes.push(lastname_attr.clone());
}
}
- let user_classes = if let Some(user_classes) = &config.user_classes {
+ let user_classes = if let Some(user_classes) = &user_classes {
let a = USER_CLASSES_ARRAY.parse_property_string(user_classes)?;
serde_json::from_value(a)?
} else {
@@ -426,13 +436,13 @@ impl LdapSyncSettings {
};
Ok(Self {
- user_attr: config.user_attr.clone(),
+ user_attr: user_attr.to_owned(),
firstname_attr: firstname,
lastname_attr: lastname,
email_attr: email,
attributes,
user_classes,
- user_filter: config.filter.clone(),
+ user_filter: user_filter.map(ToOwned::to_owned),
})
}
}
@@ -447,11 +457,11 @@ impl Default for GeneralSyncSettings {
}
impl GeneralSyncSettings {
- fn apply_config(self, config: &LdapRealmConfig) -> Result<Self, Error> {
+ fn apply_config(self, sync_defaults_options: Option<&str>) -> Result<Self, Error> {
let mut enable_new = None;
let mut remove_vanished = None;
- if let Some(sync_defaults_options) = &config.sync_defaults_options {
+ if let Some(sync_defaults_options) = sync_defaults_options {
let sync_defaults_options = Self::parse_sync_defaults_options(sync_defaults_options)?;
enable_new = sync_defaults_options.enable_new;
diff --git a/src/server/report.rs b/src/server/report.rs
index f0f5b0c1c..9d4b2cdf0 100644
--- a/src/server/report.rs
+++ b/src/server/report.rs
@@ -126,9 +126,8 @@ fn get_directory_content(path: impl AsRef<Path>) -> String {
Ok(iter) => iter,
Err(err) => {
return format!(
- "`$ cat '{}*'`\n```\n# read dir failed - {}\n```",
+ "`$ cat '{}*'`\n```\n# read dir failed - {err}\n```",
path.as_ref().display(),
- err.to_string(),
);
}
};
@@ -138,7 +137,7 @@ fn get_directory_content(path: impl AsRef<Path>) -> String {
let entry = match entry {
Ok(entry) => entry,
Err(err) => {
- let _ = writeln!(out, "error during read-dir - {}", err.to_string());
+ let _ = writeln!(out, "error during read-dir - {err}");
continue;
}
};
@@ -190,7 +189,7 @@ pub fn generate_report() -> String {
.map(|file_name| {
let path = Path::new(file_name);
if path.is_dir() {
- get_directory_content(&path)
+ get_directory_content(path)
} else {
get_file_content(file_name)
}
diff --git a/src/tape/changer/online_status_map.rs b/src/tape/changer/online_status_map.rs
index c3da04154..d7f3ca7a7 100644
--- a/src/tape/changer/online_status_map.rs
+++ b/src/tape/changer/online_status_map.rs
@@ -88,7 +88,7 @@ impl OnlineStatusMap {
}
fn insert_into_online_set(inventory: &Inventory, label_text: &str, online_set: &mut HashSet<Uuid>) {
- match inventory.find_media_by_label_text(&label_text) {
+ match inventory.find_media_by_label_text(label_text) {
Ok(Some(media_id)) => {
online_set.insert(media_id.label.uuid.clone());
}
diff --git a/src/tape/media_catalog.rs b/src/tape/media_catalog.rs
index 928d4701e..9aae0aa12 100644
--- a/src/tape/media_catalog.rs
+++ b/src/tape/media_catalog.rs
@@ -1,6 +1,6 @@
use std::collections::{HashMap, HashSet};
use std::fs::File;
-use std::io::{BufReader, Read, Seek, SeekFrom, Write};
+use std::io::{BufReader, Read, Seek, Write};
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
@@ -795,7 +795,7 @@ impl MediaCatalog {
let mut media_set_uuid = None;
loop {
- let pos = file.seek(SeekFrom::Current(0))?; // get current pos
+ let pos = file.stream_position()?; // get current pos
if pos == 0 {
// read/check magic number
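
Note: stream_position() has been the idiomatic spelling of
seek(SeekFrom::Current(0)) since Rust 1.51. Equivalence sketch:

    use std::io::{Seek, SeekFrom};

    fn current_pos<F: Seek>(f: &mut F) -> std::io::Result<u64> {
        let old = f.seek(SeekFrom::Current(0))?; // old spelling
        let new = f.stream_position()?;          // new spelling
        debug_assert_eq!(old, new);
        Ok(new)
    }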
diff --git a/src/tape/media_set.rs b/src/tape/media_set.rs
index 8c08efd9a..894c8ae39 100644
--- a/src/tape/media_set.rs
+++ b/src/tape/media_set.rs
@@ -13,8 +13,6 @@ pub struct MediaSet {
}
impl MediaSet {
- pub const MEDIA_SET_MAX_SEQ_NR: u64 = 100;
-
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let uuid = Uuid::generate();
@@ -41,14 +39,6 @@ impl MediaSet {
}
pub fn insert_media(&mut self, uuid: Uuid, seq_nr: u64) -> Result<(), Error> {
- if seq_nr > Self::MEDIA_SET_MAX_SEQ_NR {
- bail!(
- "media set sequence number to large in media set {} ({} > {})",
- self.uuid.to_string(),
- seq_nr,
- Self::MEDIA_SET_MAX_SEQ_NR
- );
- }
let seq_nr = seq_nr as usize;
if self.media_list.len() > seq_nr {
if self.media_list[seq_nr].is_some() {
diff --git a/src/tape/pool_writer/mod.rs b/src/tape/pool_writer/mod.rs
index f1224bdd6..a6ba4a1d3 100644
--- a/src/tape/pool_writer/mod.rs
+++ b/src/tape/pool_writer/mod.rs
@@ -32,6 +32,10 @@ use super::file_formats::{
PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1,
};
+// Warn when the sequence number reaches this limit, as large
+// media sets are error-prone and take a very long time to restore from.
+const MEDIA_SET_SEQ_NR_WARN_LIMIT: u64 = 20;
+
struct PoolWriterState {
drive: Box,
// Media Uuid from loaded media
@@ -272,6 +276,14 @@ impl PoolWriter {
let media_set = media.media_set_label().unwrap();
+ if is_new_media && media_set.seq_nr >= MEDIA_SET_SEQ_NR_WARN_LIMIT {
+ task_warn!(
+ worker,
+ "large media-set detected ({}), consider using a different allocation policy",
+ media_set.seq_nr
+ );
+ }
+
drive.assert_encryption_mode(media_set.encryption_key_fingerprint.is_some())?;
self.status = Some(PoolWriterState {
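
Note: this replaces the hard MEDIA_SET_MAX_SEQ_NR limit of 100 removed from
src/tape/media_set.rs above; appending to a large media set now merely logs a
warning once a newly allocated medium pushes the set past 20 members, e.g.
(sequence number illustrative):

    large media-set detected (21), consider using a different allocation policy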
diff --git a/src/tape/pool_writer/new_chunks_iterator.rs b/src/tape/pool_writer/new_chunks_iterator.rs
index ae75b7b13..1454b33d2 100644
--- a/src/tape/pool_writer/new_chunks_iterator.rs
+++ b/src/tape/pool_writer/new_chunks_iterator.rs
@@ -57,12 +57,9 @@ impl NewChunksIterator {
let blob = datastore.load_chunk(&digest)?;
//println!("LOAD CHUNK {}", hex::encode(&digest));
- match tx.send(Ok(Some((digest, blob)))) {
- Ok(()) => {}
- Err(err) => {
- eprintln!("could not send chunk to reader thread: {}", err);
- break;
- }
+ if let Err(err) = tx.send(Ok(Some((digest, blob)))) {
+ eprintln!("could not send chunk to reader thread: {err}");
+ break;
}
chunk_index.insert(digest);
diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs
index 7a7723568..94f89e0a9 100644
--- a/src/tools/disks/mod.rs
+++ b/src/tools/disks/mod.rs
@@ -1158,7 +1158,7 @@ pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error>
of_path,
"bs=1M".into(),
"conv=fdatasync".into(),
- count_str.into(),
+ count_str,
];
dd_command.args(args);
@@ -1167,7 +1167,7 @@ pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error>
if is_partition {
// set the partition type to 0x83 'Linux filesystem'
- change_parttype(&disk, "8300", worker)?;
+ change_parttype(disk, "8300", worker)?;
}
Ok(())
diff --git a/src/tools/mod.rs b/src/tools/mod.rs
index f7e3b6e22..dfdbb7024 100644
--- a/src/tools/mod.rs
+++ b/src/tools/mod.rs
@@ -44,7 +44,7 @@ pub fn detect_modified_configuration_file(
pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
pub const DEFAULT_USER_AGENT_STRING: &str = "proxmox-backup-client/1.0";
-/// Returns a new instance of [`Client`](proxmox_http::client::Client) configured for PBS usage.
+/// Returns a new instance of [`Client`] configured for PBS usage.
pub fn pbs_simple_http(proxy_config: Option<ProxyConfig>) -> Client {
let options = HttpOptions {
proxy_config,
diff --git a/src/tools/parallel_handler.rs b/src/tools/parallel_handler.rs
index c4316ad06..17f701790 100644
--- a/src/tools/parallel_handler.rs
+++ b/src/tools/parallel_handler.rs
@@ -80,13 +80,10 @@ impl ParallelHandler {
Ok(data) => data,
Err(_) => return,
};
- match (handler_fn)(data) {
- Ok(()) => (),
- Err(err) => {
- let mut guard = abort.lock().unwrap();
- if guard.is_none() {
- *guard = Some(err.to_string());
- }
+ if let Err(err) = (handler_fn)(data) {
+ let mut guard = abort.lock().unwrap();
+ if guard.is_none() {
+ *guard = Some(err.to_string());
}
}
})
diff --git a/src/traffic_control_cache.rs b/src/traffic_control_cache.rs
index 2e097d704..4c3bccee3 100644
--- a/src/traffic_control_cache.rs
+++ b/src/traffic_control_cache.rs
@@ -164,11 +164,8 @@ impl TrafficControlCache {
self.last_traffic_control_generation = traffic_control_generation;
self.last_update = now;
- match self.reload_impl() {
- Ok(()) => (),
- Err(err) => {
- log::error!("TrafficControlCache::reload failed -> {}", err);
- }
+ if let Err(err) = self.reload_impl() {
+ log::error!("TrafficControlCache::reload failed -> {err}");
}
}
diff --git a/www/Makefile b/www/Makefile
index 275df2b94..45ea3aae7 100644
--- a/www/Makefile
+++ b/www/Makefile
@@ -80,7 +80,6 @@ JSSRC= \
window/SyncJobEdit.js \
window/PruneJobEdit.js \
window/UserEdit.js \
- window/UserPassword.js \
window/Settings.js \
window/TokenEdit.js \
window/VerifyJobEdit.js \
diff --git a/www/ServerStatus.js b/www/ServerStatus.js
index 37d2bb9a9..8210c899d 100644
--- a/www/ServerStatus.js
+++ b/www/ServerStatus.js
@@ -76,11 +76,9 @@ Ext.define('PBS.ServerStatus', {
{
xtype: 'button',
iconCls: 'fa fa-clipboard',
- handler: function(button) {
- window.getSelection().selectAllChildren(
- document.getElementById('pkgversions'),
- );
- document.execCommand("copy");
+ handler: async function(button) {
+ let el = document.getElementById('pkgversions');
+ await navigator.clipboard.writeText(el.textContent);
},
text: gettext('Copy'),
},
diff --git a/www/Subscription.js b/www/Subscription.js
index 0546d3bc0..f38879595 100644
--- a/www/Subscription.js
+++ b/www/Subscription.js
@@ -8,7 +8,8 @@
onlineHelp: 'get_help',
items: {
- xtype: 'textfield',
+ xtype: 'proxmoxtextfield',
+ trimValue: true,
labelWidth: 120,
name: 'key',
value: '',
diff --git a/www/SystemConfiguration.js b/www/SystemConfiguration.js
index 860d85c0a..e94fe7ca1 100644
--- a/www/SystemConfiguration.js
+++ b/www/SystemConfiguration.js
@@ -31,6 +31,7 @@ Ext.define('PBS.SystemConfiguration', {
},
{
xtype: 'proxmoxNodeDNSView',
+ deleteEmpty: true,
title: gettext('DNS'),
nodename: 'localhost',
},
diff --git a/www/config/UserView.js b/www/config/UserView.js
index 62eb0f169..19dce06f9 100644
--- a/www/config/UserView.js
+++ b/www/config/UserView.js
@@ -58,8 +58,9 @@ Ext.define('PBS.config.UserView', {
if (selection.length < 1) return;
- Ext.create('PBS.window.UserPassword', {
- url: '/api2/extjs/access/users/' + selection[0].data.userid,
+ Ext.create('Proxmox.window.PasswordEdit', {
+ userid: selection[0].data.userid,
+ confirmCurrentPassword: Proxmox.UserName !== 'root@pam',
}).show();
},
diff --git a/www/datastore/Content.js b/www/datastore/Content.js
index 87317ec1d..c2403ff9c 100644
--- a/www/datastore/Content.js
+++ b/www/datastore/Content.js
@@ -479,6 +479,7 @@ Ext.define('PBS.DataStoreContent', {
ns,
dryrun: true,
canRecurse: true,
+ isCreate: true,
},
],
});
@@ -545,6 +546,10 @@ Ext.define('PBS.DataStoreContent', {
});
},
+ onCopy: async function(view, rI, cI, item, e, { data }) {
+ await navigator.clipboard.writeText(data.text);
+ },
+
onNotesEdit: function(view, data) {
let me = this;
@@ -883,6 +888,7 @@ Ext.define('PBS.DataStoreContent', {
if (record.data.ty === 'group') {
menu = Ext.create('PBS.datastore.GroupCmdMenu', {
title: gettext('Group'),
+ onCopy: createControllerCallback('onCopy'),
onVerify: createControllerCallback('onVerify'),
onChangeOwner: createControllerCallback('onChangeOwner'),
onPrune: createControllerCallback('onPrune'),
@@ -891,6 +897,7 @@ Ext.define('PBS.DataStoreContent', {
} else if (record.data.ty === 'dir') {
menu = Ext.create('PBS.datastore.SnapshotCmdMenu', {
title: gettext('Snapshot'),
+ onCopy: createControllerCallback('onCopy'),
onVerify: createControllerCallback('onVerify'),
onProtectionChange: createControllerCallback('onProtectionChange'),
onForget: createControllerCallback('onForget'),
@@ -1283,12 +1290,21 @@ Ext.define('PBS.datastore.GroupCmdMenu', {
extend: 'Ext.menu.Menu',
mixins: ['Proxmox.Mixin.CBind'],
+ onCopy: undefined,
onVerify: undefined,
onChangeOwner: undefined,
onPrune: undefined,
onForget: undefined,
items: [
+ {
+ text: gettext('Copy name to clipboard'),
+ iconCls: 'fa fa-clipboard',
+ handler: function() { this.up('menu').onCopy(); },
+ cbind: {
+ hidden: '{!onCopy}',
+ },
+ },
{
text: gettext('Verify'),
iconCls: 'pve-icon-verify-lettering',
@@ -1329,11 +1345,21 @@ Ext.define('PBS.datastore.SnapshotCmdMenu', {
extend: 'Ext.menu.Menu',
mixins: ['Proxmox.Mixin.CBind'],
+ onCopy: undefined,
onVerify: undefined,
onProtectionChange: undefined,
onForget: undefined,
items: [
+ {
+ text: gettext('Copy name to clipboard'),
+ iconCls: 'fa fa-clipboard',
+ handler: function() { this.up('menu').onCopy(); },
+ cbind: {
+ hidden: '{!onCopy}',
+ disabled: '{!onCopy}',
+ },
+ },
{
text: gettext('Verify'),
iconCls: 'pve-icon-verify-lettering',
diff --git a/www/panel/NodeInfo.js b/www/panel/NodeInfo.js
index 771ccbe65..026efbe84 100644
--- a/www/panel/NodeInfo.js
+++ b/www/panel/NodeInfo.js
@@ -45,10 +45,9 @@ Ext.define('PBS.NodeInfoPanel', {
{
xtype: 'button',
iconCls: 'fa fa-clipboard',
- handler: function(b) {
+ handler: async function(b) {
var el = document.getElementById('fingerprintField');
- el.select();
- document.execCommand("copy");
+ await navigator.clipboard.writeText(el.value);
},
text: gettext('Copy'),
},
diff --git a/www/panel/PrunePanel.js b/www/panel/PrunePanel.js
index 59c28a8e8..7ccdb1dee 100644
--- a/www/panel/PrunePanel.js
+++ b/www/panel/PrunePanel.js
@@ -27,7 +27,7 @@ Ext.define('PBS.panel.PruneInputPanel', {
if (me.ns && me.ns !== '') {
values.ns = me.ns;
}
- if (!values.recursive) {
+ if (!values.recursive && me.canRecurse) {
values['max-depth'] = 0;
}
delete values.recursive;
diff --git a/www/tape/ChangerStatus.js b/www/tape/ChangerStatus.js
index 27c7605c6..fdafc459e 100644
--- a/www/tape/ChangerStatus.js
+++ b/www/tape/ChangerStatus.js
@@ -60,6 +60,9 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
submitText: gettext('OK'),
method: 'POST',
url: `/api2/extjs/tape/changer/${changer}/transfer`,
+ submitOptions: {
+ timeout: 3*60*1000,
+ },
items: [
{
xtype: 'displayfield',
@@ -96,6 +99,9 @@ Ext.define('PBS.TapeManagement.ChangerStatus', {
submitText: gettext('OK'),
method: 'POST',
url: `/api2/extjs/tape/changer/${changer}/transfer`,
+ submitOptions: {
+ timeout: 3*60*1000,
+ },
items: [
{
xtype: 'displayfield',
diff --git a/www/tape/TapeInventory.js b/www/tape/TapeInventory.js
index 3039a95a4..47d19acc0 100644
--- a/www/tape/TapeInventory.js
+++ b/www/tape/TapeInventory.js
@@ -321,7 +321,7 @@ Ext.define('PBS.TapeManagement.TapeInventory', {
flex: 1,
},
{
- text: gettext('UUID'),
+ text: 'UUID',
dataIndex: 'uuid',
flex: 1,
hidden: true,
diff --git a/www/tape/window/TapeBackupJob.js b/www/tape/window/TapeBackupJob.js
index 2b18e0e04..abbbaa0b2 100644
--- a/www/tape/window/TapeBackupJob.js
+++ b/www/tape/window/TapeBackupJob.js
@@ -61,8 +61,16 @@ Ext.define('PBS.TapeManagement.BackupJobEdit', {
Proxmox.Utils.assemble_field_data(values, { "delete": 'eject-media' });
}
PBS.Utils.delete_if_default(values, 'notify-user');
+
+ if (me.isCreate) {
+ delete values.delete;
+ }
+
return values;
},
+ cbind: {
+ isCreate: '{isCreate}', // pass it through
+ },
column1: [
{
xtype: 'pmxDisplayEditField',
@@ -184,8 +192,14 @@ Ext.define('PBS.TapeManagement.BackupJobEdit', {
delete values['group-filter'];
values.delete = 'group-filter';
}
+ if (this.isCreate) {
+ delete values.delete;
+ }
return values;
},
+ cbind: {
+ isCreate: '{isCreate}', // pass it through
+ },
title: gettext('Group Filter'),
items: [
{
diff --git a/www/window/DatastoreRepoInfo.js b/www/window/DatastoreRepoInfo.js
index e862d7ade..2f2db4776 100644
--- a/www/window/DatastoreRepoInfo.js
+++ b/www/window/DatastoreRepoInfo.js
@@ -113,15 +113,14 @@ Ext.define('PBS.form.CopyField', {
iconCls: 'fa fa-clipboard x-btn-icon-el-default-toolbar-small',
baseCls: 'x-btn',
cls: 'x-btn-default-toolbar-small proxmox-inline-button',
- handler: function() {
+ handler: async function() {
let me = this;
let field = me.up('pbsCopyField');
let el = field.getComponent('inputField')?.inputEl;
if (!el?.dom) {
return;
}
- el.dom.select();
- document.execCommand("copy");
+ await navigator.clipboard.writeText(el.dom.value);
},
text: gettext('Copy'),
},
diff --git a/www/window/InfluxDbEdit.js b/www/window/InfluxDbEdit.js
index e44677375..b1927d6a4 100644
--- a/www/window/InfluxDbEdit.js
+++ b/www/window/InfluxDbEdit.js
@@ -205,14 +205,16 @@ Ext.define('PBS.window.InfluxDbUdpEdit', {
let me = this;
me.callParent();
- me.load({
- success: function(response, options) {
- let values = response.result.data;
- let [_match, host, port] = /^(.*):(\d+)$/.exec(values.host) || [];
- values.host = host;
- values.port = port;
- me.setValues(values);
- },
- });
+ if (me.serverid) {
+ me.load({
+ success: function(response, options) {
+ let values = response.result.data;
+ let [_match, host, port] = /^(.*):(\d+)$/.exec(values.host) || [];
+ values.host = host;
+ values.port = port;
+ me.setValues(values);
+ },
+ });
+ }
},
});
diff --git a/www/window/PruneJobEdit.js b/www/window/PruneJobEdit.js
index 84159a9dd..30ec8036b 100644
--- a/www/window/PruneJobEdit.js
+++ b/www/window/PruneJobEdit.js
@@ -105,7 +105,9 @@ Ext.define('PBS.window.PruneJobEdit', {
xtype: 'pbsNamespaceMaxDepthReduced',
name: 'max-depth',
fieldLabel: gettext('Max. Depth'),
- deleteEmpty: true,
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
},
],
@@ -114,10 +116,9 @@ Ext.define('PBS.window.PruneJobEdit', {
fieldLabel: gettext('Prune Schedule'),
xtype: 'pbsCalendarEvent',
name: 'schedule',
- emptyText: gettext('none (disabled)'),
+ allowBlank: false,
cbind: {
- deleteEmpty: '{!isCreate}',
- value: '{scheduleValue}',
+ value: '{scheduleValue}',
},
},
{
@@ -133,6 +134,9 @@ Ext.define('PBS.window.PruneJobEdit', {
columnB: [
{
xtype: 'pbsPruneInputPanel',
+ cbind: {
+ isCreate: '{isCreate}',
+ },
getValues: () => ({}), // let the prune input panel handle the values itself
},
{
diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js
index c11103eba..e06fdd81a 100644
--- a/www/window/SyncJobEdit.js
+++ b/www/window/SyncJobEdit.js
@@ -80,6 +80,9 @@ Ext.define('PBS.window.SyncJobEdit', {
}
return values;
},
+ cbind: {
+ isCreate: '{isCreate}', // pass it through
+ },
column1: [
{
xtype: 'pmxDisplayEditField',
@@ -266,7 +269,9 @@ Ext.define('PBS.window.SyncJobEdit', {
xtype: 'pbsNamespaceMaxDepthReduced',
name: 'max-depth',
fieldLabel: gettext('Max. Depth'),
- deleteEmpty: true,
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
},
{
fieldLabel: gettext('Remove vanished'),
@@ -321,6 +326,7 @@ Ext.define('PBS.window.SyncJobEdit', {
{
xtype: 'inputpanel',
onGetValues: function(values) {
+ let me = this;
PBS.Utils.delete_if_default(values, 'group-filter');
if (Ext.isArray(values['group-filter'])) {
if (values['group-filter'].length === 0) {
@@ -331,8 +337,14 @@ Ext.define('PBS.window.SyncJobEdit', {
values['group-filter'] = [...new Set(values['group-filter'])];
}
}
+ if (me.isCreate) {
+ delete values.delete;
+ }
return values;
},
+ cbind: {
+ isCreate: '{isCreate}', // pass it through
+ },
title: gettext('Group Filter'),
items: [
{
diff --git a/www/window/TokenEdit.js b/www/window/TokenEdit.js
index 805402126..c1856be87 100644
--- a/www/window/TokenEdit.js
+++ b/www/window/TokenEdit.js
@@ -203,9 +203,9 @@ Ext.define('PBS.window.TokenShow', {
],
buttons: [
{
- handler: function(b) {
- document.getElementById('token-secret-value').select();
- document.execCommand("copy");
+ handler: async function(b) {
+ let el = document.getElementById('token-secret-value');
+ await navigator.clipboard.writeText(el.value);
},
text: gettext('Copy Secret Value'),
},
diff --git a/www/window/UserEdit.js b/www/window/UserEdit.js
index 9a48b32fc..d2f4b316b 100644
--- a/www/window/UserEdit.js
+++ b/www/window/UserEdit.js
@@ -180,21 +180,19 @@ Ext.define('PBS.window.UserEdit', {
},
getValues: function(dirtyOnly) {
- var me = this;
+ let me = this;
- var values = me.callParent(arguments);
+ let values = me.callParent(arguments);
- // hack: ExtJS datefield does not submit 0, so we need to set that
if (!values.expire) {
- values.expire = 0;
+ values.expire = 0; // "no expiry" is encoded as 0, so set that explicitly if left empty
}
if (me.isCreate) {
values.userid = values.userid + '@' + values.realm;
+ delete values.realm;
}
- delete values.username;
-
if (!values.password) {
delete values.password;
}
diff --git a/www/window/UserPassword.js b/www/window/UserPassword.js
deleted file mode 100644
index 803ab0b65..000000000
--- a/www/window/UserPassword.js
+++ /dev/null
@@ -1,41 +0,0 @@
-Ext.define('PBS.window.UserPassword', {
- extend: 'Proxmox.window.Edit',
- alias: 'widget.pbsUserPassword',
-
- userid: undefined,
-
- method: 'PUT',
-
- subject: gettext('User Password'),
-
- fieldDefaults: { labelWidth: 120 },
-
- items: [
- {
- xtype: 'textfield',
- inputType: 'password',
- fieldLabel: gettext('Password'),
- minLength: 5,
- allowBlank: false,
- name: 'password',
- listeners: {
- change: function(field) {
- field.next().validate();
- },
- blur: function(field) {
- field.next().validate();
- },
- },
- },
- {
- xtype: 'textfield',
- inputType: 'password',
- fieldLabel: gettext('Confirm password'),
- name: 'verifypassword',
- vtype: 'password',
- initialPassField: 'password',
- allowBlank: false,
- submitValue: false,
- },
- ],
-});
diff --git a/www/window/VerifyJobEdit.js b/www/window/VerifyJobEdit.js
index 7899dcd48..554900259 100644
--- a/www/window/VerifyJobEdit.js
+++ b/www/window/VerifyJobEdit.js
@@ -86,7 +86,9 @@ Ext.define('PBS.window.VerifyJobEdit', {
xtype: 'pbsNamespaceMaxDepth',
name: 'max-depth',
fieldLabel: gettext('Max. Depth'),
- deleteEmpty: true,
+ cbind: {
+ deleteEmpty: '{!isCreate}',
+ },
},
],