switch from task_log! macro to tracing
Import `proxmox-log` and substitute all `task_log!` (and `task_warn!`, `task_error!`) invocations with tracing calls (`info!`, `warn!`, etc.). Remove worker references where they are no longer necessary.

Reviewed-by: Lukas Wagner <l.wagner@proxmox.com>
Tested-by: Lukas Wagner <l.wagner@proxmox.com>
Signed-off-by: Gabriel Goller <g.goller@proxmox.com>
parent cd25c36d21
commit 7e2486e800
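The substitution the diff below performs is largely mechanical: `task_log!`/`task_warn!` took an explicit worker (`WorkerTaskContext`) reference as their first argument, while the `tracing` macros pick the task context up implicitly, so the worker parameter can be dropped from many signatures. A minimal before/after sketch of the pattern (the helper functions here are illustrative, not code from this commit):

    use tracing::{info, warn};

    // Before: the worker handle had to be threaded through to every log site, e.g.
    //     task_log!(worker, "processed {}% ({} chunks)", percentage, chunk_count);
    //     task_warn!(worker, "failed to remove {subdir:?} subdirectory: {err}");

    // After: no worker argument; positional format arguments become
    // inline captures in the format string where possible.
    fn log_progress(percentage: u64, chunk_count: u64) {
        info!("processed {percentage}% ({chunk_count} chunks)");
    }

    fn log_remove_failure(subdir: &str, err: &std::io::Error) {
        warn!("failed to remove {subdir:?} subdirectory: {err}");
    }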
@@ -65,6 +65,7 @@ proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "webs
 proxmox-human-byte = "0.1"
 proxmox-io = "1.0.1" # tools and client use "tokio" feature
 proxmox-lang = "1.1"
+proxmox-log = "0.1"
 proxmox-ldap = "0.2.1"
 proxmox-metrics = "0.3.1"
 proxmox-notify = "0.4"
@@ -147,6 +148,8 @@ tokio = "1.6"
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
 tokio-util = { version = "0.7", features = [ "io" ] }
+tracing = "0.1"
+tracing-subscriber = "0.3.16"
 tower-service = "0.3.0"
 udev = "0.4"
 url = "2.1"
@@ -194,6 +197,8 @@ tokio = { workspace = true, features = [ "fs", "io-util", "io-std", "macros", "n
 tokio-openssl.workspace = true
 tokio-stream.workspace = true
 tokio-util = { workspace = true, features = [ "codec" ] }
+tracing.workspace = true
+tracing-subscriber.workspace = true
 tower-service.workspace = true
 udev.workspace = true
 url.workspace = true
@@ -214,6 +219,7 @@ proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async",
 proxmox-human-byte.workspace = true
 proxmox-io.workspace = true
 proxmox-lang.workspace = true
+proxmox-log.workspace = true
 proxmox-ldap.workspace = true
 proxmox-metrics.workspace = true
 proxmox-notify = { workspace = true, features = [ "pbs-context" ] }
@@ -263,6 +269,7 @@ proxmox-rrd.workspace = true
 #proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" }
 #proxmox-io = { path = "../proxmox/proxmox-io" }
 #proxmox-lang = { path = "../proxmox/proxmox-lang" }
+#proxmox-log = { path = "../proxmox/proxmox-log" }
 #proxmox-ldap = { path = "../proxmox/proxmox-ldap" }
 #proxmox-metrics = { path = "../proxmox/proxmox-metrics" }
 #proxmox-notify = { path = "../proxmox/proxmox-notify" }
debian/control (vendored)
@@ -136,6 +136,8 @@ Build-Depends: bash-completion,
 librust-tokio-util-0.7+default-dev,
 librust-tokio-util-0.7+io-dev,
 librust-tower-service-0.3+default-dev,
+librust-tracing-0.1+default-dev,
+librust-tracing-subscriber-0.3+default-dev (>= 0.3.16-~~),
 librust-udev-0.4+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),
 librust-walkdir-2+default-dev,
@@ -20,6 +20,7 @@ openssl.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 tokio = { workspace = true, features = [] }
+tracing.workspace = true
 walkdir.workspace = true
 zstd.workspace = true
 
@@ -3,6 +3,7 @@ use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};
 
 use anyhow::{bail, format_err, Error};
+use tracing::info;
 
 use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus};
 use proxmox_io::ReadExt;
@@ -10,7 +11,6 @@ use proxmox_sys::fs::{create_dir, create_path, file_type_from_file_stat, CreateO
 use proxmox_sys::process_locker::{
     ProcessLockExclusiveGuard, ProcessLockSharedGuard, ProcessLocker,
 };
-use proxmox_sys::task_log;
 use proxmox_sys::WorkerTaskContext;
 
 use crate::file_formats::{
@@ -92,7 +92,6 @@ impl ChunkStore {
         path: P,
         uid: nix::unistd::Uid,
         gid: nix::unistd::Gid,
-        worker: Option<&dyn WorkerTaskContext>,
         sync_level: DatastoreFSyncLevel,
     ) -> Result<Self, Error>
     where
@@ -143,9 +142,7 @@ impl ChunkStore {
             }
             let percentage = (i * 100) / (64 * 1024);
             if percentage != last_percentage {
-                if let Some(worker) = worker {
-                    task_log!(worker, "Chunkstore create: {}%", percentage)
-                }
+                info!("Chunkstore create: {percentage}%");
                 last_percentage = percentage;
             }
         }
@@ -375,7 +372,7 @@ impl ChunkStore {
        for (entry, percentage, bad) in self.get_chunk_iterator()? {
            if last_percentage != percentage {
                last_percentage = percentage;
-                task_log!(worker, "processed {}% ({} chunks)", percentage, chunk_count,);
+                info!("processed {percentage}% ({chunk_count} chunks)");
            }
 
            worker.check_abort()?;
@@ -579,15 +576,8 @@ fn test_chunk_store1() {
     let user = nix::unistd::User::from_uid(nix::unistd::Uid::current())
         .unwrap()
         .unwrap();
-    let chunk_store = ChunkStore::create(
-        "test",
-        &path,
-        user.uid,
-        user.gid,
-        None,
-        DatastoreFSyncLevel::None,
-    )
-    .unwrap();
+    let chunk_store =
+        ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None).unwrap();
 
     let (chunk, digest) = crate::data_blob::DataChunkBuilder::new(&[0u8, 1u8])
         .build()
@@ -599,14 +589,8 @@ fn test_chunk_store1() {
     let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
     assert!(exists);
 
-    let chunk_store = ChunkStore::create(
-        "test",
-        &path,
-        user.uid,
-        user.gid,
-        None,
-        DatastoreFSyncLevel::None,
-    );
+    let chunk_store =
+        ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None);
     assert!(chunk_store.is_err());
 
     if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
@@ -7,6 +7,7 @@ use std::sync::{Arc, Mutex};
 use anyhow::{bail, format_err, Error};
 use lazy_static::lazy_static;
 use nix::unistd::{unlinkat, UnlinkatFlags};
+use tracing::{info, warn};
 
 use proxmox_human_byte::HumanByte;
 use proxmox_schema::ApiType;
@@ -16,7 +17,6 @@ use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
 use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
 use proxmox_sys::process_locker::ProcessLockSharedGuard;
 use proxmox_sys::WorkerTaskContext;
-use proxmox_sys::{task_log, task_warn};
 
 use pbs_api_types::{
     Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreFSyncLevel,
@@ -965,8 +965,7 @@ impl DataStore {
             let digest = index.index_digest(pos).unwrap();
             if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
                 let hex = hex::encode(digest);
-                task_warn!(
-                    worker,
+                warn!(
                     "warning: unable to access non-existent chunk {hex}, required by {file_name:?}"
                 );
 
@@ -1032,22 +1031,17 @@ impl DataStore {
 
             let percentage = (i + 1) * 100 / image_count;
             if percentage > last_percentage {
-                task_log!(
-                    worker,
-                    "marked {}% ({} of {} index files)",
-                    percentage,
+                info!(
+                    "marked {percentage}% ({} of {image_count} index files)",
                     i + 1,
-                    image_count,
                 );
                 last_percentage = percentage;
             }
         }
 
         if strange_paths_count > 0 {
-            task_log!(
-                worker,
-                "found (and marked) {} index files outside of expected directory scheme",
-                strange_paths_count,
+            info!(
                "found (and marked) {strange_paths_count} index files outside of expected directory scheme"
             );
         }
 
@@ -1085,11 +1079,11 @@ impl DataStore {
             ..Default::default()
         };
 
-        task_log!(worker, "Start GC phase1 (mark used chunks)");
+        info!("Start GC phase1 (mark used chunks)");
 
         self.mark_used_chunks(&mut gc_status, worker)?;
 
-        task_log!(worker, "Start GC phase2 (sweep unused chunks)");
+        info!("Start GC phase2 (sweep unused chunks)");
         self.inner.chunk_store.sweep_unused_chunks(
             oldest_writer,
             phase1_start_time,
@@ -1097,30 +1091,27 @@ impl DataStore {
             worker,
         )?;
 
-        task_log!(
-            worker,
+        info!(
             "Removed garbage: {}",
             HumanByte::from(gc_status.removed_bytes),
         );
-        task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
+        info!("Removed chunks: {}", gc_status.removed_chunks);
         if gc_status.pending_bytes > 0 {
-            task_log!(
-                worker,
+            info!(
                 "Pending removals: {} (in {} chunks)",
                 HumanByte::from(gc_status.pending_bytes),
                 gc_status.pending_chunks,
             );
         }
         if gc_status.removed_bad > 0 {
-            task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
+            info!("Removed bad chunks: {}", gc_status.removed_bad);
        }
 
         if gc_status.still_bad > 0 {
-            task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
+            info!("Leftover bad chunks: {}", gc_status.still_bad);
         }
 
-        task_log!(
-            worker,
+        info!(
             "Original data usage: {}",
             HumanByte::from(gc_status.index_data_bytes),
         );
@@ -1128,15 +1119,13 @@ impl DataStore {
         if gc_status.index_data_bytes > 0 {
             let comp_per =
                 (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
-            task_log!(
-                worker,
-                "On-Disk usage: {} ({:.2}%)",
-                HumanByte::from(gc_status.disk_bytes),
-                comp_per,
+            info!(
                "On-Disk usage: {} ({comp_per:.2}%)",
                HumanByte::from(gc_status.disk_bytes)
             );
         }
 
-        task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
+        info!("On-Disk chunks: {}", gc_status.disk_chunks);
 
         let deduplication_factor = if gc_status.disk_bytes > 0 {
             (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
@@ -1144,11 +1133,11 @@ impl DataStore {
             1.0
         };
 
-        task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
+        info!("Deduplication factor: {deduplication_factor:.2}");
 
         if gc_status.disk_chunks > 0 {
             let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
-            task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
+            info!("Average chunk size: {}", HumanByte::from(avg_chunk));
         }
 
         if let Ok(serialized) = serde_json::to_string(&gc_status) {
@@ -1380,11 +1369,7 @@ impl DataStore {
     /// Destroy a datastore. This requires that there are no active operations on the datastore.
     ///
     /// This is a synchronous operation and should be run in a worker-thread.
-    pub fn destroy(
-        name: &str,
-        destroy_data: bool,
-        worker: &dyn WorkerTaskContext,
-    ) -> Result<(), Error> {
+    pub fn destroy(name: &str, destroy_data: bool) -> Result<(), Error> {
         let config_lock = pbs_config::datastore::lock_config()?;
 
         let (mut config, _digest) = pbs_config::datastore::config()?;
@@ -1412,13 +1397,13 @@ impl DataStore {
         let remove = |subdir, ok: &mut bool| {
             if let Err(err) = std::fs::remove_dir_all(base.join(subdir)) {
                 if err.kind() != io::ErrorKind::NotFound {
-                    task_warn!(worker, "failed to remove {subdir:?} subdirectory: {err}");
+                    warn!("failed to remove {subdir:?} subdirectory: {err}");
                     *ok = false;
                 }
             }
         };
 
-        task_log!(worker, "Deleting datastore data...");
+        info!("Deleting datastore data...");
         remove("ns", &mut ok); // ns first
         remove("ct", &mut ok);
         remove("vm", &mut ok);
@@ -1427,7 +1412,7 @@ impl DataStore {
         if ok {
             if let Err(err) = std::fs::remove_file(base.join(".gc-status")) {
                 if err.kind() != io::ErrorKind::NotFound {
-                    task_warn!(worker, "failed to remove .gc-status file: {err}");
+                    warn!("failed to remove .gc-status file: {err}");
                     ok = false;
                 }
             }
@@ -1441,7 +1426,7 @@ impl DataStore {
 
         // now the config
         if ok {
-            task_log!(worker, "Removing datastore from config...");
+            info!("Removing datastore from config...");
             let _lock = pbs_config::datastore::lock_config()?;
             let _ = config.sections.remove(name);
             pbs_config::datastore::save_config(&config)?;
@@ -1452,35 +1437,32 @@ impl DataStore {
         if ok {
             if let Err(err) = std::fs::remove_file(base.join(".lock")) {
                 if err.kind() != io::ErrorKind::NotFound {
-                    task_warn!(worker, "failed to remove .lock file: {err}");
+                    warn!("failed to remove .lock file: {err}");
                     ok = false;
                 }
             }
         }
 
         if ok {
-            task_log!(worker, "Finished deleting data.");
+            info!("Finished deleting data.");
 
             match std::fs::remove_dir(base) {
-                Ok(()) => task_log!(worker, "Removed empty datastore directory."),
+                Ok(()) => info!("Removed empty datastore directory."),
                 Err(err) if err.kind() == io::ErrorKind::NotFound => {
                     // weird, but ok
                 }
                 Err(err) if err.is_errno(nix::errno::Errno::EBUSY) => {
-                    task_warn!(
-                        worker,
-                        "Cannot delete datastore directory (is it a mount point?)."
-                    )
+                    warn!("Cannot delete datastore directory (is it a mount point?).")
                 }
                 Err(err) if err.is_errno(nix::errno::Errno::ENOTEMPTY) => {
-                    task_warn!(worker, "Datastore directory not empty, not deleting.")
+                    warn!("Datastore directory not empty, not deleting.")
                 }
                 Err(err) => {
-                    task_warn!(worker, "Failed to remove datastore directory: {err}");
+                    warn!("Failed to remove datastore directory: {err}");
                }
             }
         } else {
-            task_log!(worker, "There were errors deleting data.");
+            info!("There were errors deleting data.");
         }
     }
 
@@ -5,8 +5,9 @@ use std::sync::{Arc, Mutex};
 use std::time::Instant;
 
 use anyhow::{bail, format_err, Error};
+use tracing::{error, info};
 
-use proxmox_sys::{task_log, WorkerTaskContext};
+use proxmox_sys::WorkerTaskContext;
 
 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, Authid, BackupNamespace, BackupType, CryptMode,
@@ -69,11 +70,7 @@ fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
     }
 }
 
-fn rename_corrupted_chunk(
-    datastore: Arc<DataStore>,
-    digest: &[u8; 32],
-    worker: &dyn WorkerTaskContext,
-) {
+fn rename_corrupted_chunk(datastore: Arc<DataStore>, digest: &[u8; 32]) {
     let (path, digest_str) = datastore.chunk_path(digest);
 
     let mut counter = 0;
@@ -89,17 +86,12 @@ fn rename_corrupted_chunk(
 
     match std::fs::rename(&path, &new_path) {
         Ok(_) => {
-            task_log!(worker, "corrupted chunk renamed to {:?}", &new_path);
+            info!("corrupted chunk renamed to {:?}", &new_path);
         }
         Err(err) => {
             match err.kind() {
                 std::io::ErrorKind::NotFound => { /* ignored */ }
-                _ => task_log!(
-                    worker,
-                    "could not rename corrupted chunk {:?} - {}",
-                    &path,
-                    err
-                ),
+                _ => info!("could not rename corrupted chunk {:?} - {err}", &path),
             }
         }
     };
@@ -117,7 +109,6 @@ fn verify_index_chunks(
     let mut read_bytes = 0;
     let mut decoded_bytes = 0;
 
-    let worker2 = Arc::clone(&verify_worker.worker);
     let datastore2 = Arc::clone(&verify_worker.datastore);
     let corrupt_chunks2 = Arc::clone(&verify_worker.corrupt_chunks);
     let verified_chunks2 = Arc::clone(&verify_worker.verified_chunks);
@@ -130,7 +121,7 @@ fn verify_index_chunks(
         let chunk_crypt_mode = match chunk.crypt_mode() {
             Err(err) => {
                 corrupt_chunks2.lock().unwrap().insert(digest);
-                task_log!(worker2, "can't verify chunk, unknown CryptMode - {}", err);
+                info!("can't verify chunk, unknown CryptMode - {err}");
                 errors2.fetch_add(1, Ordering::SeqCst);
                 return Ok(());
             }
@@ -138,20 +129,17 @@ fn verify_index_chunks(
         };
 
         if chunk_crypt_mode != crypt_mode {
-            task_log!(
-                worker2,
-                "chunk CryptMode {:?} does not match index CryptMode {:?}",
-                chunk_crypt_mode,
-                crypt_mode
+            info!(
                "chunk CryptMode {chunk_crypt_mode:?} does not match index CryptMode {crypt_mode:?}"
             );
             errors2.fetch_add(1, Ordering::SeqCst);
         }
 
         if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
             corrupt_chunks2.lock().unwrap().insert(digest);
-            task_log!(worker2, "{}", err);
+            info!("{err}");
             errors2.fetch_add(1, Ordering::SeqCst);
-            rename_corrupted_chunk(datastore2.clone(), &digest, &worker2);
+            rename_corrupted_chunk(datastore2.clone(), &digest);
         } else {
             verified_chunks2.lock().unwrap().insert(digest);
         }
@@ -175,11 +163,7 @@ fn verify_index_chunks(
             .contains(digest)
         {
             let digest_str = hex::encode(digest);
-            task_log!(
-                verify_worker.worker,
-                "chunk {} was marked as corrupt",
-                digest_str
-            );
+            info!("chunk {digest_str} was marked as corrupt");
             errors.fetch_add(1, Ordering::SeqCst);
             true
         } else {
@@ -218,17 +202,9 @@ fn verify_index_chunks(
                     .lock()
                     .unwrap()
                     .insert(info.digest);
-                task_log!(
-                    verify_worker.worker,
-                    "can't verify chunk, load failed - {}",
-                    err
-                );
+                error!("can't verify chunk, load failed - {err}");
                 errors.fetch_add(1, Ordering::SeqCst);
-                rename_corrupted_chunk(
-                    verify_worker.datastore.clone(),
-                    &info.digest,
-                    &verify_worker.worker,
-                );
+                rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest);
             }
             Ok(chunk) => {
                 let size = info.size();
@@ -251,15 +227,8 @@ fn verify_index_chunks(
 
     let error_count = errors.load(Ordering::SeqCst);
 
-    task_log!(
-        verify_worker.worker,
-        " verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
-        read_bytes_mib,
-        decoded_bytes_mib,
-        elapsed,
-        read_speed,
-        decode_speed,
-        error_count,
-    );
+    info!(
        " verified {read_bytes_mib:.2}/{decoded_bytes_mib:.2} MiB in {elapsed:.2} seconds, speed {read_speed:.2}/{decode_speed:.2} MiB/s ({error_count} errors)"
+    );
 
     if errors.load(Ordering::SeqCst) > 0 {
@@ -329,8 +298,7 @@ pub fn verify_backup_dir(
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<bool, Error> {
     if !backup_dir.full_path().exists() {
-        task_log!(
-            verify_worker.worker,
+        info!(
             "SKIPPED: verify {}:{} - snapshot does not exist (anymore).",
             verify_worker.datastore.name(),
             backup_dir.dir(),
@@ -348,8 +316,7 @@ pub fn verify_backup_dir(
             verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
         }
         Err(err) => {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
                 verify_worker.datastore.name(),
                 backup_dir.dir(),
@@ -371,8 +338,7 @@ pub fn verify_backup_dir_with_lock(
     let manifest = match backup_dir.load_manifest() {
         Ok((manifest, _)) => manifest,
         Err(err) => {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "verify {}:{} - manifest load error: {}",
                 verify_worker.datastore.name(),
                 backup_dir.dir(),
@@ -384,8 +350,7 @@ pub fn verify_backup_dir_with_lock(
 
     if let Some(filter) = filter {
         if !filter(&manifest) {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "SKIPPED: verify {}:{} (recently verified)",
                 verify_worker.datastore.name(),
                 backup_dir.dir(),
@@ -394,8 +359,7 @@ pub fn verify_backup_dir_with_lock(
         }
     }
 
-    task_log!(
-        verify_worker.worker,
+    info!(
         "verify {}:{}",
         verify_worker.datastore.name(),
         backup_dir.dir()
@@ -406,7 +370,7 @@ pub fn verify_backup_dir_with_lock(
     let mut verify_result = VerifyState::Ok;
     for info in manifest.files() {
         let result = proxmox_lang::try_block!({
-            task_log!(verify_worker.worker, " check {}", info.filename);
+            info!(" check {}", info.filename);
             match archive_type(&info.filename)? {
                 ArchiveType::FixedIndex => verify_fixed_index(verify_worker, backup_dir, info),
                 ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, backup_dir, info),
@@ -418,8 +382,7 @@ pub fn verify_backup_dir_with_lock(
         verify_worker.worker.fail_on_shutdown()?;
 
         if let Err(err) = result {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "verify {}:{}/{} failed: {}",
                 verify_worker.datastore.name(),
                 backup_dir.dir(),
@@ -463,8 +426,7 @@ pub fn verify_backup_group(
     let mut list = match group.list_backups() {
         Ok(list) => list,
         Err(err) => {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "verify {}, group {} - unable to list backups: {}",
                 print_store_and_ns(verify_worker.datastore.name(), group.backup_ns()),
                 group.group(),
@@ -475,8 +437,7 @@ pub fn verify_backup_group(
     };
 
     let snapshot_count = list.len();
-    task_log!(
-        verify_worker.worker,
+    info!(
         "verify group {}:{} ({} snapshots)",
         verify_worker.datastore.name(),
         group.group(),
@@ -494,9 +455,8 @@ pub fn verify_backup_group(
             ));
         }
         progress.done_snapshots = pos as u64 + 1;
-        task_log!(verify_worker.worker, "percentage done: {}", progress);
+        info!("percentage done: {progress}");
     }
 
     Ok(errors)
 }
 
@@ -516,16 +476,11 @@ pub fn verify_all_backups(
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<Vec<String>, Error> {
     let mut errors = Vec::new();
-    let worker = Arc::clone(&verify_worker.worker);
 
-    task_log!(
-        worker,
-        "verify datastore {}",
-        verify_worker.datastore.name()
-    );
+    info!("verify datastore {}", verify_worker.datastore.name());
 
     let owner_filtered = if let Some(owner) = &owner {
-        task_log!(worker, "limiting to backups owned by {}", owner);
+        info!("limiting to backups owned by {owner}");
         true
     } else {
         false
@@ -553,7 +508,7 @@ pub fn verify_all_backups(
             }
             Err(err) => {
                 // we don't filter by owner, but we want to log the error
-                task_log!(worker, "error on iterating groups in ns '{ns}' - {err}");
+                info!("error on iterating groups in ns '{ns}' - {err}");
                 errors.push(err.to_string());
                 None
             }
@@ -563,7 +518,7 @@ pub fn verify_all_backups(
         })
         .collect::<Vec<BackupGroup>>(),
         Err(err) => {
-            task_log!(worker, "unable to list backups: {}", err,);
+            info!("unable to list backups: {err}");
             return Ok(errors);
         }
     };
@@ -571,7 +526,7 @@ pub fn verify_all_backups(
     list.sort_unstable_by(|a, b| a.group().cmp(b.group()));
 
     let group_count = list.len();
-    task_log!(worker, "found {} groups", group_count);
+    info!("found {group_count} groups");
 
     let mut progress = StoreProgress::new(group_count as u64);
 
@@ -5,13 +5,14 @@ use anyhow::{bail, Error};
 use futures::*;
 use http::Response;
 use hyper::{Body, StatusCode};
+use tracing_subscriber::filter::LevelFilter;
 
 use proxmox_lang::try_block;
+use proxmox_log::init_logger;
+use proxmox_rest_server::{daemon, ApiConfig, RestServer};
 use proxmox_router::RpcEnvironmentType;
 use proxmox_sys::fs::CreateOptions;
 
-use proxmox_rest_server::{daemon, ApiConfig, RestServer};
-
 use proxmox_backup::auth_helpers::*;
 use proxmox_backup::config;
 use proxmox_backup::server::auth::check_pbs_auth;
@@ -40,13 +41,7 @@ fn get_index() -> Pin<Box<dyn Future<Output = Response<Body>> + Send>> {
 }
 
 async fn run() -> Result<(), Error> {
-    if let Err(err) = syslog::init(
-        syslog::Facility::LOG_DAEMON,
-        log::LevelFilter::Info,
-        Some("proxmox-backup-api"),
-    ) {
-        bail!("unable to inititialize syslog - {}", err);
-    }
+    init_logger("PBS_LOG", LevelFilter::INFO, "proxmox-backup-api")?;
 
     config::create_configdir()?;
 
@@ -7,18 +7,20 @@ use http::request::Parts;
 use http::Response;
 use hyper::header;
 use hyper::{Body, StatusCode};
+use tracing::{info, warn};
+use tracing_subscriber::filter::LevelFilter;
 use url::form_urlencoded;
 
 use openssl::ssl::SslAcceptor;
 use serde_json::{json, Value};
 
 use proxmox_lang::try_block;
+use proxmox_log::init_logger;
 use proxmox_metrics::MetricsData;
 use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
 use proxmox_sys::fs::{CreateOptions, FileSystemInformation};
 use proxmox_sys::linux::procfs::{Loadavg, ProcFsMemInfo, ProcFsNetDev, ProcFsStat};
 use proxmox_sys::logrotate::LogRotate;
-use proxmox_sys::{task_log, task_warn};
 
 use pbs_datastore::DataStore;
 
@@ -181,21 +183,7 @@ async fn get_index_future(env: RestEnvironment, parts: Parts) -> Response<Body>
 }
 
 async fn run() -> Result<(), Error> {
-    // Note: To debug early connection error use
-    // PROXMOX_DEBUG=1 ./target/release/proxmox-backup-proxy
-    let debug = std::env::var("PROXMOX_DEBUG").is_ok();
-
-    if let Err(err) = syslog::init(
-        syslog::Facility::LOG_DAEMON,
-        if debug {
-            log::LevelFilter::Debug
-        } else {
-            log::LevelFilter::Info
-        },
-        Some("proxmox-backup-proxy"),
-    ) {
-        bail!("unable to inititialize syslog - {err}");
-    }
+    init_logger("PBS_LOG", LevelFilter::INFO, "proxmox-backup-proxy")?;
 
     proxmox_backup::auth_helpers::setup_auth_context(false);
     proxmox_backup::server::notifications::init()?;
@@ -301,7 +289,7 @@ async fn run() -> Result<(), Error> {
     })?;
 
     let connections = proxmox_rest_server::connection::AcceptBuilder::new()
-        .debug(debug)
+        .debug(tracing::enabled!(tracing::Level::DEBUG))
         .rate_limiter_lookup(Arc::new(lookup_rate_limiter))
         .tcp_keepalive_time(PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
 
@@ -750,7 +738,7 @@ async fn schedule_task_log_rotate() {
         false,
         move |worker| {
             job.start(&worker.upid().to_string())?;
-            task_log!(worker, "starting task log rotation");
+            info!("starting task log rotation");
 
             let result = try_block!({
                 let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
@@ -775,9 +763,9 @@ async fn schedule_task_log_rotate() {
                 )?;
 
                 if has_rotated {
-                    task_log!(worker, "task log archive was rotated");
+                    info!("task log archive was rotated");
                 } else {
-                    task_log!(worker, "task log archive was not rotated");
+                    info!("task log archive was not rotated");
                 }
 
                 let max_size = 32 * 1024 * 1024 - 1;
@@ -793,9 +781,9 @@ async fn schedule_task_log_rotate() {
                 if logrotate.rotate(max_size)? {
                     println!("rotated access log, telling daemons to re-open log file");
                     proxmox_async::runtime::block_on(command_reopen_access_logfiles())?;
-                    task_log!(worker, "API access log was rotated");
+                    info!("API access log was rotated");
                 } else {
-                    task_log!(worker, "API access log was not rotated");
+                    info!("API access log was not rotated");
                 }
 
                 let mut logrotate = LogRotate::new(
@@ -808,15 +796,15 @@ async fn schedule_task_log_rotate() {
                 if logrotate.rotate(max_size)? {
                     println!("rotated auth log, telling daemons to re-open log file");
                     proxmox_async::runtime::block_on(command_reopen_auth_logfiles())?;
-                    task_log!(worker, "API authentication log was rotated");
+                    info!("API authentication log was rotated");
                 } else {
-                    task_log!(worker, "API authentication log was not rotated");
+                    info!("API authentication log was not rotated");
                 }
 
                 if has_rotated {
-                    task_log!(worker, "cleaning up old task logs");
-                    if let Err(err) = cleanup_old_tasks(&worker, true) {
-                        task_warn!(worker, "could not completely cleanup old tasks: {err}");
+                    info!("cleaning up old task logs");
+                    if let Err(err) = cleanup_old_tasks(true) {
+                        warn!("could not completely cleanup old tasks: {err}");
                     }
                 }
 
@@ -1,7 +1,7 @@
 use anyhow::Error;
 use std::sync::Arc;
 
-use proxmox_sys::task_log;
+use tracing::info;
 
 use pbs_api_types::Authid;
 use pbs_datastore::DataStore;
@@ -28,9 +28,9 @@ pub fn do_garbage_collection_job(
         move |worker| {
             job.start(&worker.upid().to_string())?;
 
-            task_log!(worker, "starting garbage collection on store {store}");
+            info!("starting garbage collection on store {store}");
             if let Some(event_str) = schedule {
-                task_log!(worker, "task triggered by schedule '{event_str}'");
+                info!("task triggered by schedule '{event_str}'");
             }
 
             let result = datastore.garbage_collection(&*worker, worker.upid());
@@ -1,8 +1,7 @@
 use std::sync::Arc;
 
 use anyhow::Error;
-
-use proxmox_sys::{task_log, task_warn};
+use tracing::{info, warn};
 
 use pbs_api_types::{
     print_store_and_ns, Authid, KeepOptions, Operation, PruneJobOptions, MAX_NAMESPACE_DEPTH,
@@ -16,7 +15,6 @@ use crate::backup::ListAccessibleBackupGroups;
 use crate::server::jobstate::Job;
 
 pub fn prune_datastore(
-    worker: Arc<WorkerTask>,
     auth_id: Authid,
     prune_options: PruneJobOptions,
     datastore: Arc<DataStore>,
@@ -31,19 +29,19 @@ pub fn prune_datastore(
     };
     let ns = prune_options.ns.clone().unwrap_or_default();
     let store_ns = print_store_and_ns(store, &ns);
-    task_log!(worker, "Starting datastore prune on {store_ns}, {depth}");
+    info!("Starting datastore prune on {store_ns}, {depth}");
 
     if dry_run {
-        task_log!(worker, "(dry test run)");
+        info!("(dry test run)");
     }
 
     let keep_all = !prune_options.keeps_something();
 
     if keep_all {
-        task_log!(worker, "No prune selection - keeping all files.");
+        info!("No prune selection - keeping all files.");
     } else {
         let rendered_options = cli_prune_options_string(&prune_options);
-        task_log!(worker, "retention options: {rendered_options}");
+        info!("retention options: {rendered_options}");
     }
 
     for group in ListAccessibleBackupGroups::new_with_privs(
@@ -61,8 +59,7 @@ pub fn prune_datastore(
         let mut prune_info = compute_prune_info(list, &prune_options.keep)?;
         prune_info.reverse(); // delete older snapshots first
 
-        task_log!(
-            worker,
+        info!(
             "Pruning group {ns}:\"{}/{}\"",
             group.backup_type(),
             group.backup_id()
@@ -70,11 +67,9 @@ pub fn prune_datastore(
 
         for (info, mark) in prune_info {
             let keep = keep_all || mark.keep();
-            task_log!(
-                worker,
-                "{}{} {}/{}/{}",
+            info!(
                "{}{mark} {}/{}/{}",
                 if dry_run { "would " } else { "" },
-                mark,
                 group.backup_type(),
                 group.backup_id(),
                 info.backup_dir.backup_time_string()
@@ -82,7 +77,7 @@ pub fn prune_datastore(
             if !keep && !dry_run {
                 if let Err(err) = datastore.remove_backup_dir(ns, info.backup_dir.as_ref(), false) {
                     let path = info.backup_dir.relative_path();
-                    task_warn!(worker, "failed to remove dir {path:?}: {err}");
+                    warn!("failed to remove dir {path:?}: {err}");
                 }
             }
         }
@@ -150,13 +145,13 @@ pub fn do_prune_job(
         move |worker| {
             job.start(&worker.upid().to_string())?;
 
-            task_log!(worker, "prune job '{}'", job.jobname());
+            info!("prune job '{}'", job.jobname());
 
             if let Some(event_str) = schedule {
-                task_log!(worker, "task triggered by schedule '{event_str}'");
+                info!("task triggered by schedule '{event_str}'");
             }
 
-            let result = prune_datastore(worker.clone(), auth_id, prune_options, datastore, false);
+            let result = prune_datastore(auth_id, prune_options, datastore, false);
 
             let status = worker.create_state(&result);
 
@@ -10,10 +10,9 @@ use std::time::{Duration, SystemTime};
 use anyhow::{bail, format_err, Error};
 use http::StatusCode;
 use proxmox_human_byte::HumanByte;
-use proxmox_rest_server::WorkerTask;
 use proxmox_router::HttpError;
-use proxmox_sys::{task_log, task_warn};
 use serde_json::json;
+use tracing::{info, warn};
 
 use pbs_api_types::{
     print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupFilter,
@@ -122,7 +121,6 @@ trait PullSource: Send + Sync {
     async fn list_namespaces(
         &self,
         max_depth: &mut Option<usize>,
-        worker: &WorkerTask,
     ) -> Result<Vec<BackupNamespace>, Error>;
 
     /// Lists groups within a specific namespace from the source.
@@ -137,7 +135,6 @@ trait PullSource: Send + Sync {
         &self,
         namespace: &BackupNamespace,
         group: &BackupGroup,
-        worker: &WorkerTask,
     ) -> Result<Vec<BackupDir>, Error>;
     fn get_ns(&self) -> BackupNamespace;
     fn get_store(&self) -> &str;
@@ -155,7 +152,6 @@ impl PullSource for RemoteSource {
     async fn list_namespaces(
         &self,
         max_depth: &mut Option<usize>,
-        worker: &WorkerTask,
     ) -> Result<Vec<BackupNamespace>, Error> {
         if self.ns.is_root() && max_depth.map_or(false, |depth| depth == 0) {
             return Ok(vec![self.ns.clone()]);
@@ -178,8 +174,8 @@ impl PullSource for RemoteSource {
                 Some(HttpError { code, message }) => match code {
                     &StatusCode::NOT_FOUND => {
                         if self.ns.is_root() && max_depth.is_none() {
-                            task_warn!(worker, "Could not query remote for namespaces (404) -> temporarily switching to backwards-compat mode");
-                            task_warn!(worker, "Either make backwards-compat mode explicit (max-depth == 0) or upgrade remote system.");
+                            warn!("Could not query remote for namespaces (404) -> temporarily switching to backwards-compat mode");
+                            warn!("Either make backwards-compat mode explicit (max-depth == 0) or upgrade remote system.");
                             max_depth.replace(0);
                         } else {
                             bail!("Remote namespace set/recursive sync requested, but remote does not support namespaces.")
@@ -238,7 +234,6 @@ impl PullSource for RemoteSource {
         &self,
         namespace: &BackupNamespace,
         group: &BackupGroup,
-        worker: &WorkerTask,
     ) -> Result<Vec<BackupDir>, Error> {
         let path = format!("api2/json/admin/datastore/{}/snapshots", self.repo.store());
 
@@ -261,11 +256,7 @@ impl PullSource for RemoteSource {
                 let snapshot = item.backup;
                 // in-progress backups can't be synced
                 if item.size.is_none() {
-                    task_log!(
-                        worker,
-                        "skipping snapshot {} - in-progress backup",
-                        snapshot
-                    );
+                    info!("skipping snapshot {snapshot} - in-progress backup");
                     return None;
                 }
 
@@ -301,7 +292,6 @@ impl PullSource for LocalSource {
     async fn list_namespaces(
         &self,
         max_depth: &mut Option<usize>,
-        _worker: &WorkerTask,
     ) -> Result<Vec<BackupNamespace>, Error> {
         ListNamespacesRecursive::new_max_depth(
             self.store.clone(),
@@ -333,7 +323,6 @@ impl PullSource for LocalSource {
         &self,
         namespace: &BackupNamespace,
         group: &BackupGroup,
-        _worker: &WorkerTask,
     ) -> Result<Vec<BackupDir>, Error> {
         Ok(self
             .store
@@ -381,19 +370,10 @@ trait PullReader: Send + Sync {
     /// Asynchronously loads a file from the source into a local file.
     /// `filename` is the name of the file to load from the source.
     /// `into` is the path of the local file to load the source file into.
-    async fn load_file_into(
-        &self,
-        filename: &str,
-        into: &Path,
-        worker: &WorkerTask,
-    ) -> Result<Option<DataBlob>, Error>;
+    async fn load_file_into(&self, filename: &str, into: &Path) -> Result<Option<DataBlob>, Error>;
 
     /// Tries to download the client log from the source and save it into a local file.
-    async fn try_download_client_log(
-        &self,
-        to_path: &Path,
-        worker: &WorkerTask,
-    ) -> Result<(), Error>;
+    async fn try_download_client_log(&self, to_path: &Path) -> Result<(), Error>;
 
     fn skip_chunk_sync(&self, target_store_name: &str) -> bool;
 }
@@ -409,12 +389,7 @@ impl PullReader for RemoteReader {
         ))
     }
 
-    async fn load_file_into(
-        &self,
-        filename: &str,
-        into: &Path,
-        worker: &WorkerTask,
-    ) -> Result<Option<DataBlob>, Error> {
+    async fn load_file_into(&self, filename: &str, into: &Path) -> Result<Option<DataBlob>, Error> {
         let mut tmp_file = std::fs::OpenOptions::new()
             .write(true)
             .create(true)
@@ -426,8 +401,7 @@ impl PullReader for RemoteReader {
             match err.downcast_ref::<HttpError>() {
                 Some(HttpError { code, message }) => match *code {
                     StatusCode::NOT_FOUND => {
-                        task_log!(
-                            worker,
+                        info!(
                             "skipping snapshot {} - vanished since start of sync",
                             &self.dir,
                         );
@@ -446,11 +420,7 @@ impl PullReader for RemoteReader {
         Ok(DataBlob::load_from_reader(&mut tmp_file).ok())
     }
 
-    async fn try_download_client_log(
-        &self,
-        to_path: &Path,
-        worker: &WorkerTask,
-    ) -> Result<(), Error> {
+    async fn try_download_client_log(&self, to_path: &Path) -> Result<(), Error> {
         let mut tmp_path = to_path.to_owned();
         tmp_path.set_extension("tmp");
 
@@ -469,7 +439,7 @@ impl PullReader for RemoteReader {
             if let Err(err) = std::fs::rename(&tmp_path, to_path) {
                 bail!("Atomic rename file {:?} failed - {}", to_path, err);
             }
-            task_log!(worker, "got backup log file {:?}", CLIENT_LOG_BLOB_NAME);
+            info!("got backup log file {CLIENT_LOG_BLOB_NAME:?}");
         }
 
         Ok(())
@@ -490,12 +460,7 @@ impl PullReader for LocalReader {
         ))
     }
 
-    async fn load_file_into(
-        &self,
-        filename: &str,
-        into: &Path,
-        _worker: &WorkerTask,
-    ) -> Result<Option<DataBlob>, Error> {
+    async fn load_file_into(&self, filename: &str, into: &Path) -> Result<Option<DataBlob>, Error> {
         let mut tmp_file = std::fs::OpenOptions::new()
             .write(true)
             .create(true)
@@ -509,11 +474,7 @@ impl PullReader for LocalReader {
         Ok(DataBlob::load_from_reader(&mut tmp_file).ok())
     }
 
-    async fn try_download_client_log(
-        &self,
-        _to_path: &Path,
-        _worker: &WorkerTask,
-    ) -> Result<(), Error> {
+    async fn try_download_client_log(&self, _to_path: &Path) -> Result<(), Error> {
         Ok(())
     }
 
@@ -603,7 +564,6 @@ impl PullParameters {
 }
 
 async fn pull_index_chunks<I: IndexFile>(
-    worker: &WorkerTask,
     chunk_reader: Arc<dyn AsyncReadChunk>,
     target: Arc<DataStore>,
     index: I,
@@ -658,10 +618,10 @@ async fn pull_index_chunks<I: IndexFile>(
             target.cond_touch_chunk(&info.digest, false)
         })?;
         if chunk_exists {
-            //task_log!(worker, "chunk {} exists {}", pos, hex::encode(digest));
+            //info!("chunk {} exists {}", pos, hex::encode(digest));
             return Ok::<_, Error>(());
        }
-        //task_log!(worker, "sync {} chunk {}", pos, hex::encode(digest));
+        //info!("sync {} chunk {}", pos, hex::encode(digest));
         let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
         let raw_size = chunk.raw_size() as usize;
 
@@ -689,8 +649,7 @@ async fn pull_index_chunks<I: IndexFile>(
     let bytes = bytes.load(Ordering::SeqCst);
     let chunk_count = chunk_count.load(Ordering::SeqCst);
 
-    task_log!(
-        worker,
+    info!(
         "downloaded {} ({}/s)",
         HumanByte::from(bytes),
         HumanByte::new_binary(bytes as f64 / elapsed.as_secs_f64()),
@@ -730,7 +689,6 @@ fn verify_archive(info: &FileInfo, csum: &[u8; 32], size: u64) -> Result<(), Err
 /// - if archive is an index, pull referenced chunks
|
||||||
/// - Rename tmp file into real path
|
/// - Rename tmp file into real path
|
||||||
async fn pull_single_archive<'a>(
|
async fn pull_single_archive<'a>(
|
||||||
worker: &'a WorkerTask,
|
|
||||||
reader: Arc<dyn PullReader + 'a>,
|
reader: Arc<dyn PullReader + 'a>,
|
||||||
snapshot: &'a pbs_datastore::BackupDir,
|
snapshot: &'a pbs_datastore::BackupDir,
|
||||||
archive_info: &'a FileInfo,
|
archive_info: &'a FileInfo,
|
||||||
@ -745,11 +703,9 @@ async fn pull_single_archive<'a>(
|
|||||||
|
|
||||||
let mut pull_stats = PullStats::default();
|
let mut pull_stats = PullStats::default();
|
||||||
|
|
||||||
task_log!(worker, "sync archive {}", archive_name);
|
info!("sync archive {archive_name}");
|
||||||
|
|
||||||
reader
|
reader.load_file_into(archive_name, &tmp_path).await?;
|
||||||
.load_file_into(archive_name, &tmp_path, worker)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
let mut tmpfile = std::fs::OpenOptions::new().read(true).open(&tmp_path)?;
|
let mut tmpfile = std::fs::OpenOptions::new().read(true).open(&tmp_path)?;
|
||||||
|
|
||||||
@ -762,10 +718,9 @@ async fn pull_single_archive<'a>(
|
|||||||
verify_archive(archive_info, &csum, size)?;
|
verify_archive(archive_info, &csum, size)?;
|
||||||
|
|
||||||
if reader.skip_chunk_sync(snapshot.datastore().name()) {
|
if reader.skip_chunk_sync(snapshot.datastore().name()) {
|
||||||
task_log!(worker, "skipping chunk sync for same datastore");
|
info!("skipping chunk sync for same datastore");
|
||||||
} else {
|
} else {
|
||||||
let stats = pull_index_chunks(
|
let stats = pull_index_chunks(
|
||||||
worker,
|
|
||||||
reader.chunk_reader(archive_info.crypt_mode),
|
reader.chunk_reader(archive_info.crypt_mode),
|
||||||
snapshot.datastore().clone(),
|
snapshot.datastore().clone(),
|
||||||
index,
|
index,
|
||||||
@ -783,10 +738,9 @@ async fn pull_single_archive<'a>(
|
|||||||
verify_archive(archive_info, &csum, size)?;
|
verify_archive(archive_info, &csum, size)?;
|
||||||
|
|
||||||
if reader.skip_chunk_sync(snapshot.datastore().name()) {
|
if reader.skip_chunk_sync(snapshot.datastore().name()) {
|
||||||
task_log!(worker, "skipping chunk sync for same datastore");
|
info!("skipping chunk sync for same datastore");
|
||||||
} else {
|
} else {
|
||||||
let stats = pull_index_chunks(
|
let stats = pull_index_chunks(
|
||||||
worker,
|
|
||||||
reader.chunk_reader(archive_info.crypt_mode),
|
reader.chunk_reader(archive_info.crypt_mode),
|
||||||
snapshot.datastore().clone(),
|
snapshot.datastore().clone(),
|
||||||
index,
|
index,
|
||||||
@ -818,7 +772,6 @@ async fn pull_single_archive<'a>(
|
|||||||
/// -- if not, pull it from the remote
|
/// -- if not, pull it from the remote
|
||||||
/// - Download log if not already existing
|
/// - Download log if not already existing
|
||||||
async fn pull_snapshot<'a>(
|
async fn pull_snapshot<'a>(
|
||||||
worker: &'a WorkerTask,
|
|
||||||
reader: Arc<dyn PullReader + 'a>,
|
reader: Arc<dyn PullReader + 'a>,
|
||||||
snapshot: &'a pbs_datastore::BackupDir,
|
snapshot: &'a pbs_datastore::BackupDir,
|
||||||
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
||||||
@ -834,7 +787,7 @@ async fn pull_snapshot<'a>(
|
|||||||
tmp_manifest_name.set_extension("tmp");
|
tmp_manifest_name.set_extension("tmp");
|
||||||
let tmp_manifest_blob;
|
let tmp_manifest_blob;
|
||||||
if let Some(data) = reader
|
if let Some(data) = reader
|
||||||
.load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name, worker)
|
.load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name)
|
||||||
.await?
|
.await?
|
||||||
{
|
{
|
||||||
tmp_manifest_blob = data;
|
tmp_manifest_blob = data;
|
||||||
@ -857,11 +810,9 @@ async fn pull_snapshot<'a>(
|
|||||||
|
|
||||||
if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
|
if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
|
||||||
if !client_log_name.exists() {
|
if !client_log_name.exists() {
|
||||||
reader
|
reader.try_download_client_log(&client_log_name).await?;
|
||||||
.try_download_client_log(&client_log_name, worker)
|
|
||||||
.await?;
|
|
||||||
};
|
};
|
||||||
task_log!(worker, "no data changes");
|
info!("no data changes");
|
||||||
let _ = std::fs::remove_file(&tmp_manifest_name);
|
let _ = std::fs::remove_file(&tmp_manifest_name);
|
||||||
return Ok(pull_stats); // nothing changed
|
return Ok(pull_stats); // nothing changed
|
||||||
}
|
}
|
||||||
@ -881,7 +832,7 @@ async fn pull_snapshot<'a>(
|
|||||||
match manifest.verify_file(&item.filename, &csum, size) {
|
match manifest.verify_file(&item.filename, &csum, size) {
|
||||||
Ok(_) => continue,
|
Ok(_) => continue,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(worker, "detected changed file {:?} - {}", path, err);
|
info!("detected changed file {path:?} - {err}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -891,7 +842,7 @@ async fn pull_snapshot<'a>(
|
|||||||
match manifest.verify_file(&item.filename, &csum, size) {
|
match manifest.verify_file(&item.filename, &csum, size) {
|
||||||
Ok(_) => continue,
|
Ok(_) => continue,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(worker, "detected changed file {:?} - {}", path, err);
|
info!("detected changed file {path:?} - {err}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -901,21 +852,15 @@ async fn pull_snapshot<'a>(
|
|||||||
match manifest.verify_file(&item.filename, &csum, size) {
|
match manifest.verify_file(&item.filename, &csum, size) {
|
||||||
Ok(_) => continue,
|
Ok(_) => continue,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(worker, "detected changed file {:?} - {}", path, err);
|
info!("detected changed file {path:?} - {err}");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let stats = pull_single_archive(
|
let stats =
|
||||||
worker,
|
pull_single_archive(reader.clone(), snapshot, item, downloaded_chunks.clone()).await?;
|
||||||
reader.clone(),
|
|
||||||
snapshot,
|
|
||||||
item,
|
|
||||||
downloaded_chunks.clone(),
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
pull_stats.add(stats);
|
pull_stats.add(stats);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -924,9 +869,7 @@ async fn pull_snapshot<'a>(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !client_log_name.exists() {
|
if !client_log_name.exists() {
|
||||||
reader
|
reader.try_download_client_log(&client_log_name).await?;
|
||||||
.try_download_client_log(&client_log_name, worker)
|
|
||||||
.await?;
|
|
||||||
};
|
};
|
||||||
snapshot
|
snapshot
|
||||||
.cleanup_unreferenced_files(&manifest)
|
.cleanup_unreferenced_files(&manifest)
|
||||||
@ -940,7 +883,6 @@ async fn pull_snapshot<'a>(
|
|||||||
/// The `reader` is configured to read from the source backup directory, while the
|
/// The `reader` is configured to read from the source backup directory, while the
|
||||||
/// `snapshot` is pointing to the local datastore and target namespace.
|
/// `snapshot` is pointing to the local datastore and target namespace.
|
||||||
async fn pull_snapshot_from<'a>(
|
async fn pull_snapshot_from<'a>(
|
||||||
worker: &'a WorkerTask,
|
|
||||||
reader: Arc<dyn PullReader + 'a>,
|
reader: Arc<dyn PullReader + 'a>,
|
||||||
snapshot: &'a pbs_datastore::BackupDir,
|
snapshot: &'a pbs_datastore::BackupDir,
|
||||||
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
|
||||||
@ -950,27 +892,27 @@ async fn pull_snapshot_from<'a>(
|
|||||||
.create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;
|
.create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;
|
||||||
|
|
||||||
let pull_stats = if is_new {
|
let pull_stats = if is_new {
|
||||||
task_log!(worker, "sync snapshot {}", snapshot.dir());
|
info!("sync snapshot {}", snapshot.dir());
|
||||||
|
|
||||||
match pull_snapshot(worker, reader, snapshot, downloaded_chunks).await {
|
match pull_snapshot(reader, snapshot, downloaded_chunks).await {
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
|
if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
|
||||||
snapshot.backup_ns(),
|
snapshot.backup_ns(),
|
||||||
snapshot.as_ref(),
|
snapshot.as_ref(),
|
||||||
true,
|
true,
|
||||||
) {
|
) {
|
||||||
task_log!(worker, "cleanup error - {}", cleanup_err);
|
info!("cleanup error - {cleanup_err}");
|
||||||
}
|
}
|
||||||
return Err(err);
|
return Err(err);
|
||||||
}
|
}
|
||||||
Ok(pull_stats) => {
|
Ok(pull_stats) => {
|
||||||
task_log!(worker, "sync snapshot {} done", snapshot.dir());
|
info!("sync snapshot {} done", snapshot.dir());
|
||||||
pull_stats
|
pull_stats
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
task_log!(worker, "re-sync snapshot {}", snapshot.dir());
|
info!("re-sync snapshot {}", snapshot.dir());
|
||||||
pull_snapshot(worker, reader, snapshot, downloaded_chunks).await?
|
pull_snapshot(reader, snapshot, downloaded_chunks).await?
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(pull_stats)
|
Ok(pull_stats)
|
||||||
@ -1073,7 +1015,6 @@ impl std::fmt::Display for SkipInfo {
|
|||||||
/// - remote snapshot access is checked by remote (twice: query and opening the backup reader)
|
/// - remote snapshot access is checked by remote (twice: query and opening the backup reader)
|
||||||
/// - local group owner is already checked by pull_store
|
/// - local group owner is already checked by pull_store
|
||||||
async fn pull_group(
|
async fn pull_group(
|
||||||
worker: &WorkerTask,
|
|
||||||
params: &PullParameters,
|
params: &PullParameters,
|
||||||
source_namespace: &BackupNamespace,
|
source_namespace: &BackupNamespace,
|
||||||
group: &BackupGroup,
|
group: &BackupGroup,
|
||||||
@ -1084,7 +1025,7 @@ async fn pull_group(
|
|||||||
|
|
||||||
let mut raw_list: Vec<BackupDir> = params
|
let mut raw_list: Vec<BackupDir> = params
|
||||||
.source
|
.source
|
||||||
.list_backup_dirs(source_namespace, group, worker)
|
.list_backup_dirs(source_namespace, group)
|
||||||
.await?;
|
.await?;
|
||||||
raw_list.sort_unstable_by(|a, b| a.time.cmp(&b.time));
|
raw_list.sort_unstable_by(|a, b| a.time.cmp(&b.time));
|
||||||
|
|
||||||
@ -1113,7 +1054,7 @@ async fn pull_group(
|
|||||||
already_synced_skip_info.update(dir.time);
|
already_synced_skip_info.update(dir.time);
|
||||||
return false;
|
return false;
|
||||||
} else if already_synced_skip_info.count > 0 {
|
} else if already_synced_skip_info.count > 0 {
|
||||||
task_log!(worker, "{}", already_synced_skip_info);
|
info!("{already_synced_skip_info}");
|
||||||
already_synced_skip_info.reset();
|
already_synced_skip_info.reset();
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
@ -1122,7 +1063,7 @@ async fn pull_group(
|
|||||||
transfer_last_skip_info.update(dir.time);
|
transfer_last_skip_info.update(dir.time);
|
||||||
return false;
|
return false;
|
||||||
} else if transfer_last_skip_info.count > 0 {
|
} else if transfer_last_skip_info.count > 0 {
|
||||||
task_log!(worker, "{}", transfer_last_skip_info);
|
info!("{transfer_last_skip_info}");
|
||||||
transfer_last_skip_info.reset();
|
transfer_last_skip_info.reset();
|
||||||
}
|
}
|
||||||
true
|
true
|
||||||
@ -1147,11 +1088,10 @@ async fn pull_group(
|
|||||||
.source
|
.source
|
||||||
.reader(source_namespace, &from_snapshot)
|
.reader(source_namespace, &from_snapshot)
|
||||||
.await?;
|
.await?;
|
||||||
let result =
|
let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await;
|
||||||
pull_snapshot_from(worker, reader, &to_snapshot, downloaded_chunks.clone()).await;
|
|
||||||
|
|
||||||
progress.done_snapshots = pos as u64 + 1;
|
progress.done_snapshots = pos as u64 + 1;
|
||||||
task_log!(worker, "percentage done: {}", progress);
|
info!("percentage done: {progress}");
|
||||||
|
|
||||||
let stats = result?; // stop on error
|
let stats = result?; // stop on error
|
||||||
pull_stats.add(stats);
|
pull_stats.add(stats);
|
||||||
@ -1169,14 +1109,13 @@ async fn pull_group(
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if snapshot.is_protected() {
|
if snapshot.is_protected() {
|
||||||
task_log!(
|
info!(
|
||||||
worker,
|
|
||||||
"don't delete vanished snapshot {} (protected)",
|
"don't delete vanished snapshot {} (protected)",
|
||||||
snapshot.dir()
|
snapshot.dir()
|
||||||
);
|
);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
task_log!(worker, "delete vanished snapshot {}", snapshot.dir());
|
info!("delete vanished snapshot {}", snapshot.dir());
|
||||||
params
|
params
|
||||||
.target
|
.target
|
||||||
.store
|
.store
|
||||||
@ -1235,7 +1174,6 @@ fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> R
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn check_and_remove_vanished_ns(
|
fn check_and_remove_vanished_ns(
|
||||||
worker: &WorkerTask,
|
|
||||||
params: &PullParameters,
|
params: &PullParameters,
|
||||||
synced_ns: HashSet<BackupNamespace>,
|
synced_ns: HashSet<BackupNamespace>,
|
||||||
) -> Result<(bool, RemovedVanishedStats), Error> {
|
) -> Result<(bool, RemovedVanishedStats), Error> {
|
||||||
@ -1276,16 +1214,12 @@ fn check_and_remove_vanished_ns(
|
|||||||
}
|
}
|
||||||
match check_and_remove_ns(params, &local_ns) {
|
match check_and_remove_ns(params, &local_ns) {
|
||||||
Ok(true) => {
|
Ok(true) => {
|
||||||
task_log!(worker, "Removed namespace {local_ns}");
|
info!("Removed namespace {local_ns}");
|
||||||
removed_stats.namespaces += 1;
|
removed_stats.namespaces += 1;
|
||||||
}
|
}
|
||||||
Ok(false) => task_log!(
|
Ok(false) => info!("Did not remove namespace {local_ns} - protected snapshots remain"),
|
||||||
worker,
|
|
||||||
"Did not remove namespace {} - protected snapshots remain",
|
|
||||||
local_ns
|
|
||||||
),
|
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(worker, "Failed to remove namespace {} - {}", local_ns, err);
|
info!("Failed to remove namespace {local_ns} - {err}");
|
||||||
errors = true;
|
errors = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1311,10 +1245,7 @@ fn check_and_remove_vanished_ns(
|
|||||||
/// - remote namespaces are filtered by remote
|
/// - remote namespaces are filtered by remote
|
||||||
/// - creation and removal of sub-NS checked here
|
/// - creation and removal of sub-NS checked here
|
||||||
/// - access to sub-NS checked here
|
/// - access to sub-NS checked here
|
||||||
pub(crate) async fn pull_store(
|
pub(crate) async fn pull_store(mut params: PullParameters) -> Result<PullStats, Error> {
|
||||||
worker: &WorkerTask,
|
|
||||||
mut params: PullParameters,
|
|
||||||
) -> Result<PullStats, Error> {
|
|
||||||
// explicit create shared lock to prevent GC on newly created chunks
|
// explicit create shared lock to prevent GC on newly created chunks
|
||||||
let _shared_store_lock = params.target.store.try_shared_chunk_store_lock()?;
|
let _shared_store_lock = params.target.store.try_shared_chunk_store_lock()?;
|
||||||
let mut errors = false;
|
let mut errors = false;
|
||||||
@ -1323,10 +1254,7 @@ pub(crate) async fn pull_store(
|
|||||||
let mut namespaces = if params.source.get_ns().is_root() && old_max_depth == Some(0) {
|
let mut namespaces = if params.source.get_ns().is_root() && old_max_depth == Some(0) {
|
||||||
vec![params.source.get_ns()] // backwards compat - don't query remote namespaces!
|
vec![params.source.get_ns()] // backwards compat - don't query remote namespaces!
|
||||||
} else {
|
} else {
|
||||||
params
|
params.source.list_namespaces(&mut params.max_depth).await?
|
||||||
.source
|
|
||||||
.list_namespaces(&mut params.max_depth, worker)
|
|
||||||
.await?
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let ns_layers_to_be_pulled = namespaces
|
let ns_layers_to_be_pulled = namespaces
|
||||||
@ -1358,33 +1286,22 @@ pub(crate) async fn pull_store(
|
|||||||
let target_ns = namespace.map_prefix(¶ms.source.get_ns(), ¶ms.target.ns)?;
|
let target_ns = namespace.map_prefix(¶ms.source.get_ns(), ¶ms.target.ns)?;
|
||||||
let target_store_ns_str = print_store_and_ns(params.target.store.name(), &target_ns);
|
let target_store_ns_str = print_store_and_ns(params.target.store.name(), &target_ns);
|
||||||
|
|
||||||
task_log!(worker, "----");
|
info!("----");
|
||||||
task_log!(
|
info!("Syncing {source_store_ns_str} into {target_store_ns_str}");
|
||||||
worker,
|
|
||||||
"Syncing {} into {}",
|
|
||||||
source_store_ns_str,
|
|
||||||
target_store_ns_str
|
|
||||||
);
|
|
||||||
|
|
||||||
synced_ns.insert(target_ns.clone());
|
synced_ns.insert(target_ns.clone());
|
||||||
|
|
||||||
match check_and_create_ns(¶ms, &target_ns) {
|
match check_and_create_ns(¶ms, &target_ns) {
|
||||||
Ok(true) => task_log!(worker, "Created namespace {}", target_ns),
|
Ok(true) => info!("Created namespace {target_ns}"),
|
||||||
Ok(false) => {}
|
Ok(false) => {}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(
|
info!("Cannot sync {source_store_ns_str} into {target_store_ns_str} - {err}");
|
||||||
worker,
|
|
||||||
"Cannot sync {} into {} - {}",
|
|
||||||
source_store_ns_str,
|
|
||||||
target_store_ns_str,
|
|
||||||
err,
|
|
||||||
);
|
|
||||||
errors = true;
|
errors = true;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
match pull_ns(worker, &namespace, &mut params).await {
|
match pull_ns(&namespace, &mut params).await {
|
||||||
Ok((ns_progress, ns_pull_stats, ns_errors)) => {
|
Ok((ns_progress, ns_pull_stats, ns_errors)) => {
|
||||||
errors |= ns_errors;
|
errors |= ns_errors;
|
||||||
|
|
||||||
@ -1393,29 +1310,23 @@ pub(crate) async fn pull_store(
|
|||||||
if params.max_depth != Some(0) {
|
if params.max_depth != Some(0) {
|
||||||
groups += ns_progress.done_groups;
|
groups += ns_progress.done_groups;
|
||||||
snapshots += ns_progress.done_snapshots;
|
snapshots += ns_progress.done_snapshots;
|
||||||
task_log!(
|
info!(
|
||||||
worker,
|
"Finished syncing namespace {namespace}, current progress: {groups} groups, {snapshots} snapshots"
|
||||||
"Finished syncing namespace {}, current progress: {} groups, {} snapshots",
|
|
||||||
namespace,
|
|
||||||
groups,
|
|
||||||
snapshots,
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
errors = true;
|
errors = true;
|
||||||
task_log!(
|
info!(
|
||||||
worker,
|
"Encountered errors while syncing namespace {} - {err}",
|
||||||
"Encountered errors while syncing namespace {} - {}",
|
|
||||||
&namespace,
|
&namespace,
|
||||||
err,
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
if params.remove_vanished {
|
if params.remove_vanished {
|
||||||
let (has_errors, stats) = check_and_remove_vanished_ns(worker, ¶ms, synced_ns)?;
|
let (has_errors, stats) = check_and_remove_vanished_ns(¶ms, synced_ns)?;
|
||||||
errors |= has_errors;
|
errors |= has_errors;
|
||||||
pull_stats.add(PullStats::from(stats));
|
pull_stats.add(PullStats::from(stats));
|
||||||
}
|
}
|
||||||
@ -1440,7 +1351,6 @@ pub(crate) async fn pull_store(
|
|||||||
/// - remote namespaces are filtered by remote
|
/// - remote namespaces are filtered by remote
|
||||||
/// - owner check for vanished groups done here
|
/// - owner check for vanished groups done here
|
||||||
pub(crate) async fn pull_ns(
|
pub(crate) async fn pull_ns(
|
||||||
worker: &WorkerTask,
|
|
||||||
namespace: &BackupNamespace,
|
namespace: &BackupNamespace,
|
||||||
params: &mut PullParameters,
|
params: &mut PullParameters,
|
||||||
) -> Result<(StoreProgress, PullStats, bool), Error> {
|
) -> Result<(StoreProgress, PullStats, bool), Error> {
|
||||||
@ -1460,11 +1370,9 @@ pub(crate) async fn pull_ns(
|
|||||||
.into_iter()
|
.into_iter()
|
||||||
.filter(|group| group.apply_filters(¶ms.group_filter))
|
.filter(|group| group.apply_filters(¶ms.group_filter))
|
||||||
.collect();
|
.collect();
|
||||||
task_log!(
|
info!(
|
||||||
worker,
|
"found {} groups to sync (out of {unfiltered_count} total)",
|
||||||
"found {} groups to sync (out of {} total)",
|
list.len()
|
||||||
list.len(),
|
|
||||||
unfiltered_count
|
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut errors = false;
|
let mut errors = false;
|
||||||
@ -1492,15 +1400,10 @@ pub(crate) async fn pull_ns(
|
|||||||
{
|
{
|
||||||
Ok(result) => result,
|
Ok(result) => result,
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(
|
info!("sync group {} failed - group lock failed: {err}", &group);
|
||||||
worker,
|
|
||||||
"sync group {} failed - group lock failed: {}",
|
|
||||||
&group,
|
|
||||||
err
|
|
||||||
);
|
|
||||||
errors = true;
|
errors = true;
|
||||||
// do not stop here, instead continue
|
// do not stop here, instead continue
|
||||||
task_log!(worker, "create_locked_backup_group failed");
|
info!("create_locked_backup_group failed");
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -1508,19 +1411,16 @@ pub(crate) async fn pull_ns(
|
|||||||
// permission check
|
// permission check
|
||||||
if params.owner != owner {
|
if params.owner != owner {
|
||||||
// only the owner is allowed to create additional snapshots
|
// only the owner is allowed to create additional snapshots
|
||||||
task_log!(
|
info!(
|
||||||
worker,
|
"sync group {} failed - owner check failed ({} != {owner})",
|
||||||
"sync group {} failed - owner check failed ({} != {})",
|
&group, params.owner
|
||||||
&group,
|
|
||||||
params.owner,
|
|
||||||
owner
|
|
||||||
);
|
);
|
||||||
errors = true; // do not stop here, instead continue
|
errors = true; // do not stop here, instead continue
|
||||||
} else {
|
} else {
|
||||||
match pull_group(worker, params, namespace, &group, &mut progress).await {
|
match pull_group(params, namespace, &group, &mut progress).await {
|
||||||
Ok(stats) => pull_stats.add(stats),
|
Ok(stats) => pull_stats.add(stats),
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(worker, "sync group {} failed - {}", &group, err,);
|
info!("sync group {} failed - {err}", &group);
|
||||||
errors = true; // do not stop here, instead continue
|
errors = true; // do not stop here, instead continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1542,7 +1442,7 @@ pub(crate) async fn pull_ns(
|
|||||||
if !local_group.apply_filters(¶ms.group_filter) {
|
if !local_group.apply_filters(¶ms.group_filter) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
task_log!(worker, "delete vanished group '{local_group}'",);
|
info!("delete vanished group '{local_group}'");
|
||||||
let delete_stats_result = params
|
let delete_stats_result = params
|
||||||
.target
|
.target
|
||||||
.store
|
.store
|
||||||
@ -1551,10 +1451,7 @@ pub(crate) async fn pull_ns(
|
|||||||
match delete_stats_result {
|
match delete_stats_result {
|
||||||
Ok(stats) => {
|
Ok(stats) => {
|
||||||
if !stats.all_removed() {
|
if !stats.all_removed() {
|
||||||
task_log!(
|
info!("kept some protected snapshots of group '{local_group}'");
|
||||||
worker,
|
|
||||||
"kept some protected snapshots of group '{local_group}'",
|
|
||||||
);
|
|
||||||
pull_stats.add(PullStats::from(RemovedVanishedStats {
|
pull_stats.add(PullStats::from(RemovedVanishedStats {
|
||||||
snapshots: stats.removed_snapshots(),
|
snapshots: stats.removed_snapshots(),
|
||||||
groups: 0,
|
groups: 0,
|
||||||
@ -1569,7 +1466,7 @@ pub(crate) async fn pull_ns(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
Err(err) => {
|
Err(err) => {
|
||||||
task_log!(worker, "{}", err);
|
info!("{err}");
|
||||||
errors = true;
|
errors = true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1577,7 +1474,7 @@ pub(crate) async fn pull_ns(
|
|||||||
Ok(())
|
Ok(())
|
||||||
});
|
});
|
||||||
if let Err(err) = result {
|
if let Err(err) = result {
|
||||||
task_log!(worker, "error during cleanup: {}", err);
|
info!("error during cleanup: {err}");
|
||||||
errors = true;
|
errors = true;
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
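Every hunk in the pull path above follows the same mechanical pattern: the `worker: &WorkerTask` (or `_worker`) parameter is dropped from the signature, each `task_log!(worker, ...)` becomes a bare tracing call, and the call sites shrink accordingly. A minimal before/after sketch of that pattern (the helper name and body are illustrative, not taken from this commit; it assumes proxmox-log has installed a tracing subscriber that routes events into the current task's log):

use tracing::info;

// Before: the worker handle had to be threaded through every helper.
//
//     fn sync_archive(worker: &WorkerTask, name: &str) {
//         task_log!(worker, "sync archive {}", name);
//     }

// After: the subscriber resolves the active task context itself,
// so helpers no longer need a worker parameter at all.
fn sync_archive(name: &str) {
    info!("sync archive {name}");
}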

@@ -1,13 +1,14 @@
 use anyhow::{bail, format_err, Context, Error};
+use tracing::{info, warn};

 use pbs_config::{acl::AclTree, token_shadow, BackupLockGuard};
 use proxmox_lang::try_block;
 use proxmox_ldap::{Config, Connection, SearchParameters, SearchResult};
 use proxmox_rest_server::WorkerTask;
 use proxmox_schema::{ApiType, Schema};
 use proxmox_section_config::SectionConfigData;
-use proxmox_sys::{task_log, task_warn};

-use std::{collections::HashSet, sync::Arc};
+use std::collections::HashSet;

 use pbs_api_types::{
     AdRealmConfig, ApiToken, Authid, LdapRealmConfig, Realm, RealmType, RemoveVanished,
@@ -39,7 +40,7 @@ pub fn do_realm_sync_job(
         move |worker| {
             job.start(&worker.upid().to_string()).unwrap();

-            task_log!(worker, "starting realm sync for {}", realm.as_str());
+            info!("starting realm sync for {}", realm.as_str());

             let override_settings = GeneralSyncSettingsOverride {
                 remove_vanished,
@@ -49,12 +50,12 @@ pub fn do_realm_sync_job(
             async move {
                 match realm_type {
                     RealmType::Ldap => {
-                        LdapRealmSyncJob::new(worker, realm, &override_settings, dry_run)?
+                        LdapRealmSyncJob::new(realm, &override_settings, dry_run)?
                             .sync()
                             .await
                     }
                     RealmType::Ad => {
-                        AdRealmSyncJob::new(worker, realm, &override_settings, dry_run)?
+                        AdRealmSyncJob::new(realm, &override_settings, dry_run)?
                             .sync()
                             .await
                     }
@@ -73,7 +74,6 @@ struct AdRealmSyncJob(LdapRealmSyncJob);

 impl AdRealmSyncJob {
     fn new(
-        worker: Arc<WorkerTask>,
         realm: Realm,
         override_settings: &GeneralSyncSettingsOverride,
         dry_run: bool,
@@ -98,7 +98,6 @@ impl AdRealmSyncJob {
         let ldap_config = auth::AdAuthenticator::api_type_to_config(&config)?;

         Ok(Self(LdapRealmSyncJob {
-            worker,
             realm,
             general_sync_settings: sync_settings,
             ldap_sync_settings: sync_attributes,
@@ -114,7 +113,6 @@ impl AdRealmSyncJob {

 /// Implementation for syncing LDAP realms
 struct LdapRealmSyncJob {
-    worker: Arc<WorkerTask>,
     realm: Realm,
     general_sync_settings: GeneralSyncSettings,
     ldap_sync_settings: LdapSyncSettings,
@@ -125,7 +123,6 @@ struct LdapRealmSyncJob {
 impl LdapRealmSyncJob {
     /// Create new LdapRealmSyncJob
     fn new(
-        worker: Arc<WorkerTask>,
         realm: Realm,
         override_settings: &GeneralSyncSettingsOverride,
         dry_run: bool,
@@ -150,7 +147,6 @@ impl LdapRealmSyncJob {
         let ldap_config = auth::LdapAuthenticator::api_type_to_config(&config)?;

         Ok(Self {
-            worker,
             realm,
             general_sync_settings: sync_settings,
             ldap_sync_settings: sync_attributes,
@@ -162,10 +158,7 @@ impl LdapRealmSyncJob {
     /// Perform realm synchronization
     async fn sync(&self) -> Result<(), Error> {
         if self.dry_run {
-            task_log!(
-                self.worker,
-                "this is a DRY RUN - changes will not be persisted"
-            );
+            info!("this is a DRY RUN - changes will not be persisted");
         }

         let ldap = Connection::new(self.ldap_config.clone());
@@ -247,7 +240,7 @@ impl LdapRealmSyncJob {
                 anyhow::Ok(())
             });
             if let Err(e) = result {
-                task_log!(self.worker, "could not create/update user: {e}");
+                info!("could not create/update user: {e}");
             }
         }

@@ -266,18 +259,10 @@ impl LdapRealmSyncJob {

         if let Some(existing_user) = existing_user {
             if existing_user != new_or_updated_user {
-                task_log!(
-                    self.worker,
-                    "updating user {}",
-                    new_or_updated_user.userid.as_str()
-                );
+                info!("updating user {}", new_or_updated_user.userid.as_str());
             }
         } else {
-            task_log!(
-                self.worker,
-                "creating user {}",
-                new_or_updated_user.userid.as_str()
-            );
+            info!("creating user {}", new_or_updated_user.userid.as_str());
         }

         user_config.set_data(
@@ -299,10 +284,7 @@ impl LdapRealmSyncJob {
                 let schema = schema.unwrap_string_schema();

                 if let Err(e) = schema.check_constraints(value) {
-                    task_warn!(
-                        self.worker,
-                        "{userid}: ignoring attribute `{attribute}`: {e}"
-                    );
+                    warn!("{userid}: ignoring attribute `{attribute}`: {e}");

                     None
                 } else {
@@ -381,7 +363,7 @@ impl LdapRealmSyncJob {
         to_delete: &[Userid],
     ) -> Result<(), Error> {
         for userid in to_delete {
-            task_log!(self.worker, "deleting user {}", userid.as_str());
+            info!("deleting user {}", userid.as_str());

             // Delete the user
             user_config.sections.remove(userid.as_str());
@@ -408,7 +390,7 @@ impl LdapRealmSyncJob {

         if !self.dry_run {
             if let Err(e) = token_shadow::delete_secret(&tokenid) {
-                task_warn!(self.worker, "could not delete token for user {userid}: {e}",)
+                warn!("could not delete token for user {userid}: {e}",)
             }
         }

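Besides swapping the macro, many of the rewritten call sites also move from positional `{}` placeholders to inline captured identifiers (`{err}`, `{local_ns}`), which is what lets multi-line `task_log!` invocations collapse to a single `info!` line. Inline capture only works for plain identifiers, so expressions such as `userid.as_str()` keep the positional form. A small illustration with made-up values:

use tracing::{info, warn};

fn log_examples() {
    let userid = String::from("jdoe@pbs");
    let err = "connection reset";
    // positional placeholders, as in the old task_log!/task_warn! calls:
    warn!("could not delete token for user {}: {}", userid, err);
    // inline captured identifiers, as used after this commit:
    warn!("could not delete token for user {userid}: {err}");
    // expressions still need positional arguments:
    info!("deleting user {}", userid.as_str());
}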

@@ -1,9 +1,9 @@
 use anyhow::{format_err, Error};
+use tracing::{error, info};

 use pbs_api_types::{Authid, Operation, VerificationJobConfig};
 use pbs_datastore::DataStore;
 use proxmox_rest_server::WorkerTask;
-use proxmox_sys::task_log;

 use crate::{
     backup::{verify_all_backups, verify_filter},
@@ -34,9 +34,9 @@ pub fn do_verification_job(
         move |worker| {
             job.start(&worker.upid().to_string())?;

-            task_log!(worker, "Starting datastore verify job '{}'", job_id);
+            info!("Starting datastore verify job '{job_id}'");
             if let Some(event_str) = schedule {
-                task_log!(worker, "task triggered by schedule '{}'", event_str);
+                info!("task triggered by schedule '{event_str}'");
             }

             let ns = match verification_job.ns {
@@ -58,9 +58,9 @@ pub fn do_verification_job(
             let job_result = match result {
                 Ok(ref failed_dirs) if failed_dirs.is_empty() => Ok(()),
                 Ok(ref failed_dirs) => {
-                    task_log!(worker, "Failed to verify the following snapshots/groups:");
+                    error!("Failed to verify the following snapshots/groups:");
                     for dir in failed_dirs {
-                        task_log!(worker, "\t{}", dir);
+                        error!("\t{dir}");
                     }

                     Err(format_err!(
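None of these `info!`/`error!` lines go anywhere unless a subscriber is installed at process start-up; in this tree that is presumably the job of the proxmox-log crate this commit imports. As a generic stand-in, a plain tracing-subscriber setup (the other crate the workspace now depends on) would look like the following sketch, which is not the actual PBS initialization:

use tracing::{info, Level};

fn main() {
    // Install a global subscriber that formats events to standard output.
    // The real daemon would install a task-aware layer instead, so that
    // events land in the per-task log files.
    tracing_subscriber::fmt()
        .with_max_level(Level::INFO)
        .init();

    info!("Starting datastore verify job '{}'", "v-1234"); // hypothetical job id
}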

@@ -12,6 +12,7 @@ use nix::fcntl::OFlag;
 use nix::sys::stat::Mode;
 use serde::Deserialize;
 use serde_json::Value;
+use tracing::info;

 use proxmox_sys::fs::{
     atomic_open_or_create_file, file_read_optional_string, lock_file, replace_file, CreateOptions,
@@ -19,7 +20,7 @@ use proxmox_sys::fs::{

 use proxmox_io::ReadExt;
 use proxmox_section_config::SectionConfigData;
-use proxmox_sys::{task_log, WorkerTaskContext};
+use proxmox_sys::WorkerTaskContext;
 use proxmox_uuid::Uuid;

 use pbs_api_types::{Fingerprint, LtoTapeDrive, VirtualTapeDrive};
@@ -381,11 +382,9 @@ pub fn request_and_load_media(
 ) -> Result<(Box<dyn TapeDriver>, MediaId), Error> {
     let check_label = |handle: &mut dyn TapeDriver, uuid: &proxmox_uuid::Uuid| {
         if let Ok((Some(media_id), _)) = handle.read_label() {
-            task_log!(
-                worker,
+            info!(
                 "found media label {} ({})",
-                media_id.label.label_text,
-                media_id.label.uuid,
+                media_id.label.label_text, media_id.label.uuid,
             );

             if media_id.label.uuid == *uuid {
@@ -423,19 +422,15 @@ pub fn request_and_load_media(
             let update_and_log_request_error =
                 |old: &mut TapeRequestError, new: TapeRequestError| -> Result<(), Error> {
                     if new != *old {
-                        task_log!(worker, "{}", new);
+                        info!("{new}");
                         let (device_type, device) = if let Some(changer) = changer {
                             ("changer", changer.as_str())
                         } else {
                             ("drive", drive)
                         };

-                        task_log!(
-                            worker,
-                            "Please insert media '{}' into {} '{}'",
-                            label_text,
-                            device_type,
-                            device
+                        info!(
+                            "Please insert media '{label_text}' into {device_type} '{device}'"
                         );
                         send_load_media_notification(
                             notification_mode,
@@ -460,19 +455,9 @@ pub fn request_and_load_media(
                     std::thread::sleep(std::time::Duration::from_millis(100));
                 }
             } else if drive_config.changer.is_none() {
-                task_log!(
-                    worker,
-                    "Checking for media '{}' in drive '{}'",
-                    label_text,
-                    drive
-                );
+                info!("Checking for media '{label_text}' in drive '{drive}'");
             } else {
-                task_log!(
-                    worker,
-                    "trying to load media '{}' into drive '{}'",
-                    label_text,
-                    drive
-                );
+                info!("trying to load media '{label_text}' into drive '{drive}'");
             }

             if drive_config.changer.is_some() {
@@ -499,8 +484,7 @@ pub fn request_and_load_media(

             let request_error = match handle.read_label() {
                 Ok((Some(media_id), _)) if media_id.label.uuid == label.uuid => {
-                    task_log!(
-                        worker,
+                    info!(
                         "found media label {} ({})",
                         media_id.label.label_text,
                         media_id.label.uuid.to_string(),

@@ -11,8 +11,8 @@ use std::sync::{Arc, Mutex};
 use std::time::SystemTime;

 use anyhow::{bail, Error};
+use tracing::{info, warn};

-use proxmox_sys::{task_log, task_warn};
 use proxmox_uuid::Uuid;

 use pbs_datastore::{DataStore, SnapshotReader};
@@ -62,7 +62,6 @@ impl PoolWriter {
     pub fn new(
         mut pool: MediaPool,
         drive_name: &str,
-        worker: &WorkerTask,
         notification_mode: TapeNotificationMode,
         force_media_set: bool,
         ns_magic: bool,
@@ -71,11 +70,11 @@ impl PoolWriter {

         let new_media_set_reason = pool.start_write_session(current_time, force_media_set)?;
         if let Some(reason) = new_media_set_reason {
-            task_log!(worker, "starting new media set - reason: {}", reason,);
+            info!("starting new media set - reason: {reason}");
         }

         let media_set_uuid = pool.current_media_set().uuid();
-        task_log!(worker, "media set uuid: {}", media_set_uuid);
+        info!("media set uuid: {media_set_uuid}");

         let mut catalog_set = CatalogSet::new();

@@ -130,7 +129,7 @@ impl PoolWriter {
     }

     /// Eject media and drop PoolWriterState (close drive)
-    pub fn eject_media(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+    pub fn eject_media(&mut self) -> Result<(), Error> {
         let mut status = match self.status.take() {
             Some(status) => status,
             None => return Ok(()), // no media loaded
@@ -139,13 +138,13 @@ impl PoolWriter {
         let (drive_config, _digest) = pbs_config::drive::config()?;

         if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
-            task_log!(worker, "eject media");
+            info!("eject media");
             status.drive.eject_media()?; // rewind and eject early, so that unload_media is faster
             drop(status); // close drive
-            task_log!(worker, "unload media");
+            info!("unload media");
             changer.unload_media(None)?; //eject and unload
         } else {
-            task_log!(worker, "standalone drive - ejecting media");
+            info!("standalone drive - ejecting media");
             status.drive.eject_media()?;
         }

@@ -153,14 +152,14 @@ impl PoolWriter {
     }

     /// Export current media set and drop PoolWriterState (close drive)
-    pub fn export_media_set(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+    pub fn export_media_set(&mut self) -> Result<(), Error> {
         let mut status = self.status.take();

         let (drive_config, _digest) = pbs_config::drive::config()?;

         if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
             if let Some(ref mut status) = status {
-                task_log!(worker, "rewind media");
+                info!("rewind media");
                 // rewind first so that the unload command later does not run into a timeout
                 status.drive.rewind()?;
             }
@@ -170,25 +169,15 @@ impl PoolWriter {
                 let media = self.pool.lookup_media(media_uuid)?;
                 let label_text = media.label_text();
                 if let Some(slot) = changer.export_media(label_text)? {
-                    task_log!(
-                        worker,
-                        "exported media '{}' to import/export slot {}",
-                        label_text,
-                        slot
-                    );
+                    info!("exported media '{label_text}' to import/export slot {slot}");
                 } else {
-                    task_warn!(
-                        worker,
-                        "export failed - media '{}' is not online or in different drive",
-                        label_text
+                    warn!(
+                        "export failed - media '{label_text}' is not online or in different drive"
                     );
                 }
             }
         } else if let Some(mut status) = status {
-            task_log!(
-                worker,
-                "standalone drive - ejecting media instead of export"
-            );
+            info!("standalone drive - ejecting media instead of export");
             status.drive.eject_media()?;
         }

@@ -238,11 +227,7 @@ impl PoolWriter {
             return Ok(media_uuid);
         }

-        task_log!(
-            worker,
-            "allocated new writable media '{}'",
-            media.label_text()
-        );
+        info!("allocated new writable media '{}'", media.label_text());

         if let Some(PoolWriterState { mut drive, .. }) = self.status.take() {
             if let Some(uuid) = &last_media_uuid {
@@ -252,7 +237,7 @@ impl PoolWriter {
                     .set_media_bytes_used(uuid, Some(stats.total_used_native_capacity))?;
                 }

-                task_log!(worker, "eject current media");
+                info!("eject current media");
                 drive.eject_media()?;
             }
         }
@@ -270,7 +255,7 @@ impl PoolWriter {
         // test for critical tape alert flags
         if let Ok(alert_flags) = drive.tape_alert_flags() {
             if !alert_flags.is_empty() {
-                task_log!(worker, "TapeAlertFlags: {:?}", alert_flags);
+                info!("TapeAlertFlags: {alert_flags:?}");
                 if tape_alert_flags_critical(alert_flags) {
                     self.pool.set_media_status_damaged(&media_uuid)?;
                     bail!(
@@ -281,20 +266,15 @@ impl PoolWriter {
             }
         }

-        let (catalog, is_new_media) = update_media_set_label(
-            worker,
-            drive.as_mut(),
-            old_media_id.media_set_label,
-            media.id(),
-        )?;
+        let (catalog, is_new_media) =
+            update_media_set_label(drive.as_mut(), old_media_id.media_set_label, media.id())?;

         self.catalog_set.lock().unwrap().append_catalog(catalog)?;

         let media_set = media.media_set_label().unwrap();

         if is_new_media && media_set.seq_nr >= MEDIA_SET_SEQ_NR_WARN_LIMIT {
-            task_warn!(
-                worker,
+            warn!(
                 "large media-set detected ({}), consider using a different allocation policy",
                 media_set.seq_nr
             );
@@ -311,7 +291,7 @@ impl PoolWriter {

         if is_new_media {
             // add catalogs from previous media
-            self.append_media_set_catalogs(worker)?;
+            self.append_media_set_catalogs()?;
         }

         self.used_tapes.insert(media_uuid.clone());
@@ -331,12 +311,12 @@ impl PoolWriter {
     // Check it tape is loaded, then move to EOM (if not already there)
     //
     // Returns the tape position at EOM.
-    fn prepare_tape_write(status: &mut PoolWriterState, worker: &WorkerTask) -> Result<u64, Error> {
+    fn prepare_tape_write(status: &mut PoolWriterState) -> Result<u64, Error> {
         if !status.at_eom {
-            task_log!(worker, "moving to end of media");
+            info!("moving to end of media");
             status.drive.move_to_eom(true)?;
             status.at_eom = true;
-            task_log!(worker, "arrived at end of media");
+            info!("arrived at end of media");
         }

         let current_file_number = status.drive.current_file_number()?;
@@ -357,7 +337,7 @@ impl PoolWriter {
     /// on the media (return value 'Ok(false, _)'). In that case, the
     /// archive is marked incomplete. The caller should mark the media
     /// as full and try again using another media.
-    pub fn append_catalog_archive(&mut self, worker: &WorkerTask) -> Result<bool, Error> {
+    pub fn append_catalog_archive(&mut self) -> Result<bool, Error> {
         let catalog_magic = self.catalog_version();

         let status = match self.status {
@@ -365,7 +345,7 @@ impl PoolWriter {
             None => bail!("PoolWriter - no media loaded"),
         };

-        Self::prepare_tape_write(status, worker)?;
+        Self::prepare_tape_write(status)?;

         let catalog_set = self.catalog_set.lock().unwrap();

@@ -408,7 +388,7 @@ impl PoolWriter {
     }

     // Append catalogs for all previous media in set (without last)
-    fn append_media_set_catalogs(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+    fn append_media_set_catalogs(&mut self) -> Result<(), Error> {
         let media_set = self.pool.current_media_set();

         let mut media_list = media_set.media_list();
@@ -424,7 +404,7 @@ impl PoolWriter {
             None => bail!("PoolWriter - no media loaded"),
         };

-        Self::prepare_tape_write(status, worker)?;
+        Self::prepare_tape_write(status)?;

         for (seq_nr, uuid) in media_list.iter().enumerate() {
             let uuid = match uuid {
@@ -436,7 +416,7 @@ impl PoolWriter {

             let mut file = Self::open_catalog_file(uuid)?;

-            task_log!(worker, "write catalog for previous media: {}", uuid);
+            info!("write catalog for previous media: {uuid}");

             if tape_write_catalog(
                 writer.as_mut(),
@@ -467,7 +447,6 @@ impl PoolWriter {
     /// media.
     pub fn append_snapshot_archive(
         &mut self,
-        worker: &WorkerTask,
         snapshot_reader: &SnapshotReader,
     ) -> Result<(bool, usize), Error> {
         let status = match self.status {
@@ -475,7 +454,7 @@ impl PoolWriter {
             None => bail!("PoolWriter - no media loaded"),
         };

-        let current_file_number = Self::prepare_tape_write(status, worker)?;
+        let current_file_number = Self::prepare_tape_write(status)?;

         let (done, bytes_written) = {
             let mut writer: Box<dyn TapeWrite> = status.drive.write_file()?;
@@ -521,7 +500,7 @@ impl PoolWriter {
             None => bail!("PoolWriter - no media loaded"),
         };

-        let current_file_number = Self::prepare_tape_write(status, worker)?;
+        let current_file_number = Self::prepare_tape_write(status)?;

         let writer = status.drive.write_file()?;

@@ -533,8 +512,7 @@ impl PoolWriter {
         status.bytes_written_after_sync += bytes_written;

         let elapsed = start_time.elapsed()?.as_secs_f64();
-        task_log!(
-            worker,
+        info!(
             "wrote {} chunks ({:.2} MB at {:.2} MB/s)",
             saved_chunks.len(),
             bytes_written as f64 / 1_000_000.0,
@@ -614,7 +592,7 @@ fn write_chunk_archive<'a>(
         }

         if writer.bytes_written() > max_size {
-            //task_log!(worker, "Chunk Archive max size reached, closing archive");
+            //info!("Chunk Archive max size reached, closing archive");
             break;
         }
     }
@@ -628,7 +606,6 @@ fn write_chunk_archive<'a>(
 // set label does not match the expected media set, overwrite the
 // media set label.
 fn update_media_set_label(
-    worker: &WorkerTask,
     drive: &mut dyn TapeDriver,
     old_set: Option<MediaSetLabel>,
     media_id: &MediaId,
@@ -657,7 +634,7 @@ fn update_media_set_label(

     let new_media = match old_set {
         None => {
-            task_log!(worker, "writing new media set label");
+            info!("writing new media set label");
             drive.write_media_set_label(new_set, key_config.as_ref())?;
             media_catalog = MediaCatalog::overwrite(TAPE_STATUS_DIR, media_id, false)?;
             true
@@ -681,8 +658,7 @@ fn update_media_set_label(

             false
         } else {
-            task_log!(
-                worker,
+            info!(
                 "writing new media set label (overwrite '{}/{}')",
                 media_set_label.uuid.to_string(),
                 media_set_label.seq_nr,
|
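Dropping the handle also ripples into signatures: `update_media_set_label` (and, above, `prepare_tape_write` and `append_snapshot_archive`) lose their `worker` parameter entirely, because whichever tracing subscriber is installed routes the events. A rough sketch of that shape, with a hypothetical stand-in type in place of the real PBS one:

    use tracing::info;

    // Hypothetical stand-in; the real MediaSetLabel lives in pbs-api-types.
    struct MediaSetLabel {
        seq_nr: u64,
        uuid: String,
    }

    // Before: fn update_media_set_label(worker: &WorkerTask, old_set: ...).
    // After: no handle parameter needs to be threaded through.
    fn update_media_set_label(old_set: Option<MediaSetLabel>) {
        match old_set {
            None => info!("writing new media set label"),
            Some(label) => info!(
                "writing new media set label (overwrite '{}/{}')",
                label.uuid, label.seq_nr
            ),
        }
    }

    fn main() {
        tracing_subscriber::fmt::init();
        update_media_set_label(None);
        update_media_set_label(Some(MediaSetLabel {
            seq_nr: 2,
            uuid: "a1b2".into(),
        }));
    }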
@@ -16,14 +16,13 @@ use ::serde::{Deserialize, Serialize};
 
 use proxmox_lang::error::io_err_other;
 use proxmox_lang::{io_bail, io_format_err};
-use proxmox_rest_server::WorkerTask;
 use proxmox_schema::api;
 use proxmox_sys::linux::procfs::{mountinfo::Device, MountInfo};
-use proxmox_sys::task_log;
 
 use pbs_api_types::{BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX};
 
 mod zfs;
+use tracing::info;
 pub use zfs::*;
 mod zpool_status;
 pub use zpool_status::*;
@@ -1116,7 +1115,7 @@ pub fn inititialize_gpt_disk(disk: &Disk, uuid: Option<&str>) -> Result<(), Erro
 
 /// Wipes all labels and the first 200 MiB of a disk/partition (or the whole if it is smaller).
 /// If called with a partition, also sets the partition type to 0x83 'Linux filesystem'.
-pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error> {
+pub fn wipe_blockdev(disk: &Disk) -> Result<(), Error> {
     let disk_path = match disk.device_path() {
         Some(path) => path,
         None => bail!("disk {:?} has no node in /dev", disk.syspath()),
@@ -1137,13 +1136,13 @@ pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error>
 
     to_wipe.push(disk_path.to_path_buf());
 
-    task_log!(worker, "Wiping block device {}", disk_path.display());
+    info!("Wiping block device {}", disk_path.display());
 
     let mut wipefs_command = std::process::Command::new("wipefs");
     wipefs_command.arg("--all").args(&to_wipe);
 
     let wipefs_output = proxmox_sys::command::run_command(wipefs_command, None)?;
-    task_log!(worker, "wipefs output: {}", wipefs_output);
+    info!("wipefs output: {wipefs_output}");
 
     let size = disk.size().map(|size| size / 1024 / 1024)?;
     let count = size.min(200);
@@ -1163,21 +1162,17 @@ pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error>
     dd_command.args(args);
 
     let dd_output = proxmox_sys::command::run_command(dd_command, None)?;
-    task_log!(worker, "dd output: {}", dd_output);
+    info!("dd output: {dd_output}");
 
     if is_partition {
         // set the partition type to 0x83 'Linux filesystem'
-        change_parttype(disk, "8300", worker)?;
+        change_parttype(disk, "8300")?;
     }
 
     Ok(())
 }
 
-pub fn change_parttype(
-    part_disk: &Disk,
-    part_type: &str,
-    worker: Arc<WorkerTask>,
-) -> Result<(), Error> {
+pub fn change_parttype(part_disk: &Disk, part_type: &str) -> Result<(), Error> {
     let part_path = match part_disk.device_path() {
         Some(path) => path,
         None => bail!("disk {:?} has no node in /dev", part_disk.syspath()),
@@ -1199,7 +1194,7 @@ pub fn change_parttype(
         };
         sgdisk_command.arg(part_disk_parent_path);
         let sgdisk_output = proxmox_sys::command::run_command(sgdisk_command, None)?;
-        task_log!(worker, "sgdisk output: {}", sgdisk_output);
+        info!("sgdisk output: {sgdisk_output}");
     }
     Ok(())
 }
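The disk hunks also switch single-argument calls to Rust 2021 inline format captures (`{wipefs_output}` instead of `{}` plus a trailing argument); both spellings expand to the same event. A quick sketch assuming a plain `fmt` subscriber and a made-up output string:

    use tracing::info;

    fn main() {
        tracing_subscriber::fmt::init();
        // Made-up command output; the real code captures wipefs/dd/sgdisk stdout.
        let wipefs_output = "/dev/sdx: 2 bytes were erased";
        info!("wipefs output: {}", wipefs_output); // old positional style
        info!("wipefs output: {wipefs_output}"); // inline capture, as in the patch
    }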
@@ -7,24 +7,25 @@ extern crate tokio;
 
 use proxmox_lang::try_block;
 use proxmox_sys::fs::CreateOptions;
-use proxmox_sys::{task_log, WorkerTaskContext};
+use proxmox_sys::WorkerTaskContext;
 
 use pbs_api_types::{Authid, UPID};
 
 use proxmox_rest_server::{CommandSocket, WorkerTask};
+use tracing::info;
 
 fn garbage_collection(worker: &WorkerTask) -> Result<(), Error> {
-    task_log!(worker, "start garbage collection");
+    info!("start garbage collection");
 
     for i in 0..50 {
         worker.check_abort()?;
 
-        task_log!(worker, "progress {}", i);
+        info!("progress {i}");
 
         std::thread::sleep(std::time::Duration::from_millis(10));
     }
 
-    task_log!(worker, "end garbage collection");
+    info!("end garbage collection");
 
     Ok(())
 }
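As the test shows, the worker handle does not vanish everywhere: abort checking still needs it, only the log output now goes through tracing. A self-contained sketch of that split, with a trivial stand-in for the real proxmox-rest-server WorkerTask:

    use tracing::info;

    // Trivial stand-in, not the proxmox-rest-server type.
    struct WorkerTask;

    impl WorkerTask {
        fn check_abort(&self) -> Result<(), String> {
            Ok(()) // the real task checks an abort flag here
        }
    }

    fn garbage_collection(worker: &WorkerTask) -> Result<(), String> {
        info!("start garbage collection");
        for i in 0..5 {
            worker.check_abort()?; // control flow still uses the handle
            info!("progress {i}"); // logging no longer does
        }
        info!("end garbage collection");
        Ok(())
    }

    fn main() {
        tracing_subscriber::fmt::init();
        garbage_collection(&WorkerTask).unwrap();
    }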