switch from task_log! macro to tracing

Import `proxmox-log` and substitute all `task_log!` (and `task_warn!`,
`task_error!`) invocations with tracing calls (`info!`, `warn!`, etc.).
Remove worker references where they are no longer necessary.

Reviewed-by: Lukas Wagner <l.wagner@proxmox.com>
Tested-by: Lukas Wagner <l.wagner@proxmox.com>
Signed-off-by: Gabriel Goller <g.goller@proxmox.com>

This commit is contained in: parent cd25c36d21, commit 7e2486e800
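The change is mechanical throughout the diff below; a minimal sketch of the before/after pattern (illustrative only, function name invented, log lines taken from the hunks that follow):

    use tracing::{info, warn};

    // Before: proxmox_sys::task_log!/task_warn! needed a worker handle
    // threaded through every function:
    //     task_log!(worker, "processed {}% ({} chunks)", percentage, chunk_count);
    //     task_warn!(worker, "failed to remove {subdir:?} subdirectory: {err}");
    //
    // After: plain tracing macros with inline format captures; the per-task
    // context is attached by the logging layer set up via proxmox-log, so no
    // worker reference needs to be passed around:
    fn report_progress(percentage: usize, chunk_count: usize) {
        info!("processed {percentage}% ({chunk_count} chunks)");
    }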
@@ -65,6 +65,7 @@ proxmox-http = { version = "0.9.0", features = [ "client", "http-helpers", "webs
 proxmox-human-byte = "0.1"
 proxmox-io = "1.0.1" # tools and client use "tokio" feature
 proxmox-lang = "1.1"
+proxmox-log = "0.1"
 proxmox-ldap = "0.2.1"
 proxmox-metrics = "0.3.1"
 proxmox-notify = "0.4"
@@ -147,6 +148,8 @@ tokio = "1.6"
 tokio-openssl = "0.6.1"
 tokio-stream = "0.1.0"
 tokio-util = { version = "0.7", features = [ "io" ] }
+tracing = "0.1"
+tracing-subscriber = "0.3.16"
 tower-service = "0.3.0"
 udev = "0.4"
 url = "2.1"
@@ -194,6 +197,8 @@ tokio = { workspace = true, features = [ "fs", "io-util", "io-std", "macros", "n
 tokio-openssl.workspace = true
 tokio-stream.workspace = true
 tokio-util = { workspace = true, features = [ "codec" ] }
+tracing.workspace = true
+tracing-subscriber.workspace = true
 tower-service.workspace = true
 udev.workspace = true
 url.workspace = true
@@ -214,6 +219,7 @@ proxmox-http = { workspace = true, features = [ "client-trait", "proxmox-async",
 proxmox-human-byte.workspace = true
 proxmox-io.workspace = true
 proxmox-lang.workspace = true
+proxmox-log.workspace = true
 proxmox-ldap.workspace = true
 proxmox-metrics.workspace = true
 proxmox-notify = { workspace = true, features = [ "pbs-context" ] }
@@ -263,6 +269,7 @@ proxmox-rrd.workspace = true
 #proxmox-human-byte = { path = "../proxmox/proxmox-human-byte" }
 #proxmox-io = { path = "../proxmox/proxmox-io" }
 #proxmox-lang = { path = "../proxmox/proxmox-lang" }
+#proxmox-log = { path = "../proxmox/proxmox-log" }
 #proxmox-ldap = { path = "../proxmox/proxmox-ldap" }
 #proxmox-metrics = { path = "../proxmox/proxmox-metrics" }
 #proxmox-notify = { path = "../proxmox/proxmox-notify" }
debian/control (vendored, 2 changes)

@@ -136,6 +136,8 @@ Build-Depends: bash-completion,
 librust-tokio-util-0.7+default-dev,
 librust-tokio-util-0.7+io-dev,
 librust-tower-service-0.3+default-dev,
+librust-tracing-0.1+default-dev,
+librust-tracing-subscriber-0.3+default-dev (>= 0.3.16-~~),
 librust-udev-0.4+default-dev,
 librust-url-2+default-dev (>= 2.1-~~),
 librust-walkdir-2+default-dev,
@@ -20,6 +20,7 @@ openssl.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 tokio = { workspace = true, features = [] }
+tracing.workspace = true
 walkdir.workspace = true
 zstd.workspace = true
@@ -3,6 +3,7 @@ use std::path::{Path, PathBuf};
 use std::sync::{Arc, Mutex};

 use anyhow::{bail, format_err, Error};
+use tracing::info;

 use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus};
 use proxmox_io::ReadExt;
@@ -10,7 +11,6 @@ use proxmox_sys::fs::{create_dir, create_path, file_type_from_file_stat, CreateO
 use proxmox_sys::process_locker::{
     ProcessLockExclusiveGuard, ProcessLockSharedGuard, ProcessLocker,
 };
-use proxmox_sys::task_log;
 use proxmox_sys::WorkerTaskContext;

 use crate::file_formats::{
@@ -92,7 +92,6 @@ impl ChunkStore {
         path: P,
         uid: nix::unistd::Uid,
         gid: nix::unistd::Gid,
-        worker: Option<&dyn WorkerTaskContext>,
         sync_level: DatastoreFSyncLevel,
     ) -> Result<Self, Error>
     where
@@ -143,9 +142,7 @@ impl ChunkStore {
             }
             let percentage = (i * 100) / (64 * 1024);
             if percentage != last_percentage {
-                if let Some(worker) = worker {
-                    task_log!(worker, "Chunkstore create: {}%", percentage)
-                }
+                info!("Chunkstore create: {percentage}%");
                 last_percentage = percentage;
             }
         }
@@ -375,7 +372,7 @@ impl ChunkStore {
         for (entry, percentage, bad) in self.get_chunk_iterator()? {
             if last_percentage != percentage {
                 last_percentage = percentage;
-                task_log!(worker, "processed {}% ({} chunks)", percentage, chunk_count,);
+                info!("processed {percentage}% ({chunk_count} chunks)");
             }

             worker.check_abort()?;
@@ -579,15 +576,8 @@ fn test_chunk_store1() {
     let user = nix::unistd::User::from_uid(nix::unistd::Uid::current())
         .unwrap()
         .unwrap();
-    let chunk_store = ChunkStore::create(
-        "test",
-        &path,
-        user.uid,
-        user.gid,
-        None,
-        DatastoreFSyncLevel::None,
-    )
-    .unwrap();
+    let chunk_store =
+        ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None).unwrap();

     let (chunk, digest) = crate::data_blob::DataChunkBuilder::new(&[0u8, 1u8])
         .build()
@@ -599,14 +589,8 @@ fn test_chunk_store1() {
     let (exists, _) = chunk_store.insert_chunk(&chunk, &digest).unwrap();
     assert!(exists);

-    let chunk_store = ChunkStore::create(
-        "test",
-        &path,
-        user.uid,
-        user.gid,
-        None,
-        DatastoreFSyncLevel::None,
-    );
+    let chunk_store =
+        ChunkStore::create("test", &path, user.uid, user.gid, DatastoreFSyncLevel::None);
     assert!(chunk_store.is_err());

     if let Err(_e) = std::fs::remove_dir_all(".testdir") { /* ignore */ }
@@ -7,6 +7,7 @@ use std::sync::{Arc, Mutex};
 use anyhow::{bail, format_err, Error};
 use lazy_static::lazy_static;
 use nix::unistd::{unlinkat, UnlinkatFlags};
+use tracing::{info, warn};

 use proxmox_human_byte::HumanByte;
 use proxmox_schema::ApiType;
@@ -16,7 +17,6 @@ use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
 use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
 use proxmox_sys::process_locker::ProcessLockSharedGuard;
 use proxmox_sys::WorkerTaskContext;
-use proxmox_sys::{task_log, task_warn};

 use pbs_api_types::{
     Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreFSyncLevel,
@@ -965,8 +965,7 @@ impl DataStore {
             let digest = index.index_digest(pos).unwrap();
             if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
                 let hex = hex::encode(digest);
-                task_warn!(
-                    worker,
+                warn!(
                     "warning: unable to access non-existent chunk {hex}, required by {file_name:?}"
                 );

@@ -1032,22 +1031,17 @@ impl DataStore {

             let percentage = (i + 1) * 100 / image_count;
             if percentage > last_percentage {
-                task_log!(
-                    worker,
-                    "marked {}% ({} of {} index files)",
-                    percentage,
+                info!(
+                    "marked {percentage}% ({} of {image_count} index files)",
                     i + 1,
-                    image_count,
                 );
                 last_percentage = percentage;
             }
         }

         if strange_paths_count > 0 {
-            task_log!(
-                worker,
-                "found (and marked) {} index files outside of expected directory scheme",
-                strange_paths_count,
+            info!(
+                "found (and marked) {strange_paths_count} index files outside of expected directory scheme"
             );
         }
@@ -1085,11 +1079,11 @@ impl DataStore {
             ..Default::default()
         };

-        task_log!(worker, "Start GC phase1 (mark used chunks)");
+        info!("Start GC phase1 (mark used chunks)");

         self.mark_used_chunks(&mut gc_status, worker)?;

-        task_log!(worker, "Start GC phase2 (sweep unused chunks)");
+        info!("Start GC phase2 (sweep unused chunks)");
         self.inner.chunk_store.sweep_unused_chunks(
             oldest_writer,
             phase1_start_time,
@@ -1097,30 +1091,27 @@ impl DataStore {
             worker,
         )?;

-        task_log!(
-            worker,
+        info!(
             "Removed garbage: {}",
             HumanByte::from(gc_status.removed_bytes),
         );
-        task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
+        info!("Removed chunks: {}", gc_status.removed_chunks);
         if gc_status.pending_bytes > 0 {
-            task_log!(
-                worker,
+            info!(
                 "Pending removals: {} (in {} chunks)",
                 HumanByte::from(gc_status.pending_bytes),
                 gc_status.pending_chunks,
             );
         }
         if gc_status.removed_bad > 0 {
-            task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
+            info!("Removed bad chunks: {}", gc_status.removed_bad);
         }

         if gc_status.still_bad > 0 {
-            task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
+            info!("Leftover bad chunks: {}", gc_status.still_bad);
         }

-        task_log!(
-            worker,
+        info!(
             "Original data usage: {}",
             HumanByte::from(gc_status.index_data_bytes),
         );
@@ -1128,15 +1119,13 @@ impl DataStore {
         if gc_status.index_data_bytes > 0 {
             let comp_per =
                 (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
-            task_log!(
-                worker,
-                "On-Disk usage: {} ({:.2}%)",
-                HumanByte::from(gc_status.disk_bytes),
-                comp_per,
+            info!(
+                "On-Disk usage: {} ({comp_per:.2}%)",
+                HumanByte::from(gc_status.disk_bytes)
             );
         }

-        task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);
+        info!("On-Disk chunks: {}", gc_status.disk_chunks);

         let deduplication_factor = if gc_status.disk_bytes > 0 {
             (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
@@ -1144,11 +1133,11 @@ impl DataStore {
             1.0
         };

-        task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);
+        info!("Deduplication factor: {deduplication_factor:.2}");

         if gc_status.disk_chunks > 0 {
             let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
-            task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
+            info!("Average chunk size: {}", HumanByte::from(avg_chunk));
         }

         if let Ok(serialized) = serde_json::to_string(&gc_status) {
@@ -1380,11 +1369,7 @@ impl DataStore {
     /// Destroy a datastore. This requires that there are no active operations on the datastore.
     ///
     /// This is a synchronous operation and should be run in a worker-thread.
-    pub fn destroy(
-        name: &str,
-        destroy_data: bool,
-        worker: &dyn WorkerTaskContext,
-    ) -> Result<(), Error> {
+    pub fn destroy(name: &str, destroy_data: bool) -> Result<(), Error> {
         let config_lock = pbs_config::datastore::lock_config()?;

         let (mut config, _digest) = pbs_config::datastore::config()?;
@@ -1412,13 +1397,13 @@ impl DataStore {
         let remove = |subdir, ok: &mut bool| {
             if let Err(err) = std::fs::remove_dir_all(base.join(subdir)) {
                 if err.kind() != io::ErrorKind::NotFound {
-                    task_warn!(worker, "failed to remove {subdir:?} subdirectory: {err}");
+                    warn!("failed to remove {subdir:?} subdirectory: {err}");
                     *ok = false;
                 }
             }
         };

-        task_log!(worker, "Deleting datastore data...");
+        info!("Deleting datastore data...");
         remove("ns", &mut ok); // ns first
         remove("ct", &mut ok);
         remove("vm", &mut ok);
@@ -1427,7 +1412,7 @@ impl DataStore {
         if ok {
             if let Err(err) = std::fs::remove_file(base.join(".gc-status")) {
                 if err.kind() != io::ErrorKind::NotFound {
-                    task_warn!(worker, "failed to remove .gc-status file: {err}");
+                    warn!("failed to remove .gc-status file: {err}");
                     ok = false;
                 }
             }
@@ -1441,7 +1426,7 @@ impl DataStore {

         // now the config
         if ok {
-            task_log!(worker, "Removing datastore from config...");
+            info!("Removing datastore from config...");
             let _lock = pbs_config::datastore::lock_config()?;
             let _ = config.sections.remove(name);
             pbs_config::datastore::save_config(&config)?;
@@ -1452,35 +1437,32 @@ impl DataStore {
         if ok {
             if let Err(err) = std::fs::remove_file(base.join(".lock")) {
                 if err.kind() != io::ErrorKind::NotFound {
-                    task_warn!(worker, "failed to remove .lock file: {err}");
+                    warn!("failed to remove .lock file: {err}");
                     ok = false;
                 }
             }
         }

         if ok {
-            task_log!(worker, "Finished deleting data.");
+            info!("Finished deleting data.");

             match std::fs::remove_dir(base) {
-                Ok(()) => task_log!(worker, "Removed empty datastore directory."),
+                Ok(()) => info!("Removed empty datastore directory."),
                 Err(err) if err.kind() == io::ErrorKind::NotFound => {
                     // weird, but ok
                 }
                 Err(err) if err.is_errno(nix::errno::Errno::EBUSY) => {
-                    task_warn!(
-                        worker,
-                        "Cannot delete datastore directory (is it a mount point?)."
-                    )
+                    warn!("Cannot delete datastore directory (is it a mount point?).")
                 }
                 Err(err) if err.is_errno(nix::errno::Errno::ENOTEMPTY) => {
-                    task_warn!(worker, "Datastore directory not empty, not deleting.")
+                    warn!("Datastore directory not empty, not deleting.")
                 }
                 Err(err) => {
-                    task_warn!(worker, "Failed to remove datastore directory: {err}");
+                    warn!("Failed to remove datastore directory: {err}");
                 }
             }
         } else {
-            task_log!(worker, "There were errors deleting data.");
+            info!("There were errors deleting data.");
         }
     }
@@ -5,8 +5,9 @@ use std::sync::{Arc, Mutex};
 use std::time::Instant;

 use anyhow::{bail, format_err, Error};
+use tracing::{error, info};

-use proxmox_sys::{task_log, WorkerTaskContext};
+use proxmox_sys::WorkerTaskContext;

 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, Authid, BackupNamespace, BackupType, CryptMode,
@@ -69,11 +70,7 @@ fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
         }
     }
 }

-fn rename_corrupted_chunk(
-    datastore: Arc<DataStore>,
-    digest: &[u8; 32],
-    worker: &dyn WorkerTaskContext,
-) {
+fn rename_corrupted_chunk(datastore: Arc<DataStore>, digest: &[u8; 32]) {
     let (path, digest_str) = datastore.chunk_path(digest);

     let mut counter = 0;
@@ -89,17 +86,12 @@ fn rename_corrupted_chunk(

     match std::fs::rename(&path, &new_path) {
         Ok(_) => {
-            task_log!(worker, "corrupted chunk renamed to {:?}", &new_path);
+            info!("corrupted chunk renamed to {:?}", &new_path);
         }
         Err(err) => {
             match err.kind() {
                 std::io::ErrorKind::NotFound => { /* ignored */ }
-                _ => task_log!(
-                    worker,
-                    "could not rename corrupted chunk {:?} - {}",
-                    &path,
-                    err
-                ),
+                _ => info!("could not rename corrupted chunk {:?} - {err}", &path),
             }
         }
     };
@@ -117,7 +109,6 @@ fn verify_index_chunks(
     let mut read_bytes = 0;
     let mut decoded_bytes = 0;

-    let worker2 = Arc::clone(&verify_worker.worker);
     let datastore2 = Arc::clone(&verify_worker.datastore);
     let corrupt_chunks2 = Arc::clone(&verify_worker.corrupt_chunks);
     let verified_chunks2 = Arc::clone(&verify_worker.verified_chunks);
@@ -130,7 +121,7 @@ fn verify_index_chunks(
         let chunk_crypt_mode = match chunk.crypt_mode() {
             Err(err) => {
                 corrupt_chunks2.lock().unwrap().insert(digest);
-                task_log!(worker2, "can't verify chunk, unknown CryptMode - {}", err);
+                info!("can't verify chunk, unknown CryptMode - {err}");
                 errors2.fetch_add(1, Ordering::SeqCst);
                 return Ok(());
             }
@@ -138,20 +129,17 @@ fn verify_index_chunks(
         };

         if chunk_crypt_mode != crypt_mode {
-            task_log!(
-                worker2,
-                "chunk CryptMode {:?} does not match index CryptMode {:?}",
-                chunk_crypt_mode,
-                crypt_mode
+            info!(
+                "chunk CryptMode {chunk_crypt_mode:?} does not match index CryptMode {crypt_mode:?}"
             );
             errors2.fetch_add(1, Ordering::SeqCst);
         }

         if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
             corrupt_chunks2.lock().unwrap().insert(digest);
-            task_log!(worker2, "{}", err);
+            info!("{err}");
             errors2.fetch_add(1, Ordering::SeqCst);
-            rename_corrupted_chunk(datastore2.clone(), &digest, &worker2);
+            rename_corrupted_chunk(datastore2.clone(), &digest);
         } else {
             verified_chunks2.lock().unwrap().insert(digest);
         }
@@ -175,11 +163,7 @@ fn verify_index_chunks(
             .contains(digest)
         {
             let digest_str = hex::encode(digest);
-            task_log!(
-                verify_worker.worker,
-                "chunk {} was marked as corrupt",
-                digest_str
-            );
+            info!("chunk {digest_str} was marked as corrupt");
             errors.fetch_add(1, Ordering::SeqCst);
             true
         } else {
@@ -218,17 +202,9 @@ fn verify_index_chunks(
                     .lock()
                     .unwrap()
                     .insert(info.digest);
-                task_log!(
-                    verify_worker.worker,
-                    "can't verify chunk, load failed - {}",
-                    err
-                );
+                error!("can't verify chunk, load failed - {err}");
                 errors.fetch_add(1, Ordering::SeqCst);
-                rename_corrupted_chunk(
-                    verify_worker.datastore.clone(),
-                    &info.digest,
-                    &verify_worker.worker,
-                );
+                rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest);
             }
             Ok(chunk) => {
                 let size = info.size();
@@ -251,15 +227,8 @@ fn verify_index_chunks(

     let error_count = errors.load(Ordering::SeqCst);

-    task_log!(
-        verify_worker.worker,
-        " verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
-        read_bytes_mib,
-        decoded_bytes_mib,
-        elapsed,
-        read_speed,
-        decode_speed,
-        error_count,
+    info!(
+        " verified {read_bytes_mib:.2}/{decoded_bytes_mib:.2} MiB in {elapsed:.2} seconds, speed {read_speed:.2}/{decode_speed:.2} MiB/s ({error_count} errors)"
     );

     if errors.load(Ordering::SeqCst) > 0 {
@@ -329,8 +298,7 @@ pub fn verify_backup_dir(
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<bool, Error> {
     if !backup_dir.full_path().exists() {
-        task_log!(
-            verify_worker.worker,
+        info!(
             "SKIPPED: verify {}:{} - snapshot does not exist (anymore).",
             verify_worker.datastore.name(),
             backup_dir.dir(),
@@ -348,8 +316,7 @@ pub fn verify_backup_dir(
             verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
         }
         Err(err) => {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
                 verify_worker.datastore.name(),
                 backup_dir.dir(),
@@ -371,8 +338,7 @@ pub fn verify_backup_dir_with_lock(
     let manifest = match backup_dir.load_manifest() {
         Ok((manifest, _)) => manifest,
         Err(err) => {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "verify {}:{} - manifest load error: {}",
                 verify_worker.datastore.name(),
                 backup_dir.dir(),
@@ -384,8 +350,7 @@ pub fn verify_backup_dir_with_lock(

     if let Some(filter) = filter {
         if !filter(&manifest) {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "SKIPPED: verify {}:{} (recently verified)",
                 verify_worker.datastore.name(),
                 backup_dir.dir(),
@@ -394,8 +359,7 @@ pub fn verify_backup_dir_with_lock(
         }
     }

-    task_log!(
-        verify_worker.worker,
+    info!(
         "verify {}:{}",
         verify_worker.datastore.name(),
         backup_dir.dir()
@@ -406,7 +370,7 @@ pub fn verify_backup_dir_with_lock(
     let mut verify_result = VerifyState::Ok;
     for info in manifest.files() {
         let result = proxmox_lang::try_block!({
-            task_log!(verify_worker.worker, "  check {}", info.filename);
+            info!("  check {}", info.filename);
             match archive_type(&info.filename)? {
                 ArchiveType::FixedIndex => verify_fixed_index(verify_worker, backup_dir, info),
                 ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, backup_dir, info),
@@ -418,8 +382,7 @@ pub fn verify_backup_dir_with_lock(
         verify_worker.worker.fail_on_shutdown()?;

         if let Err(err) = result {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "verify {}:{}/{} failed: {}",
                 verify_worker.datastore.name(),
                 backup_dir.dir(),
@@ -463,8 +426,7 @@ pub fn verify_backup_group(
     let mut list = match group.list_backups() {
         Ok(list) => list,
         Err(err) => {
-            task_log!(
-                verify_worker.worker,
+            info!(
                 "verify {}, group {} - unable to list backups: {}",
                 print_store_and_ns(verify_worker.datastore.name(), group.backup_ns()),
                 group.group(),
@@ -475,8 +437,7 @@ pub fn verify_backup_group(
     };

     let snapshot_count = list.len();
-    task_log!(
-        verify_worker.worker,
+    info!(
         "verify group {}:{} ({} snapshots)",
         verify_worker.datastore.name(),
         group.group(),
@@ -494,9 +455,8 @@ pub fn verify_backup_group(
             ));
         }
         progress.done_snapshots = pos as u64 + 1;
-        task_log!(verify_worker.worker, "percentage done: {}", progress);
+        info!("percentage done: {progress}");
     }

     Ok(errors)
 }
@@ -516,16 +476,11 @@ pub fn verify_all_backups(
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<Vec<String>, Error> {
     let mut errors = Vec::new();
-    let worker = Arc::clone(&verify_worker.worker);

-    task_log!(
-        worker,
-        "verify datastore {}",
-        verify_worker.datastore.name()
-    );
+    info!("verify datastore {}", verify_worker.datastore.name());

     let owner_filtered = if let Some(owner) = &owner {
-        task_log!(worker, "limiting to backups owned by {}", owner);
+        info!("limiting to backups owned by {owner}");
         true
     } else {
         false
@@ -553,7 +508,7 @@ pub fn verify_all_backups(
                 }
                 Err(err) => {
                     // we don't filter by owner, but we want to log the error
-                    task_log!(worker, "error on iterating groups in ns '{ns}' - {err}");
+                    info!("error on iterating groups in ns '{ns}' - {err}");
                     errors.push(err.to_string());
                     None
                 }
@@ -563,7 +518,7 @@ pub fn verify_all_backups(
             })
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {
-            task_log!(worker, "unable to list backups: {}", err,);
+            info!("unable to list backups: {err}");
             return Ok(errors);
         }
     };
@@ -571,7 +526,7 @@ pub fn verify_all_backups(
     list.sort_unstable_by(|a, b| a.group().cmp(b.group()));

     let group_count = list.len();
-    task_log!(worker, "found {} groups", group_count);
+    info!("found {group_count} groups");

     let mut progress = StoreProgress::new(group_count as u64);

@@ -5,13 +5,14 @@ use anyhow::{bail, Error};
 use futures::*;
 use http::Response;
 use hyper::{Body, StatusCode};
+use tracing_subscriber::filter::LevelFilter;

 use proxmox_lang::try_block;
+use proxmox_log::init_logger;
+use proxmox_rest_server::{daemon, ApiConfig, RestServer};
 use proxmox_router::RpcEnvironmentType;
 use proxmox_sys::fs::CreateOptions;

-use proxmox_rest_server::{daemon, ApiConfig, RestServer};
-
 use proxmox_backup::auth_helpers::*;
 use proxmox_backup::config;
 use proxmox_backup::server::auth::check_pbs_auth;
@@ -40,13 +41,7 @@ fn get_index() -> Pin<Box<dyn Future<Output = Response<Body>> + Send>> {
 }

 async fn run() -> Result<(), Error> {
-    if let Err(err) = syslog::init(
-        syslog::Facility::LOG_DAEMON,
-        log::LevelFilter::Info,
-        Some("proxmox-backup-api"),
-    ) {
-        bail!("unable to inititialize syslog - {}", err);
-    }
+    init_logger("PBS_LOG", LevelFilter::INFO, "proxmox-backup-api")?;

     config::create_configdir()?;
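Note: with syslog gone, both daemons now initialize logging through proxmox-log. A minimal sketch of the new setup, based on the hunk above (the PBS_LOG environment variable is what the call passes for a runtime log-level override; the exact semantics of init_logger live in the proxmox-log crate):

    use proxmox_log::init_logger;
    use tracing_subscriber::filter::LevelFilter;

    async fn run() -> Result<(), anyhow::Error> {
        // LevelFilter::INFO is the default shown in this commit; the level
        // can be overridden at runtime via the PBS_LOG environment variable.
        init_logger("PBS_LOG", LevelFilter::INFO, "proxmox-backup-api")?;
        // ... continue with normal daemon setup
        Ok(())
    }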
@@ -7,18 +7,20 @@ use http::request::Parts;
 use http::Response;
 use hyper::header;
 use hyper::{Body, StatusCode};
+use tracing::{info, warn};
+use tracing_subscriber::filter::LevelFilter;
 use url::form_urlencoded;

 use openssl::ssl::SslAcceptor;
 use serde_json::{json, Value};

 use proxmox_lang::try_block;
+use proxmox_log::init_logger;
 use proxmox_metrics::MetricsData;
 use proxmox_router::{RpcEnvironment, RpcEnvironmentType};
 use proxmox_sys::fs::{CreateOptions, FileSystemInformation};
 use proxmox_sys::linux::procfs::{Loadavg, ProcFsMemInfo, ProcFsNetDev, ProcFsStat};
 use proxmox_sys::logrotate::LogRotate;
-use proxmox_sys::{task_log, task_warn};

 use pbs_datastore::DataStore;

@@ -181,21 +183,7 @@ async fn get_index_future(env: RestEnvironment, parts: Parts) -> Response<Body>
 }

 async fn run() -> Result<(), Error> {
-    // Note: To debug early connection error use
-    // PROXMOX_DEBUG=1 ./target/release/proxmox-backup-proxy
-    let debug = std::env::var("PROXMOX_DEBUG").is_ok();
-
-    if let Err(err) = syslog::init(
-        syslog::Facility::LOG_DAEMON,
-        if debug {
-            log::LevelFilter::Debug
-        } else {
-            log::LevelFilter::Info
-        },
-        Some("proxmox-backup-proxy"),
-    ) {
-        bail!("unable to inititialize syslog - {err}");
-    }
+    init_logger("PBS_LOG", LevelFilter::INFO, "proxmox-backup-proxy")?;

     proxmox_backup::auth_helpers::setup_auth_context(false);
     proxmox_backup::server::notifications::init()?;
@@ -301,7 +289,7 @@ async fn run() -> Result<(), Error> {
     })?;

     let connections = proxmox_rest_server::connection::AcceptBuilder::new()
-        .debug(debug)
+        .debug(tracing::enabled!(tracing::Level::DEBUG))
         .rate_limiter_lookup(Arc::new(lookup_rate_limiter))
         .tcp_keepalive_time(PROXMOX_BACKUP_TCP_KEEPALIVE_TIME);
@@ -750,7 +738,7 @@ async fn schedule_task_log_rotate() {
         false,
         move |worker| {
             job.start(&worker.upid().to_string())?;
-            task_log!(worker, "starting task log rotation");
+            info!("starting task log rotation");

             let result = try_block!({
                 let max_size = 512 * 1024 - 1; // an entry has ~ 100b, so > 5000 entries/file
@@ -775,9 +763,9 @@ async fn schedule_task_log_rotate() {
                 )?;

                 if has_rotated {
-                    task_log!(worker, "task log archive was rotated");
+                    info!("task log archive was rotated");
                 } else {
-                    task_log!(worker, "task log archive was not rotated");
+                    info!("task log archive was not rotated");
                 }

                 let max_size = 32 * 1024 * 1024 - 1;
@@ -793,9 +781,9 @@ async fn schedule_task_log_rotate() {
                 if logrotate.rotate(max_size)? {
                     println!("rotated access log, telling daemons to re-open log file");
                     proxmox_async::runtime::block_on(command_reopen_access_logfiles())?;
-                    task_log!(worker, "API access log was rotated");
+                    info!("API access log was rotated");
                 } else {
-                    task_log!(worker, "API access log was not rotated");
+                    info!("API access log was not rotated");
                 }

                 let mut logrotate = LogRotate::new(
@@ -808,15 +796,15 @@ async fn schedule_task_log_rotate() {
                 if logrotate.rotate(max_size)? {
                     println!("rotated auth log, telling daemons to re-open log file");
                     proxmox_async::runtime::block_on(command_reopen_auth_logfiles())?;
-                    task_log!(worker, "API authentication log was rotated");
+                    info!("API authentication log was rotated");
                 } else {
-                    task_log!(worker, "API authentication log was not rotated");
+                    info!("API authentication log was not rotated");
                 }

                 if has_rotated {
-                    task_log!(worker, "cleaning up old task logs");
-                    if let Err(err) = cleanup_old_tasks(&worker, true) {
-                        task_warn!(worker, "could not completely cleanup old tasks: {err}");
+                    info!("cleaning up old task logs");
+                    if let Err(err) = cleanup_old_tasks(true) {
+                        warn!("could not completely cleanup old tasks: {err}");
                     }
                 }

@@ -1,7 +1,7 @@
 use anyhow::Error;
 use std::sync::Arc;

-use proxmox_sys::task_log;
+use tracing::info;

 use pbs_api_types::Authid;
 use pbs_datastore::DataStore;
@@ -28,9 +28,9 @@ pub fn do_garbage_collection_job(
         move |worker| {
             job.start(&worker.upid().to_string())?;

-            task_log!(worker, "starting garbage collection on store {store}");
+            info!("starting garbage collection on store {store}");
             if let Some(event_str) = schedule {
-                task_log!(worker, "task triggered by schedule '{event_str}'");
+                info!("task triggered by schedule '{event_str}'");
             }

             let result = datastore.garbage_collection(&*worker, worker.upid());
@@ -1,8 +1,7 @@
 use std::sync::Arc;

 use anyhow::Error;
-
-use proxmox_sys::{task_log, task_warn};
+use tracing::{info, warn};

 use pbs_api_types::{
     print_store_and_ns, Authid, KeepOptions, Operation, PruneJobOptions, MAX_NAMESPACE_DEPTH,
@@ -16,7 +15,6 @@ use crate::backup::ListAccessibleBackupGroups;
 use crate::server::jobstate::Job;

 pub fn prune_datastore(
-    worker: Arc<WorkerTask>,
     auth_id: Authid,
     prune_options: PruneJobOptions,
     datastore: Arc<DataStore>,
@@ -31,19 +29,19 @@ pub fn prune_datastore(
     };
     let ns = prune_options.ns.clone().unwrap_or_default();
     let store_ns = print_store_and_ns(store, &ns);
-    task_log!(worker, "Starting datastore prune on {store_ns}, {depth}");
+    info!("Starting datastore prune on {store_ns}, {depth}");

     if dry_run {
-        task_log!(worker, "(dry test run)");
+        info!("(dry test run)");
     }

     let keep_all = !prune_options.keeps_something();

     if keep_all {
-        task_log!(worker, "No prune selection - keeping all files.");
+        info!("No prune selection - keeping all files.");
     } else {
         let rendered_options = cli_prune_options_string(&prune_options);
-        task_log!(worker, "retention options: {rendered_options}");
+        info!("retention options: {rendered_options}");
     }

     for group in ListAccessibleBackupGroups::new_with_privs(
@@ -61,8 +59,7 @@ pub fn prune_datastore(
         let mut prune_info = compute_prune_info(list, &prune_options.keep)?;
         prune_info.reverse(); // delete older snapshots first

-        task_log!(
-            worker,
+        info!(
             "Pruning group {ns}:\"{}/{}\"",
             group.backup_type(),
             group.backup_id()
@@ -70,11 +67,9 @@ pub fn prune_datastore(

         for (info, mark) in prune_info {
             let keep = keep_all || mark.keep();
-            task_log!(
-                worker,
-                "{}{} {}/{}/{}",
+            info!(
+                "{}{mark} {}/{}/{}",
                 if dry_run { "would " } else { "" },
-                mark,
                 group.backup_type(),
                 group.backup_id(),
                 info.backup_dir.backup_time_string()
@@ -82,7 +77,7 @@ pub fn prune_datastore(
             if !keep && !dry_run {
                 if let Err(err) = datastore.remove_backup_dir(ns, info.backup_dir.as_ref(), false) {
                     let path = info.backup_dir.relative_path();
-                    task_warn!(worker, "failed to remove dir {path:?}: {err}");
+                    warn!("failed to remove dir {path:?}: {err}");
                 }
             }
         }
@@ -150,13 +145,13 @@ pub fn do_prune_job(
         move |worker| {
             job.start(&worker.upid().to_string())?;

-            task_log!(worker, "prune job '{}'", job.jobname());
+            info!("prune job '{}'", job.jobname());

             if let Some(event_str) = schedule {
-                task_log!(worker, "task triggered by schedule '{event_str}'");
+                info!("task triggered by schedule '{event_str}'");
             }

-            let result = prune_datastore(worker.clone(), auth_id, prune_options, datastore, false);
+            let result = prune_datastore(auth_id, prune_options, datastore, false);

             let status = worker.create_state(&result);

@@ -10,10 +10,9 @@ use std::time::{Duration, SystemTime};
 use anyhow::{bail, format_err, Error};
 use http::StatusCode;
 use proxmox_human_byte::HumanByte;
-use proxmox_rest_server::WorkerTask;
 use proxmox_router::HttpError;
-use proxmox_sys::{task_log, task_warn};
 use serde_json::json;
+use tracing::{info, warn};

 use pbs_api_types::{
     print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupFilter,
@@ -122,7 +121,6 @@ trait PullSource: Send + Sync {
     async fn list_namespaces(
         &self,
         max_depth: &mut Option<usize>,
-        worker: &WorkerTask,
     ) -> Result<Vec<BackupNamespace>, Error>;

     /// Lists groups within a specific namespace from the source.
@@ -137,7 +135,6 @@ trait PullSource: Send + Sync {
         &self,
         namespace: &BackupNamespace,
         group: &BackupGroup,
-        worker: &WorkerTask,
     ) -> Result<Vec<BackupDir>, Error>;
     fn get_ns(&self) -> BackupNamespace;
     fn get_store(&self) -> &str;
@@ -155,7 +152,6 @@ impl PullSource for RemoteSource {
     async fn list_namespaces(
         &self,
         max_depth: &mut Option<usize>,
-        worker: &WorkerTask,
     ) -> Result<Vec<BackupNamespace>, Error> {
         if self.ns.is_root() && max_depth.map_or(false, |depth| depth == 0) {
             return Ok(vec![self.ns.clone()]);
@@ -178,8 +174,8 @@ impl PullSource for RemoteSource {
             Some(HttpError { code, message }) => match code {
                 &StatusCode::NOT_FOUND => {
                     if self.ns.is_root() && max_depth.is_none() {
-                        task_warn!(worker, "Could not query remote for namespaces (404) -> temporarily switching to backwards-compat mode");
-                        task_warn!(worker, "Either make backwards-compat mode explicit (max-depth == 0) or upgrade remote system.");
+                        warn!("Could not query remote for namespaces (404) -> temporarily switching to backwards-compat mode");
+                        warn!("Either make backwards-compat mode explicit (max-depth == 0) or upgrade remote system.");
                         max_depth.replace(0);
                     } else {
                         bail!("Remote namespace set/recursive sync requested, but remote does not support namespaces.")
@@ -238,7 +234,6 @@ impl PullSource for RemoteSource {
         &self,
         namespace: &BackupNamespace,
         group: &BackupGroup,
-        worker: &WorkerTask,
     ) -> Result<Vec<BackupDir>, Error> {
         let path = format!("api2/json/admin/datastore/{}/snapshots", self.repo.store());

@@ -261,11 +256,7 @@ impl PullSource for RemoteSource {
                 let snapshot = item.backup;
                 // in-progress backups can't be synced
                 if item.size.is_none() {
-                    task_log!(
-                        worker,
-                        "skipping snapshot {} - in-progress backup",
-                        snapshot
-                    );
+                    info!("skipping snapshot {snapshot} - in-progress backup");
                     return None;
                 }

@@ -301,7 +292,6 @@ impl PullSource for LocalSource {
     async fn list_namespaces(
         &self,
         max_depth: &mut Option<usize>,
-        _worker: &WorkerTask,
     ) -> Result<Vec<BackupNamespace>, Error> {
         ListNamespacesRecursive::new_max_depth(
             self.store.clone(),
@@ -333,7 +323,6 @@ impl PullSource for LocalSource {
         &self,
         namespace: &BackupNamespace,
         group: &BackupGroup,
-        _worker: &WorkerTask,
     ) -> Result<Vec<BackupDir>, Error> {
         Ok(self
             .store
@@ -381,19 +370,10 @@ trait PullReader: Send + Sync {
     /// Asynchronously loads a file from the source into a local file.
     /// `filename` is the name of the file to load from the source.
     /// `into` is the path of the local file to load the source file into.
-    async fn load_file_into(
-        &self,
-        filename: &str,
-        into: &Path,
-        worker: &WorkerTask,
-    ) -> Result<Option<DataBlob>, Error>;
+    async fn load_file_into(&self, filename: &str, into: &Path) -> Result<Option<DataBlob>, Error>;

     /// Tries to download the client log from the source and save it into a local file.
-    async fn try_download_client_log(
-        &self,
-        to_path: &Path,
-        worker: &WorkerTask,
-    ) -> Result<(), Error>;
+    async fn try_download_client_log(&self, to_path: &Path) -> Result<(), Error>;

     fn skip_chunk_sync(&self, target_store_name: &str) -> bool;
 }
@@ -409,12 +389,7 @@ impl PullReader for RemoteReader {
         ))
     }

-    async fn load_file_into(
-        &self,
-        filename: &str,
-        into: &Path,
-        worker: &WorkerTask,
-    ) -> Result<Option<DataBlob>, Error> {
+    async fn load_file_into(&self, filename: &str, into: &Path) -> Result<Option<DataBlob>, Error> {
         let mut tmp_file = std::fs::OpenOptions::new()
             .write(true)
             .create(true)
@@ -426,8 +401,7 @@ impl PullReader for RemoteReader {
             match err.downcast_ref::<HttpError>() {
                 Some(HttpError { code, message }) => match *code {
                     StatusCode::NOT_FOUND => {
-                        task_log!(
-                            worker,
+                        info!(
                             "skipping snapshot {} - vanished since start of sync",
                             &self.dir,
                         );
@@ -446,11 +420,7 @@ impl PullReader for RemoteReader {
         Ok(DataBlob::load_from_reader(&mut tmp_file).ok())
     }

-    async fn try_download_client_log(
-        &self,
-        to_path: &Path,
-        worker: &WorkerTask,
-    ) -> Result<(), Error> {
+    async fn try_download_client_log(&self, to_path: &Path) -> Result<(), Error> {
         let mut tmp_path = to_path.to_owned();
         tmp_path.set_extension("tmp");

@@ -469,7 +439,7 @@ impl PullReader for RemoteReader {
             if let Err(err) = std::fs::rename(&tmp_path, to_path) {
                 bail!("Atomic rename file {:?} failed - {}", to_path, err);
             }
-            task_log!(worker, "got backup log file {:?}", CLIENT_LOG_BLOB_NAME);
+            info!("got backup log file {CLIENT_LOG_BLOB_NAME:?}");
         }

         Ok(())
@@ -490,12 +460,7 @@ impl PullReader for LocalReader {
         ))
     }

-    async fn load_file_into(
-        &self,
-        filename: &str,
-        into: &Path,
-        _worker: &WorkerTask,
-    ) -> Result<Option<DataBlob>, Error> {
+    async fn load_file_into(&self, filename: &str, into: &Path) -> Result<Option<DataBlob>, Error> {
         let mut tmp_file = std::fs::OpenOptions::new()
             .write(true)
             .create(true)
@@ -509,11 +474,7 @@ impl PullReader for LocalReader {
         Ok(DataBlob::load_from_reader(&mut tmp_file).ok())
     }

-    async fn try_download_client_log(
-        &self,
-        _to_path: &Path,
-        _worker: &WorkerTask,
-    ) -> Result<(), Error> {
+    async fn try_download_client_log(&self, _to_path: &Path) -> Result<(), Error> {
         Ok(())
     }

@@ -603,7 +564,6 @@ impl PullParameters {
 }

 async fn pull_index_chunks<I: IndexFile>(
-    worker: &WorkerTask,
     chunk_reader: Arc<dyn AsyncReadChunk>,
     target: Arc<DataStore>,
     index: I,
@@ -658,10 +618,10 @@ async fn pull_index_chunks<I: IndexFile>(
                 target.cond_touch_chunk(&info.digest, false)
             })?;
             if chunk_exists {
-                //task_log!(worker, "chunk {} exists {}", pos, hex::encode(digest));
+                //info!("chunk {} exists {}", pos, hex::encode(digest));
                 return Ok::<_, Error>(());
             }
-            //task_log!(worker, "sync {} chunk {}", pos, hex::encode(digest));
+            //info!("sync {} chunk {}", pos, hex::encode(digest));
             let chunk = chunk_reader.read_raw_chunk(&info.digest).await?;
             let raw_size = chunk.raw_size() as usize;

@@ -689,8 +649,7 @@ async fn pull_index_chunks<I: IndexFile>(
     let bytes = bytes.load(Ordering::SeqCst);
     let chunk_count = chunk_count.load(Ordering::SeqCst);

-    task_log!(
-        worker,
+    info!(
         "downloaded {} ({}/s)",
         HumanByte::from(bytes),
         HumanByte::new_binary(bytes as f64 / elapsed.as_secs_f64()),
@@ -730,7 +689,6 @@ fn verify_archive(info: &FileInfo, csum: &[u8; 32], size: u64) -> Result<(), Err
 /// - if archive is an index, pull referenced chunks
 /// - Rename tmp file into real path
 async fn pull_single_archive<'a>(
-    worker: &'a WorkerTask,
     reader: Arc<dyn PullReader + 'a>,
     snapshot: &'a pbs_datastore::BackupDir,
     archive_info: &'a FileInfo,
@@ -745,11 +703,9 @@ async fn pull_single_archive<'a>(

     let mut pull_stats = PullStats::default();

-    task_log!(worker, "sync archive {}", archive_name);
+    info!("sync archive {archive_name}");

-    reader
-        .load_file_into(archive_name, &tmp_path, worker)
-        .await?;
+    reader.load_file_into(archive_name, &tmp_path).await?;

     let mut tmpfile = std::fs::OpenOptions::new().read(true).open(&tmp_path)?;

@@ -762,10 +718,9 @@ async fn pull_single_archive<'a>(
             verify_archive(archive_info, &csum, size)?;

             if reader.skip_chunk_sync(snapshot.datastore().name()) {
-                task_log!(worker, "skipping chunk sync for same datastore");
+                info!("skipping chunk sync for same datastore");
             } else {
                 let stats = pull_index_chunks(
-                    worker,
                     reader.chunk_reader(archive_info.crypt_mode),
                     snapshot.datastore().clone(),
                     index,
@@ -783,10 +738,9 @@ async fn pull_single_archive<'a>(
             verify_archive(archive_info, &csum, size)?;

             if reader.skip_chunk_sync(snapshot.datastore().name()) {
-                task_log!(worker, "skipping chunk sync for same datastore");
+                info!("skipping chunk sync for same datastore");
             } else {
                 let stats = pull_index_chunks(
-                    worker,
                     reader.chunk_reader(archive_info.crypt_mode),
                     snapshot.datastore().clone(),
                     index,
@@ -818,7 +772,6 @@ async fn pull_single_archive<'a>(
 /// -- if not, pull it from the remote
 /// - Download log if not already existing
 async fn pull_snapshot<'a>(
-    worker: &'a WorkerTask,
     reader: Arc<dyn PullReader + 'a>,
     snapshot: &'a pbs_datastore::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
@@ -834,7 +787,7 @@ async fn pull_snapshot<'a>(
     tmp_manifest_name.set_extension("tmp");
     let tmp_manifest_blob;
     if let Some(data) = reader
-        .load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name, worker)
+        .load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name)
         .await?
     {
         tmp_manifest_blob = data;
@@ -857,11 +810,9 @@ async fn pull_snapshot<'a>(

         if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() {
             if !client_log_name.exists() {
-                reader
-                    .try_download_client_log(&client_log_name, worker)
-                    .await?;
+                reader.try_download_client_log(&client_log_name).await?;
             };
-            task_log!(worker, "no data changes");
+            info!("no data changes");
             let _ = std::fs::remove_file(&tmp_manifest_name);
             return Ok(pull_stats); // nothing changed
         }
@@ -881,7 +832,7 @@ async fn pull_snapshot<'a>(
                 match manifest.verify_file(&item.filename, &csum, size) {
                     Ok(_) => continue,
                     Err(err) => {
-                        task_log!(worker, "detected changed file {:?} - {}", path, err);
+                        info!("detected changed file {path:?} - {err}");
                     }
                 }
             }
@@ -891,7 +842,7 @@ async fn pull_snapshot<'a>(
                 match manifest.verify_file(&item.filename, &csum, size) {
                     Ok(_) => continue,
                     Err(err) => {
-                        task_log!(worker, "detected changed file {:?} - {}", path, err);
+                        info!("detected changed file {path:?} - {err}");
                     }
                 }
             }
@@ -901,21 +852,15 @@ async fn pull_snapshot<'a>(
                 match manifest.verify_file(&item.filename, &csum, size) {
                     Ok(_) => continue,
                     Err(err) => {
-                        task_log!(worker, "detected changed file {:?} - {}", path, err);
+                        info!("detected changed file {path:?} - {err}");
                     }
                 }
             }
         }

-        let stats = pull_single_archive(
-            worker,
-            reader.clone(),
-            snapshot,
-            item,
-            downloaded_chunks.clone(),
-        )
-        .await?;
+        let stats =
+            pull_single_archive(reader.clone(), snapshot, item, downloaded_chunks.clone()).await?;
         pull_stats.add(stats);
     }

@@ -924,9 +869,7 @@ async fn pull_snapshot<'a>(
     }

     if !client_log_name.exists() {
-        reader
-            .try_download_client_log(&client_log_name, worker)
-            .await?;
+        reader.try_download_client_log(&client_log_name).await?;
     };
     snapshot
         .cleanup_unreferenced_files(&manifest)
@@ -940,7 +883,6 @@ async fn pull_snapshot<'a>(
 /// The `reader` is configured to read from the source backup directory, while the
 /// `snapshot` is pointing to the local datastore and target namespace.
 async fn pull_snapshot_from<'a>(
-    worker: &'a WorkerTask,
     reader: Arc<dyn PullReader + 'a>,
     snapshot: &'a pbs_datastore::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
@@ -950,27 +892,27 @@ async fn pull_snapshot_from<'a>(
         .create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;

     let pull_stats = if is_new {
-        task_log!(worker, "sync snapshot {}", snapshot.dir());
+        info!("sync snapshot {}", snapshot.dir());

-        match pull_snapshot(worker, reader, snapshot, downloaded_chunks).await {
+        match pull_snapshot(reader, snapshot, downloaded_chunks).await {
             Err(err) => {
                 if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
                     snapshot.backup_ns(),
                     snapshot.as_ref(),
                     true,
                 ) {
-                    task_log!(worker, "cleanup error - {}", cleanup_err);
+                    info!("cleanup error - {cleanup_err}");
                 }
                 return Err(err);
             }
             Ok(pull_stats) => {
-                task_log!(worker, "sync snapshot {} done", snapshot.dir());
+                info!("sync snapshot {} done", snapshot.dir());
                 pull_stats
             }
         }
     } else {
-        task_log!(worker, "re-sync snapshot {}", snapshot.dir());
-        pull_snapshot(worker, reader, snapshot, downloaded_chunks).await?
+        info!("re-sync snapshot {}", snapshot.dir());
+        pull_snapshot(reader, snapshot, downloaded_chunks).await?
     };

     Ok(pull_stats)
@ -1073,7 +1015,6 @@ impl std::fmt::Display for SkipInfo {
|
||||
/// - remote snapshot access is checked by remote (twice: query and opening the backup reader)
|
||||
/// - local group owner is already checked by pull_store
|
||||
async fn pull_group(
|
||||
worker: &WorkerTask,
|
||||
params: &PullParameters,
|
||||
source_namespace: &BackupNamespace,
|
||||
group: &BackupGroup,
|
||||
@ -1084,7 +1025,7 @@ async fn pull_group(
|
||||
|
||||
let mut raw_list: Vec<BackupDir> = params
|
||||
.source
|
||||
.list_backup_dirs(source_namespace, group, worker)
|
||||
.list_backup_dirs(source_namespace, group)
|
||||
.await?;
|
||||
raw_list.sort_unstable_by(|a, b| a.time.cmp(&b.time));
|
||||
|
||||
@ -1113,7 +1054,7 @@ async fn pull_group(
|
||||
already_synced_skip_info.update(dir.time);
|
||||
return false;
|
||||
} else if already_synced_skip_info.count > 0 {
|
||||
task_log!(worker, "{}", already_synced_skip_info);
|
||||
info!("{already_synced_skip_info}");
|
||||
already_synced_skip_info.reset();
|
||||
return true;
|
||||
}
|
||||
@ -1122,7 +1063,7 @@ async fn pull_group(
|
||||
transfer_last_skip_info.update(dir.time);
|
||||
return false;
|
||||
} else if transfer_last_skip_info.count > 0 {
|
||||
task_log!(worker, "{}", transfer_last_skip_info);
|
||||
info!("{transfer_last_skip_info}");
|
||||
transfer_last_skip_info.reset();
|
||||
}
|
||||
true
|
||||
@ -1147,11 +1088,10 @@ async fn pull_group(
|
||||
.source
|
||||
.reader(source_namespace, &from_snapshot)
|
||||
.await?;
|
||||
let result =
|
||||
pull_snapshot_from(worker, reader, &to_snapshot, downloaded_chunks.clone()).await;
|
||||
let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await;
|
||||
|
||||
progress.done_snapshots = pos as u64 + 1;
|
||||
task_log!(worker, "percentage done: {}", progress);
|
||||
info!("percentage done: {progress}");
|
||||
|
||||
let stats = result?; // stop on error
|
||||
pull_stats.add(stats);
|
||||
@ -1169,14 +1109,13 @@ async fn pull_group(
|
||||
continue;
|
||||
}
|
||||
if snapshot.is_protected() {
|
||||
task_log!(
|
||||
worker,
|
||||
info!(
|
||||
"don't delete vanished snapshot {} (protected)",
|
||||
snapshot.dir()
|
||||
);
|
||||
continue;
|
||||
}
|
||||
task_log!(worker, "delete vanished snapshot {}", snapshot.dir());
|
||||
info!("delete vanished snapshot {}", snapshot.dir());
|
||||
params
|
||||
.target
|
||||
.store
|
||||
@ -1235,7 +1174,6 @@ fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> R
|
||||
}
|
||||
|
||||
fn check_and_remove_vanished_ns(
|
||||
worker: &WorkerTask,
|
||||
params: &PullParameters,
|
||||
synced_ns: HashSet<BackupNamespace>,
|
||||
) -> Result<(bool, RemovedVanishedStats), Error> {
|
||||
@ -1276,16 +1214,12 @@ fn check_and_remove_vanished_ns(
|
||||
}
|
||||
match check_and_remove_ns(params, &local_ns) {
|
||||
Ok(true) => {
|
||||
task_log!(worker, "Removed namespace {local_ns}");
|
||||
info!("Removed namespace {local_ns}");
|
||||
removed_stats.namespaces += 1;
|
||||
}
|
||||
Ok(false) => task_log!(
|
||||
worker,
|
||||
"Did not remove namespace {} - protected snapshots remain",
|
||||
local_ns
|
||||
),
|
||||
Ok(false) => info!("Did not remove namespace {local_ns} - protected snapshots remain"),
|
||||
Err(err) => {
|
||||
task_log!(worker, "Failed to remove namespace {} - {}", local_ns, err);
|
||||
info!("Failed to remove namespace {local_ns} - {err}");
|
||||
errors = true;
|
||||
}
|
||||
}
|
||||
@ -1311,10 +1245,7 @@ fn check_and_remove_vanished_ns(
|
||||
/// - remote namespaces are filtered by remote
|
||||
/// - creation and removal of sub-NS checked here
|
||||
/// - access to sub-NS checked here
|
||||
pub(crate) async fn pull_store(
|
||||
worker: &WorkerTask,
|
||||
mut params: PullParameters,
|
||||
) -> Result<PullStats, Error> {
|
||||
pub(crate) async fn pull_store(mut params: PullParameters) -> Result<PullStats, Error> {
|
||||
// explicit create shared lock to prevent GC on newly created chunks
|
||||
let _shared_store_lock = params.target.store.try_shared_chunk_store_lock()?;
|
||||
let mut errors = false;
|
||||
@ -1323,10 +1254,7 @@ pub(crate) async fn pull_store(
|
||||
let mut namespaces = if params.source.get_ns().is_root() && old_max_depth == Some(0) {
|
||||
vec![params.source.get_ns()] // backwards compat - don't query remote namespaces!
|
||||
} else {
|
||||
params
|
||||
.source
|
||||
.list_namespaces(&mut params.max_depth, worker)
|
||||
.await?
|
||||
params.source.list_namespaces(&mut params.max_depth).await?
|
||||
};
|
||||
|
||||
let ns_layers_to_be_pulled = namespaces
|
||||
@ -1358,33 +1286,22 @@ pub(crate) async fn pull_store(
|
||||
let target_ns = namespace.map_prefix(¶ms.source.get_ns(), ¶ms.target.ns)?;
|
||||
let target_store_ns_str = print_store_and_ns(params.target.store.name(), &target_ns);
|
||||
|
||||
task_log!(worker, "----");
|
||||
task_log!(
|
||||
worker,
|
||||
"Syncing {} into {}",
|
||||
source_store_ns_str,
|
||||
target_store_ns_str
|
||||
);
|
||||
info!("----");
|
||||
info!("Syncing {source_store_ns_str} into {target_store_ns_str}");
|
||||
|
||||
synced_ns.insert(target_ns.clone());
|
||||
|
||||
match check_and_create_ns(¶ms, &target_ns) {
|
||||
Ok(true) => task_log!(worker, "Created namespace {}", target_ns),
|
||||
Ok(true) => info!("Created namespace {target_ns}"),
|
||||
Ok(false) => {}
|
||||
Err(err) => {
|
||||
task_log!(
|
||||
worker,
|
||||
"Cannot sync {} into {} - {}",
|
||||
source_store_ns_str,
|
||||
target_store_ns_str,
|
||||
err,
|
||||
);
|
||||
info!("Cannot sync {source_store_ns_str} into {target_store_ns_str} - {err}");
|
||||
errors = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
match pull_ns(worker, &namespace, &mut params).await {
|
||||
match pull_ns(&namespace, &mut params).await {
|
||||
Ok((ns_progress, ns_pull_stats, ns_errors)) => {
|
||||
errors |= ns_errors;
|
||||
|
||||
@ -1393,29 +1310,23 @@ pub(crate) async fn pull_store(
|
||||
if params.max_depth != Some(0) {
|
||||
groups += ns_progress.done_groups;
|
||||
snapshots += ns_progress.done_snapshots;
|
||||
task_log!(
|
||||
worker,
|
||||
"Finished syncing namespace {}, current progress: {} groups, {} snapshots",
|
||||
namespace,
|
||||
groups,
|
||||
snapshots,
|
||||
info!(
|
||||
"Finished syncing namespace {namespace}, current progress: {groups} groups, {snapshots} snapshots"
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
errors = true;
|
||||
task_log!(
|
||||
worker,
|
||||
"Encountered errors while syncing namespace {} - {}",
|
||||
info!(
|
||||
"Encountered errors while syncing namespace {} - {err}",
|
||||
&namespace,
|
||||
err,
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if params.remove_vanished {
|
||||
let (has_errors, stats) = check_and_remove_vanished_ns(worker, ¶ms, synced_ns)?;
|
||||
let (has_errors, stats) = check_and_remove_vanished_ns(¶ms, synced_ns)?;
|
||||
errors |= has_errors;
|
||||
pull_stats.add(PullStats::from(stats));
|
||||
}
|
||||
@ -1440,7 +1351,6 @@ pub(crate) async fn pull_store(
|
||||
/// - remote namespaces are filtered by remote
|
||||
/// - owner check for vanished groups done here
|
||||
pub(crate) async fn pull_ns(
|
||||
worker: &WorkerTask,
|
||||
namespace: &BackupNamespace,
|
||||
params: &mut PullParameters,
|
||||
) -> Result<(StoreProgress, PullStats, bool), Error> {
|
||||
@ -1460,11 +1370,9 @@ pub(crate) async fn pull_ns(
|
||||
.into_iter()
|
||||
.filter(|group| group.apply_filters(¶ms.group_filter))
|
||||
.collect();
|
||||
task_log!(
|
||||
worker,
|
||||
"found {} groups to sync (out of {} total)",
|
||||
list.len(),
|
||||
unfiltered_count
|
||||
info!(
|
||||
"found {} groups to sync (out of {unfiltered_count} total)",
|
||||
list.len()
|
||||
);
|
||||
|
||||
let mut errors = false;
|
||||
@ -1492,15 +1400,10 @@ pub(crate) async fn pull_ns(
|
||||
{
|
||||
Ok(result) => result,
|
||||
Err(err) => {
|
||||
task_log!(
|
||||
worker,
|
||||
"sync group {} failed - group lock failed: {}",
|
||||
&group,
|
||||
err
|
||||
);
|
||||
info!("sync group {} failed - group lock failed: {err}", &group);
|
||||
errors = true;
|
||||
// do not stop here, instead continue
|
||||
task_log!(worker, "create_locked_backup_group failed");
|
||||
info!("create_locked_backup_group failed");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@ -1508,19 +1411,16 @@ pub(crate) async fn pull_ns(
|
||||
// permission check
|
||||
if params.owner != owner {
|
||||
// only the owner is allowed to create additional snapshots
|
||||
task_log!(
|
||||
worker,
|
||||
"sync group {} failed - owner check failed ({} != {})",
|
||||
&group,
|
||||
params.owner,
|
||||
owner
|
||||
info!(
|
||||
"sync group {} failed - owner check failed ({} != {owner})",
|
||||
&group, params.owner
|
||||
);
|
||||
errors = true; // do not stop here, instead continue
|
||||
} else {
|
||||
match pull_group(worker, params, namespace, &group, &mut progress).await {
|
||||
match pull_group(params, namespace, &group, &mut progress).await {
|
||||
Ok(stats) => pull_stats.add(stats),
|
||||
Err(err) => {
|
||||
task_log!(worker, "sync group {} failed - {}", &group, err,);
|
||||
info!("sync group {} failed - {err}", &group);
|
||||
errors = true; // do not stop here, instead continue
|
||||
}
|
||||
}
|
||||
@ -1542,7 +1442,7 @@ pub(crate) async fn pull_ns(
|
||||
if !local_group.apply_filters(¶ms.group_filter) {
|
||||
continue;
|
||||
}
|
||||
task_log!(worker, "delete vanished group '{local_group}'",);
|
||||
info!("delete vanished group '{local_group}'");
|
||||
let delete_stats_result = params
|
||||
.target
|
||||
.store
|
||||
@ -1551,10 +1451,7 @@ pub(crate) async fn pull_ns(
|
||||
match delete_stats_result {
|
||||
Ok(stats) => {
|
||||
if !stats.all_removed() {
|
||||
task_log!(
|
||||
worker,
|
||||
"kept some protected snapshots of group '{local_group}'",
|
||||
);
|
||||
info!("kept some protected snapshots of group '{local_group}'");
|
||||
pull_stats.add(PullStats::from(RemovedVanishedStats {
|
||||
snapshots: stats.removed_snapshots(),
|
||||
groups: 0,
|
||||
@ -1569,7 +1466,7 @@ pub(crate) async fn pull_ns(
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
task_log!(worker, "{}", err);
|
||||
info!("{err}");
|
||||
errors = true;
|
||||
}
|
||||
}
|
||||
@ -1577,7 +1474,7 @@ pub(crate) async fn pull_ns(
|
||||
Ok(())
|
||||
});
|
||||
if let Err(err) = result {
|
||||
task_log!(worker, "error during cleanup: {}", err);
|
||||
info!("error during cleanup: {err}");
|
||||
errors = true;
|
||||
};
|
||||
}
|
||||
|
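The hunks above show the core of the migration: every `task_log!(worker, ...)` call site drops the worker handle and the positional `{}` parameters in favor of tracing's level macros with inline-captured identifiers. A minimal standalone sketch of that pattern, assuming only the `tracing` and `tracing-subscriber` crates added to the workspace above (the namespace strings and error are made-up values):

    use tracing::info;

    fn main() {
        // install a plain formatting subscriber so the event is visible on stdout;
        // in the real codebase a subscriber is expected to be set up by the server
        tracing_subscriber::fmt().init();

        let source_store_ns_str = "remote/store1/ns1"; // hypothetical values
        let target_store_ns_str = "local/store2/ns1";
        let err = "connection reset by peer";

        // before: task_log!(worker, "Cannot sync {} into {} - {}", src, tgt, err);
        // after: no worker handle, identifiers captured directly in the format string
        info!("Cannot sync {source_store_ns_str} into {target_store_ns_str} - {err}");
    }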
@ -1,13 +1,14 @@
use anyhow::{bail, format_err, Context, Error};
+use tracing::{info, warn};

use pbs_config::{acl::AclTree, token_shadow, BackupLockGuard};
use proxmox_lang::try_block;
use proxmox_ldap::{Config, Connection, SearchParameters, SearchResult};
use proxmox_rest_server::WorkerTask;
use proxmox_schema::{ApiType, Schema};
use proxmox_section_config::SectionConfigData;
-use proxmox_sys::{task_log, task_warn};

-use std::{collections::HashSet, sync::Arc};
+use std::collections::HashSet;

use pbs_api_types::{
    AdRealmConfig, ApiToken, Authid, LdapRealmConfig, Realm, RealmType, RemoveVanished,
@ -39,7 +40,7 @@ pub fn do_realm_sync_job(
        move |worker| {
            job.start(&worker.upid().to_string()).unwrap();

-            task_log!(worker, "starting realm sync for {}", realm.as_str());
+            info!("starting realm sync for {}", realm.as_str());

            let override_settings = GeneralSyncSettingsOverride {
                remove_vanished,
@ -49,12 +50,12 @@ pub fn do_realm_sync_job(
            async move {
                match realm_type {
                    RealmType::Ldap => {
-                        LdapRealmSyncJob::new(worker, realm, &override_settings, dry_run)?
+                        LdapRealmSyncJob::new(realm, &override_settings, dry_run)?
                            .sync()
                            .await
                    }
                    RealmType::Ad => {
-                        AdRealmSyncJob::new(worker, realm, &override_settings, dry_run)?
+                        AdRealmSyncJob::new(realm, &override_settings, dry_run)?
                            .sync()
                            .await
                    }
@ -73,7 +74,6 @@ struct AdRealmSyncJob(LdapRealmSyncJob);

impl AdRealmSyncJob {
    fn new(
-        worker: Arc<WorkerTask>,
        realm: Realm,
        override_settings: &GeneralSyncSettingsOverride,
        dry_run: bool,
@ -98,7 +98,6 @@ impl AdRealmSyncJob {
        let ldap_config = auth::AdAuthenticator::api_type_to_config(&config)?;

        Ok(Self(LdapRealmSyncJob {
-            worker,
            realm,
            general_sync_settings: sync_settings,
            ldap_sync_settings: sync_attributes,
@ -114,7 +113,6 @@ impl AdRealmSyncJob {

/// Implementation for syncing LDAP realms
struct LdapRealmSyncJob {
-    worker: Arc<WorkerTask>,
    realm: Realm,
    general_sync_settings: GeneralSyncSettings,
    ldap_sync_settings: LdapSyncSettings,
@ -125,7 +123,6 @@ struct LdapRealmSyncJob {
impl LdapRealmSyncJob {
    /// Create new LdapRealmSyncJob
    fn new(
-        worker: Arc<WorkerTask>,
        realm: Realm,
        override_settings: &GeneralSyncSettingsOverride,
        dry_run: bool,
@ -150,7 +147,6 @@ impl LdapRealmSyncJob {
        let ldap_config = auth::LdapAuthenticator::api_type_to_config(&config)?;

        Ok(Self {
-            worker,
            realm,
            general_sync_settings: sync_settings,
            ldap_sync_settings: sync_attributes,
@ -162,10 +158,7 @@ impl LdapRealmSyncJob {
    /// Perform realm synchronization
    async fn sync(&self) -> Result<(), Error> {
        if self.dry_run {
-            task_log!(
-                self.worker,
-                "this is a DRY RUN - changes will not be persisted"
-            );
+            info!("this is a DRY RUN - changes will not be persisted");
        }

        let ldap = Connection::new(self.ldap_config.clone());
@ -247,7 +240,7 @@ impl LdapRealmSyncJob {
                anyhow::Ok(())
            });
            if let Err(e) = result {
-                task_log!(self.worker, "could not create/update user: {e}");
+                info!("could not create/update user: {e}");
            }
        }

@ -266,18 +259,10 @@ impl LdapRealmSyncJob {

        if let Some(existing_user) = existing_user {
            if existing_user != new_or_updated_user {
-                task_log!(
-                    self.worker,
-                    "updating user {}",
-                    new_or_updated_user.userid.as_str()
-                );
+                info!("updating user {}", new_or_updated_user.userid.as_str());
            }
        } else {
-            task_log!(
-                self.worker,
-                "creating user {}",
-                new_or_updated_user.userid.as_str()
-            );
+            info!("creating user {}", new_or_updated_user.userid.as_str());
        }

        user_config.set_data(
@ -299,10 +284,7 @@ impl LdapRealmSyncJob {
                let schema = schema.unwrap_string_schema();

                if let Err(e) = schema.check_constraints(value) {
-                    task_warn!(
-                        self.worker,
-                        "{userid}: ignoring attribute `{attribute}`: {e}"
-                    );
+                    warn!("{userid}: ignoring attribute `{attribute}`: {e}");

                    None
                } else {
@ -381,7 +363,7 @@ impl LdapRealmSyncJob {
        to_delete: &[Userid],
    ) -> Result<(), Error> {
        for userid in to_delete {
-            task_log!(self.worker, "deleting user {}", userid.as_str());
+            info!("deleting user {}", userid.as_str());

            // Delete the user
            user_config.sections.remove(userid.as_str());
@ -408,7 +390,7 @@ impl LdapRealmSyncJob {

        if !self.dry_run {
            if let Err(e) = token_shadow::delete_secret(&tokenid) {
-                task_warn!(self.worker, "could not delete token for user {userid}: {e}",)
+                warn!("could not delete token for user {userid}: {e}",)
            }
        }
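Besides `info!`, the realm-sync changes also map `task_warn!` to `warn!`, so severity now travels with the event instead of being flattened into the task log text. A small sketch of the two levels side by side (user and attribute names are placeholders, not values from the commit):

    use tracing::{info, warn};

    fn main() {
        tracing_subscriber::fmt().init();

        let userid = "jdoe@ldap"; // placeholder
        let attribute = "mail"; // placeholder

        // task_log!(self.worker, ...) -> info!(...)
        // task_warn!(self.worker, ...) -> warn!(...)
        info!("creating user {userid}");
        warn!("{userid}: ignoring attribute `{attribute}`: value failed schema check");
    }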
@ -1,9 +1,9 @@
use anyhow::{format_err, Error};
+use tracing::{error, info};

use pbs_api_types::{Authid, Operation, VerificationJobConfig};
use pbs_datastore::DataStore;
use proxmox_rest_server::WorkerTask;
-use proxmox_sys::task_log;

use crate::{
    backup::{verify_all_backups, verify_filter},
@ -34,9 +34,9 @@ pub fn do_verification_job(
        move |worker| {
            job.start(&worker.upid().to_string())?;

-            task_log!(worker, "Starting datastore verify job '{}'", job_id);
+            info!("Starting datastore verify job '{job_id}'");
            if let Some(event_str) = schedule {
-                task_log!(worker, "task triggered by schedule '{}'", event_str);
+                info!("task triggered by schedule '{event_str}'");
            }

            let ns = match verification_job.ns {
@ -58,9 +58,9 @@ pub fn do_verification_job(
            let job_result = match result {
                Ok(ref failed_dirs) if failed_dirs.is_empty() => Ok(()),
                Ok(ref failed_dirs) => {
-                    task_log!(worker, "Failed to verify the following snapshots/groups:");
+                    error!("Failed to verify the following snapshots/groups:");
                    for dir in failed_dirs {
-                        task_log!(worker, "\t{}", dir);
+                        error!("\t{dir}");
                    }

                    Err(format_err!(
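Note that the verification job does more than a mechanical swap: the messages listing failed snapshots move from plain `task_log!` to `error!`, so subscribers can filter them by level. A sketch of the resulting shape, with a fabricated job id and failure list:

    use tracing::{error, info};

    fn main() {
        tracing_subscriber::fmt().init();

        let job_id = "verify-datastore1"; // fabricated
        let failed_dirs = vec!["vm/100/2024-01-01T00:00:00Z"]; // fabricated

        info!("Starting datastore verify job '{job_id}'");
        if !failed_dirs.is_empty() {
            // failures are now emitted at the error level
            error!("Failed to verify the following snapshots/groups:");
            for dir in &failed_dirs {
                error!("\t{dir}");
            }
        }
    }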
@ -12,6 +12,7 @@ use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use serde::Deserialize;
use serde_json::Value;
+use tracing::info;

use proxmox_sys::fs::{
    atomic_open_or_create_file, file_read_optional_string, lock_file, replace_file, CreateOptions,
@ -19,7 +20,7 @@ use proxmox_sys::fs::{

use proxmox_io::ReadExt;
use proxmox_section_config::SectionConfigData;
-use proxmox_sys::{task_log, WorkerTaskContext};
+use proxmox_sys::WorkerTaskContext;
use proxmox_uuid::Uuid;

use pbs_api_types::{Fingerprint, LtoTapeDrive, VirtualTapeDrive};
@ -381,11 +382,9 @@ pub fn request_and_load_media(
) -> Result<(Box<dyn TapeDriver>, MediaId), Error> {
    let check_label = |handle: &mut dyn TapeDriver, uuid: &proxmox_uuid::Uuid| {
        if let Ok((Some(media_id), _)) = handle.read_label() {
-            task_log!(
-                worker,
+            info!(
                "found media label {} ({})",
-                media_id.label.label_text,
-                media_id.label.uuid,
+                media_id.label.label_text, media_id.label.uuid,
            );

            if media_id.label.uuid == *uuid {
@ -423,19 +422,15 @@ pub fn request_and_load_media(
    let update_and_log_request_error =
        |old: &mut TapeRequestError, new: TapeRequestError| -> Result<(), Error> {
            if new != *old {
-                task_log!(worker, "{}", new);
+                info!("{new}");
                let (device_type, device) = if let Some(changer) = changer {
                    ("changer", changer.as_str())
                } else {
                    ("drive", drive)
                };

-                task_log!(
-                    worker,
-                    "Please insert media '{}' into {} '{}'",
-                    label_text,
-                    device_type,
-                    device
+                info!(
+                    "Please insert media '{label_text}' into {device_type} '{device}'"
                );
                send_load_media_notification(
                    notification_mode,
@ -460,19 +455,9 @@ pub fn request_and_load_media(
                std::thread::sleep(std::time::Duration::from_millis(100));
            }
        } else if drive_config.changer.is_none() {
-            task_log!(
-                worker,
-                "Checking for media '{}' in drive '{}'",
-                label_text,
-                drive
-            );
+            info!("Checking for media '{label_text}' in drive '{drive}'");
        } else {
-            task_log!(
-                worker,
-                "trying to load media '{}' into drive '{}'",
-                label_text,
-                drive
-            );
+            info!("trying to load media '{label_text}' into drive '{drive}'");
        }

        if drive_config.changer.is_some() {
@ -499,8 +484,7 @@ pub fn request_and_load_media(

        let request_error = match handle.read_label() {
            Ok((Some(media_id), _)) if media_id.label.uuid == label.uuid => {
-                task_log!(
-                    worker,
+                info!(
                    "found media label {} ({})",
                    media_id.label.label_text,
                    media_id.label.uuid.to_string(),
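The tape-drive hunks show how multi-line `task_log!` invocations collapse once the worker argument and the positional parameters go away: a six-line call becomes one line. A sketch with placeholder media and drive names:

    use tracing::info;

    fn main() {
        tracing_subscriber::fmt().init();

        let label_text = "tape-001"; // placeholder
        let drive = "lto-drive0"; // placeholder

        // before:
        //     task_log!(worker, "trying to load media '{}' into drive '{}'", label_text, drive);
        // after:
        info!("trying to load media '{label_text}' into drive '{drive}'");
    }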
@ -11,8 +11,8 @@ use std::sync::{Arc, Mutex};
use std::time::SystemTime;

use anyhow::{bail, Error};
+use tracing::{info, warn};

-use proxmox_sys::{task_log, task_warn};
use proxmox_uuid::Uuid;

use pbs_datastore::{DataStore, SnapshotReader};
@ -62,7 +62,6 @@ impl PoolWriter {
    pub fn new(
        mut pool: MediaPool,
        drive_name: &str,
-        worker: &WorkerTask,
        notification_mode: TapeNotificationMode,
        force_media_set: bool,
        ns_magic: bool,
@ -71,11 +70,11 @@ impl PoolWriter {

        let new_media_set_reason = pool.start_write_session(current_time, force_media_set)?;
        if let Some(reason) = new_media_set_reason {
-            task_log!(worker, "starting new media set - reason: {}", reason,);
+            info!("starting new media set - reason: {reason}");
        }

        let media_set_uuid = pool.current_media_set().uuid();
-        task_log!(worker, "media set uuid: {}", media_set_uuid);
+        info!("media set uuid: {media_set_uuid}");

        let mut catalog_set = CatalogSet::new();

@ -130,7 +129,7 @@ impl PoolWriter {
    }

    /// Eject media and drop PoolWriterState (close drive)
-    pub fn eject_media(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+    pub fn eject_media(&mut self) -> Result<(), Error> {
        let mut status = match self.status.take() {
            Some(status) => status,
            None => return Ok(()), // no media loaded
@ -139,13 +138,13 @@ impl PoolWriter {
        let (drive_config, _digest) = pbs_config::drive::config()?;

        if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
-            task_log!(worker, "eject media");
+            info!("eject media");
            status.drive.eject_media()?; // rewind and eject early, so that unload_media is faster
            drop(status); // close drive
-            task_log!(worker, "unload media");
+            info!("unload media");
            changer.unload_media(None)?; //eject and unload
        } else {
-            task_log!(worker, "standalone drive - ejecting media");
+            info!("standalone drive - ejecting media");
            status.drive.eject_media()?;
        }

@ -153,14 +152,14 @@ impl PoolWriter {
    }

    /// Export current media set and drop PoolWriterState (close drive)
-    pub fn export_media_set(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+    pub fn export_media_set(&mut self) -> Result<(), Error> {
        let mut status = self.status.take();

        let (drive_config, _digest) = pbs_config::drive::config()?;

        if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
            if let Some(ref mut status) = status {
-                task_log!(worker, "rewind media");
+                info!("rewind media");
                // rewind first so that the unload command later does not run into a timeout
                status.drive.rewind()?;
            }
@ -170,25 +169,15 @@ impl PoolWriter {
                let media = self.pool.lookup_media(media_uuid)?;
                let label_text = media.label_text();
                if let Some(slot) = changer.export_media(label_text)? {
-                    task_log!(
-                        worker,
-                        "exported media '{}' to import/export slot {}",
-                        label_text,
-                        slot
-                    );
+                    info!("exported media '{label_text}' to import/export slot {slot}");
                } else {
-                    task_warn!(
-                        worker,
-                        "export failed - media '{}' is not online or in different drive",
-                        label_text
+                    warn!(
+                        "export failed - media '{label_text}' is not online or in different drive"
                    );
                }
            }
        } else if let Some(mut status) = status {
-            task_log!(
-                worker,
-                "standalone drive - ejecting media instead of export"
-            );
+            info!("standalone drive - ejecting media instead of export");
            status.drive.eject_media()?;
        }

@ -238,11 +227,7 @@ impl PoolWriter {
            return Ok(media_uuid);
        }

-        task_log!(
-            worker,
-            "allocated new writable media '{}'",
-            media.label_text()
-        );
+        info!("allocated new writable media '{}'", media.label_text());

        if let Some(PoolWriterState { mut drive, .. }) = self.status.take() {
            if let Some(uuid) = &last_media_uuid {
@ -252,7 +237,7 @@ impl PoolWriter {
                    .set_media_bytes_used(uuid, Some(stats.total_used_native_capacity))?;
            }

-            task_log!(worker, "eject current media");
+            info!("eject current media");
            drive.eject_media()?;
        }
    }
@ -270,7 +255,7 @@ impl PoolWriter {
        // test for critical tape alert flags
        if let Ok(alert_flags) = drive.tape_alert_flags() {
            if !alert_flags.is_empty() {
-                task_log!(worker, "TapeAlertFlags: {:?}", alert_flags);
+                info!("TapeAlertFlags: {alert_flags:?}");
                if tape_alert_flags_critical(alert_flags) {
                    self.pool.set_media_status_damaged(&media_uuid)?;
                    bail!(
@ -281,20 +266,15 @@ impl PoolWriter {
            }
        }

-        let (catalog, is_new_media) = update_media_set_label(
-            worker,
-            drive.as_mut(),
-            old_media_id.media_set_label,
-            media.id(),
-        )?;
+        let (catalog, is_new_media) =
+            update_media_set_label(drive.as_mut(), old_media_id.media_set_label, media.id())?;

        self.catalog_set.lock().unwrap().append_catalog(catalog)?;

        let media_set = media.media_set_label().unwrap();

        if is_new_media && media_set.seq_nr >= MEDIA_SET_SEQ_NR_WARN_LIMIT {
-            task_warn!(
-                worker,
+            warn!(
                "large media-set detected ({}), consider using a different allocation policy",
                media_set.seq_nr
            );
@ -311,7 +291,7 @@ impl PoolWriter {

        if is_new_media {
            // add catalogs from previous media
-            self.append_media_set_catalogs(worker)?;
+            self.append_media_set_catalogs()?;
        }

        self.used_tapes.insert(media_uuid.clone());
@ -331,12 +311,12 @@ impl PoolWriter {
    // Check it tape is loaded, then move to EOM (if not already there)
    //
    // Returns the tape position at EOM.
-    fn prepare_tape_write(status: &mut PoolWriterState, worker: &WorkerTask) -> Result<u64, Error> {
+    fn prepare_tape_write(status: &mut PoolWriterState) -> Result<u64, Error> {
        if !status.at_eom {
-            task_log!(worker, "moving to end of media");
+            info!("moving to end of media");
            status.drive.move_to_eom(true)?;
            status.at_eom = true;
-            task_log!(worker, "arrived at end of media");
+            info!("arrived at end of media");
        }

        let current_file_number = status.drive.current_file_number()?;
@ -357,7 +337,7 @@ impl PoolWriter {
    /// on the media (return value 'Ok(false, _)'). In that case, the
    /// archive is marked incomplete. The caller should mark the media
    /// as full and try again using another media.
-    pub fn append_catalog_archive(&mut self, worker: &WorkerTask) -> Result<bool, Error> {
+    pub fn append_catalog_archive(&mut self) -> Result<bool, Error> {
        let catalog_magic = self.catalog_version();

        let status = match self.status {
@ -365,7 +345,7 @ impl PoolWriter {
            None => bail!("PoolWriter - no media loaded"),
        };

-        Self::prepare_tape_write(status, worker)?;
+        Self::prepare_tape_write(status)?;

        let catalog_set = self.catalog_set.lock().unwrap();

@ -408,7 +388,7 @@ impl PoolWriter {
    }

    // Append catalogs for all previous media in set (without last)
-    fn append_media_set_catalogs(&mut self, worker: &WorkerTask) -> Result<(), Error> {
+    fn append_media_set_catalogs(&mut self) -> Result<(), Error> {
        let media_set = self.pool.current_media_set();

        let mut media_list = media_set.media_list();
@ -424,7 +404,7 @@ impl PoolWriter {
            None => bail!("PoolWriter - no media loaded"),
        };

-        Self::prepare_tape_write(status, worker)?;
+        Self::prepare_tape_write(status)?;

        for (seq_nr, uuid) in media_list.iter().enumerate() {
            let uuid = match uuid {
@ -436,7 +416,7 @@ impl PoolWriter {

            let mut file = Self::open_catalog_file(uuid)?;

-            task_log!(worker, "write catalog for previous media: {}", uuid);
+            info!("write catalog for previous media: {uuid}");

            if tape_write_catalog(
                writer.as_mut(),
@ -467,7 +447,6 @@ impl PoolWriter {
    /// media.
    pub fn append_snapshot_archive(
        &mut self,
-        worker: &WorkerTask,
        snapshot_reader: &SnapshotReader,
    ) -> Result<(bool, usize), Error> {
        let status = match self.status {
@ -475,7 +454,7 @@ impl PoolWriter {
            None => bail!("PoolWriter - no media loaded"),
        };

-        let current_file_number = Self::prepare_tape_write(status, worker)?;
+        let current_file_number = Self::prepare_tape_write(status)?;

        let (done, bytes_written) = {
            let mut writer: Box<dyn TapeWrite> = status.drive.write_file()?;
@ -521,7 +500,7 @@ impl PoolWriter {
            None => bail!("PoolWriter - no media loaded"),
        };

-        let current_file_number = Self::prepare_tape_write(status, worker)?;
+        let current_file_number = Self::prepare_tape_write(status)?;

        let writer = status.drive.write_file()?;

@ -533,8 +512,7 @@ impl PoolWriter {
        status.bytes_written_after_sync += bytes_written;

        let elapsed = start_time.elapsed()?.as_secs_f64();
-        task_log!(
-            worker,
+        info!(
            "wrote {} chunks ({:.2} MB at {:.2} MB/s)",
            saved_chunks.len(),
            bytes_written as f64 / 1_000_000.0,
@ -614,7 +592,7 @@ fn write_chunk_archive<'a>(
        }

        if writer.bytes_written() > max_size {
-            //task_log!(worker, "Chunk Archive max size reached, closing archive");
+            //info!("Chunk Archive max size reached, closing archive");
            break;
        }
    }
@ -628,7 +606,6 @@ fn write_chunk_archive<'a>(
// set label does not match the expected media set, overwrite the
// media set label.
fn update_media_set_label(
-    worker: &WorkerTask,
    drive: &mut dyn TapeDriver,
    old_set: Option<MediaSetLabel>,
    media_id: &MediaId,
@ -657,7 +634,7 @@ fn update_media_set_label(

    let new_media = match old_set {
        None => {
-            task_log!(worker, "writing new media set label");
+            info!("writing new media set label");
            drive.write_media_set_label(new_set, key_config.as_ref())?;
            media_catalog = MediaCatalog::overwrite(TAPE_STATUS_DIR, media_id, false)?;
            true
@ -681,8 +658,7 @@ fn update_media_set_label(

        false
    } else {
-        task_log!(
-            worker,
+        info!(
            "writing new media set label (overwrite '{}/{}')",
            media_set_label.uuid.to_string(),
            media_set_label.seq_nr,
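In the pool writer the bigger win is in the signatures: `eject_media`, `export_media_set`, `prepare_tape_write`, and friends no longer thread a `&WorkerTask` through purely to log. A stripped-down sketch of that refactoring, where `PoolWriterState` is a stand-in struct holding only the field the example needs:

    use tracing::info;

    struct PoolWriterState {
        at_eom: bool,
    }

    // before: fn prepare_tape_write(status: &mut PoolWriterState, worker: &WorkerTask) -> Result<u64, Error>
    // after: the worker parameter disappears because logging needs no handle
    fn prepare_tape_write(status: &mut PoolWriterState) -> u64 {
        if !status.at_eom {
            info!("moving to end of media");
            status.at_eom = true;
            info!("arrived at end of media");
        }
        0 // placeholder for the real file-number query on the drive
    }

    fn main() {
        tracing_subscriber::fmt().init();
        let mut status = PoolWriterState { at_eom: false };
        prepare_tape_write(&mut status);
    }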
@ -16,14 +16,13 @@ use ::serde::{Deserialize, Serialize};

use proxmox_lang::error::io_err_other;
use proxmox_lang::{io_bail, io_format_err};
-use proxmox_rest_server::WorkerTask;
use proxmox_schema::api;
use proxmox_sys::linux::procfs::{mountinfo::Device, MountInfo};
-use proxmox_sys::task_log;

use pbs_api_types::{BLOCKDEVICE_DISK_AND_PARTITION_NAME_REGEX, BLOCKDEVICE_NAME_REGEX};

mod zfs;
+use tracing::info;
pub use zfs::*;
mod zpool_status;
pub use zpool_status::*;
@ -1116,7 +1115,7 @@ pub fn inititialize_gpt_disk(disk: &Disk, uuid: Option<&str>) -> Result<(), Error>

/// Wipes all labels and the first 200 MiB of a disk/partition (or the whole if it is smaller).
/// If called with a partition, also sets the partition type to 0x83 'Linux filesystem'.
-pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error> {
+pub fn wipe_blockdev(disk: &Disk) -> Result<(), Error> {
    let disk_path = match disk.device_path() {
        Some(path) => path,
        None => bail!("disk {:?} has no node in /dev", disk.syspath()),
@ -1137,13 +1136,13 @@ pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error>

    to_wipe.push(disk_path.to_path_buf());

-    task_log!(worker, "Wiping block device {}", disk_path.display());
+    info!("Wiping block device {}", disk_path.display());

    let mut wipefs_command = std::process::Command::new("wipefs");
    wipefs_command.arg("--all").args(&to_wipe);

    let wipefs_output = proxmox_sys::command::run_command(wipefs_command, None)?;
-    task_log!(worker, "wipefs output: {}", wipefs_output);
+    info!("wipefs output: {wipefs_output}");

    let size = disk.size().map(|size| size / 1024 / 1024)?;
    let count = size.min(200);
@ -1163,21 +1162,17 @@ pub fn wipe_blockdev(disk: &Disk, worker: Arc<WorkerTask>) -> Result<(), Error>
    dd_command.args(args);

    let dd_output = proxmox_sys::command::run_command(dd_command, None)?;
-    task_log!(worker, "dd output: {}", dd_output);
+    info!("dd output: {dd_output}");

    if is_partition {
        // set the partition type to 0x83 'Linux filesystem'
-        change_parttype(disk, "8300", worker)?;
+        change_parttype(disk, "8300")?;
    }

    Ok(())
}

-pub fn change_parttype(
-    part_disk: &Disk,
-    part_type: &str,
-    worker: Arc<WorkerTask>,
-) -> Result<(), Error> {
+pub fn change_parttype(part_disk: &Disk, part_type: &str) -> Result<(), Error> {
    let part_path = match part_disk.device_path() {
        Some(path) => path,
        None => bail!("disk {:?} has no node in /dev", part_disk.syspath()),
@ -1199,7 +1194,7 @@ pub fn change_parttype(
        };
        sgdisk_command.arg(part_disk_parent_path);
        let sgdisk_output = proxmox_sys::command::run_command(sgdisk_command, None)?;
-        task_log!(worker, "sgdisk output: {}", sgdisk_output);
+        info!("sgdisk output: {sgdisk_output}");
    }
    Ok(())
}
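The disk-management hunks follow the same recipe when logging external command output. A runnable approximation, substituting a plain `std::process::Command` call for `proxmox_sys::command::run_command` (the `echo` invocation is a stand-in, not the real wipefs call):

    use tracing::info;

    fn main() -> std::io::Result<()> {
        tracing_subscriber::fmt().init();

        // `echo` stands in for the real wipefs invocation
        let output = std::process::Command::new("echo").arg("wiped").output()?;
        let wipefs_output = String::from_utf8_lossy(&output.stdout);

        // before: task_log!(worker, "wipefs output: {}", wipefs_output);
        info!("wipefs output: {wipefs_output}");
        Ok(())
    }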
@ -7,24 +7,25 @@ extern crate tokio;

use proxmox_lang::try_block;
use proxmox_sys::fs::CreateOptions;
-use proxmox_sys::{task_log, WorkerTaskContext};
+use proxmox_sys::WorkerTaskContext;

use pbs_api_types::{Authid, UPID};

use proxmox_rest_server::{CommandSocket, WorkerTask};
+use tracing::info;

fn garbage_collection(worker: &WorkerTask) -> Result<(), Error> {
-    task_log!(worker, "start garbage collection");
+    info!("start garbage collection");

    for i in 0..50 {
        worker.check_abort()?;

-        task_log!(worker, "progress {}", i);
+        info!("progress {i}");

        std::thread::sleep(std::time::Duration::from_millis(10));
    }

-    task_log!(worker, "end garbage collection");
+    info!("end garbage collection");

    Ok(())
}