api: make prune-group a real workertask
`prune-group` is currently not a real workertask, i.e. it behaves like one but doesn't start a thread or task to do its work. Change it to start a tokio task, so that we can delete snapshots asynchronously. The `dry-run` option still behaves in the same way and returns early.

This paves the way for the new logging infra (which uses `task_local` to define a logger) and improves performance for bigger backup groups.

Signed-off-by: Gabriel Goller <g.goller@proxmox.com>
commit 432de66a36
parent b757c61621
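In essence, the handler now wraps the existing prune loop in a closure and picks one of two WorkerTask entry points depending on the new `use-task` flag. A condensed sketch of that dispatch (the same calls as in the full diff below, with error handling and the loop body trimmed):

    let prune_group = move |worker: Arc<WorkerTask>| {
        // ... log retention options, prune/destroy snapshots,
        // collect PruneResult entries ...
        prune_result
    };

    if param["use-task"].as_bool().unwrap_or(false) {
        // asynchronous: spawn a tokio-backed worker, return its UPID right away
        let upid = WorkerTask::spawn(
            "prune",
            Some(worker_id),
            auth_id.to_string(),
            true,
            move |worker| async move {
                let _ = prune_group(worker.clone());
                Ok(())
            },
        )?;
        Ok(json!(upid))
    } else {
        // synchronous: the old behavior, still recorded in a task log
        let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
        let result = prune_group(worker.clone());
        worker.log_result(&Ok(()));
        Ok(json!(result))
    }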
@@ -945,6 +945,12 @@ pub fn verify(
                 type: BackupNamespace,
                 optional: true,
             },
+            "use-task": {
+                type: bool,
+                default: false,
+                optional: true,
+                description: "Spins up an asynchronous task that does the work.",
+            },
         },
     },
     returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
@@ -961,7 +967,7 @@ pub fn prune(
     keep_options: KeepOptions,
     store: String,
     ns: Option<BackupNamespace>,
-    _param: Value,
+    param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -979,7 +985,20 @@ pub fn prune(
     let worker_id = format!("{}:{}:{}", store, ns, group);
     let group = datastore.backup_group(ns.clone(), group);
 
-    let mut prune_result = Vec::new();
+    #[derive(Debug, serde::Serialize)]
+    struct PruneResult {
+        #[serde(rename = "backup-type")]
+        backup_type: BackupType,
+        #[serde(rename = "backup-id")]
+        backup_id: String,
+        #[serde(rename = "backup-time")]
+        backup_time: i64,
+        keep: bool,
+        protected: bool,
+        #[serde(skip_serializing_if = "Option::is_none")]
+        ns: Option<BackupNamespace>,
+    }
+    let mut prune_result: Vec<PruneResult> = Vec::new();
 
     let list = group.list_backups()?;
 
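The typed `PruneResult` replaces the ad-hoc `json!` objects below while keeping the wire format identical: the serde renames reproduce the kebab-case keys, and `skip_serializing_if` omits `ns` for the root namespace, just like the old code. A standalone illustration (not part of the commit; it assumes `BackupType::Vm` serializes as "vm", as pbs_api_types' lowercase variants do):

    let r = PruneResult {
        backup_type: BackupType::Vm,
        backup_id: "100".to_owned(),
        backup_time: 1_700_000_000,
        keep: true,
        protected: false,
        ns: None, // left out of the JSON entirely
    };
    assert_eq!(
        serde_json::to_value(&r)?,
        serde_json::json!({
            "backup-type": "vm",
            "backup-id": "100",
            "backup-time": 1_700_000_000,
            "keep": true,
            "protected": false,
        })
    );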
@@ -992,78 +1011,97 @@ pub fn prune(
     if dry_run {
         for (info, mark) in prune_info {
             let keep = keep_all || mark.keep();
+            let backup_dir = &info.backup_dir;
 
-            let mut result = json!({
-                "backup-type": info.backup_dir.backup_type(),
-                "backup-id": info.backup_dir.backup_id(),
-                "backup-time": info.backup_dir.backup_time(),
-                "keep": keep,
-                "protected": mark.protected(),
-            });
-            let prune_ns = info.backup_dir.backup_ns();
+            let mut result = PruneResult {
+                backup_type: backup_dir.backup_type(),
+                backup_id: backup_dir.backup_id().to_owned(),
+                backup_time: backup_dir.backup_time(),
+                keep,
+                protected: mark.protected(),
+                ns: None,
+            };
+
+            let prune_ns = backup_dir.backup_ns();
             if !prune_ns.is_root() {
-                result["ns"] = serde_json::to_value(prune_ns)?;
+                result.ns = Some(prune_ns.to_owned());
             }
             prune_result.push(result);
         }
         return Ok(json!(prune_result));
     }
 
-    // We use a WorkerTask just to have a task log, but run synchrounously
-    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
-
-    if keep_all {
-        task_log!(worker, "No prune selection - keeping all files.");
-    } else {
-        let mut opts = Vec::new();
-        if !ns.is_root() {
-            opts.push(format!("--ns {ns}"));
-        }
-        crate::server::cli_keep_options(&mut opts, &keep_options);
-
-        task_log!(worker, "retention options: {}", opts.join(" "));
-        task_log!(
-            worker,
-            "Starting prune on {} group \"{}\"",
-            print_store_and_ns(&store, &ns),
-            group.group(),
-        );
-    }
+    let prune_group = move |worker: Arc<WorkerTask>| {
+        if keep_all {
+            task_log!(worker, "No prune selection - keeping all files.");
+        } else {
+            let mut opts = Vec::new();
+            if !ns.is_root() {
+                opts.push(format!("--ns {ns}"));
+            }
+            crate::server::cli_keep_options(&mut opts, &keep_options);
 
-    for (info, mark) in prune_info {
-        let keep = keep_all || mark.keep();
-
-        let backup_time = info.backup_dir.backup_time();
-        let timestamp = info.backup_dir.backup_time_string();
-        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
-
-        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);
-
-        task_log!(worker, "{}", msg);
-
-        prune_result.push(json!({
-            "backup-type": group.ty,
-            "backup-id": group.id,
-            "backup-time": backup_time,
-            "keep": keep,
-            "protected": mark.protected(),
-        }));
-
-        if !(dry_run || keep) {
-            if let Err(err) = info.backup_dir.destroy(false) {
-                task_warn!(
-                    worker,
-                    "failed to remove dir {:?}: {}",
-                    info.backup_dir.relative_path(),
-                    err,
-                );
-            }
-        }
-    }
+            task_log!(worker, "retention options: {}", opts.join(" "));
+            task_log!(
+                worker,
+                "Starting prune on {} group \"{}\"",
+                print_store_and_ns(&store, &ns),
+                group.group(),
+            );
+        }
 
-    worker.log_result(&Ok(()));
-
-    Ok(json!(prune_result))
+        for (info, mark) in prune_info {
+            let keep = keep_all || mark.keep();
+            let backup_dir = &info.backup_dir;
+
+            let backup_time = backup_dir.backup_time();
+            let timestamp = backup_dir.backup_time_string();
+            let group: &pbs_api_types::BackupGroup = backup_dir.as_ref();
+
+            let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id);
+
+            task_log!(worker, "{msg}");
+
+            prune_result.push(PruneResult {
+                backup_type: group.ty,
+                backup_id: group.id.clone(),
+                backup_time,
+                keep,
+                protected: mark.protected(),
+                ns: None,
+            });
+
+            if !keep {
+                if let Err(err) = backup_dir.destroy(false) {
+                    task_warn!(
+                        worker,
+                        "failed to remove dir {:?}: {}",
+                        backup_dir.relative_path(),
+                        err,
+                    );
+                }
+            }
+        }
+        prune_result
+    };
+
+    if param["use-task"].as_bool().unwrap_or(false) {
+        let upid = WorkerTask::spawn(
+            "prune",
+            Some(worker_id),
+            auth_id.to_string(),
+            true,
+            move |worker| async move {
+                let _ = prune_group(worker.clone());
+                Ok(())
+            },
+        )?;
+        Ok(json!(upid))
+    } else {
+        let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
+        let result = prune_group(worker.clone());
+        worker.log_result(&Ok(()));
+        Ok(json!(result))
+    }
 }
 
 #[api(
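Note the change in the endpoint's return value: with `use-task` set, the handler returns the worker's UPID string instead of the result array, so callers have to look up the task to get the outcome. A hypothetical client-side sketch (`call_prune` is illustrative, not part of this commit):

    let response: serde_json::Value = call_prune(/* store, group, keep options, "use-task": true */)?;
    match response {
        // asynchronous path: a UPID string; follow the task API for progress
        serde_json::Value::String(upid) => println!("prune running as task {upid}"),
        // synchronous (or dry-run) path: the usual per-snapshot result list
        serde_json::Value::Array(results) => println!("{} snapshots evaluated", results.len()),
        other => eprintln!("unexpected response: {other}"),
    }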
@@ -52,6 +52,7 @@ Ext.define('PBS.Datastore.PruneInputPanel', {
         if (me.ns && me.ns !== '') {
             values.ns = me.ns;
         }
+        values["use-task"] = true;
         return values;
     },