GC: flatten existing status into job status

This avoids drifting definitions and reduces duplication. With the next major
release, the 'upid' field could then be renamed and aliased to be in line with
the other jobs, which all use 'last-run-upid'. Doing it now would break
existing callers of the GC status endpoint (or consumers of the on-disk status
file).

The main difference is that the GC status fields are no longer optional (except
for the UPID) in the job status, since flattening an optional value is not
possible. This only affects datastores that were never GCed at all, and only
direct API consumers, since the UI handles those fields correctly.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
This commit is contained in:
Fabian Grünbichler 2024-04-22 11:02:57 +02:00
parent b7fce90b12
commit 3ae21d87c1
4 changed files with 31 additions and 58 deletions

View File

@ -1280,7 +1280,7 @@ pub struct TypeCounts {
}, },
}, },
)] )]
#[derive(Clone, Default, Serialize, Deserialize, PartialEq)] #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// Garbage collection status. /// Garbage collection status.
pub struct GarbageCollectionStatus { pub struct GarbageCollectionStatus {
@ -1309,11 +1309,10 @@ pub struct GarbageCollectionStatus {
#[api( #[api(
properties: { properties: {
"last-run-upid": { "status": {
optional: true, type: GarbageCollectionStatus,
type: UPID,
},
}, },
}
)] )]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
@ -1321,21 +1320,8 @@ pub struct GarbageCollectionStatus {
pub struct GarbageCollectionJobStatus { pub struct GarbageCollectionJobStatus {
/// Datastore /// Datastore
pub store: String, pub store: String,
/// upid of the last run gc job #[serde(flatten)]
#[serde(skip_serializing_if = "Option::is_none")] pub status: GarbageCollectionStatus,
pub last_run_upid: Option<String>,
/// Sum of removed bytes.
#[serde(skip_serializing_if = "Option::is_none")]
pub removed_bytes: Option<u64>,
/// Number of removed chunks
#[serde(skip_serializing_if = "Option::is_none")]
pub removed_chunks: Option<usize>,
/// Sum of pending bytes
#[serde(skip_serializing_if = "Option::is_none")]
pub pending_bytes: Option<u64>,
/// Number of pending chunks
#[serde(skip_serializing_if = "Option::is_none")]
pub pending_chunks: Option<usize>,
/// Schedule of the gc job /// Schedule of the gc job
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub schedule: Option<String>, pub schedule: Option<String>,

View File

@ -35,13 +35,13 @@ use pxar::EntryKind;
use pbs_api_types::{ use pbs_api_types::{
print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType, print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus,
GarbageCollectionJobStatus, GarbageCollectionStatus, GroupListItem, JobScheduleStatus, GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation,
KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
}; };
use pbs_client::pxar::{create_tar, create_zip}; use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
@ -1273,35 +1273,15 @@ pub fn garbage_collection_job_status(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let status_in_memory = datastore.last_gc_status(); let status_in_memory = datastore.last_gc_status();
let state_file = JobState::load("garbage_collection", &store) let state_file = JobState::load("garbage_collection", &store)
.map_err(|err| { .map_err(|err| log::error!("could not open GC statefile for {store}: {err}"))
log::error!(
"could not open statefile for {:?}: {}",
info.last_run_upid,
err
)
})
.ok(); .ok();
let mut selected_upid = None; match status_in_memory.upid {
if status_in_memory.upid.is_some() { Some(ref upid) => {
selected_upid = status_in_memory.upid;
} else if let Some(JobState::Finished { upid, .. }) = &state_file {
selected_upid = Some(upid.to_owned());
}
info.last_run_upid = selected_upid.clone();
match selected_upid {
Some(upid) => {
info.removed_bytes = Some(status_in_memory.removed_bytes);
info.removed_chunks = Some(status_in_memory.removed_chunks);
info.pending_bytes = Some(status_in_memory.pending_bytes);
info.pending_chunks = Some(status_in_memory.pending_chunks);
let mut computed_schedule: JobScheduleStatus = JobScheduleStatus::default(); let mut computed_schedule: JobScheduleStatus = JobScheduleStatus::default();
let mut duration = None; let mut duration = None;
if let Some(state) = state_file { if let Some(state) = state_file {
if let Ok(cs) = compute_schedule_status(&state, info.last_run_upid.as_deref()) { if let Ok(cs) = compute_schedule_status(&state, Some(&upid)) {
computed_schedule = cs; computed_schedule = cs;
} }
} }
@ -1327,6 +1307,7 @@ pub fn garbage_collection_job_status(
} }
} }
info.status = status_in_memory;
info.next_run = computed_schedule.next_run; info.next_run = computed_schedule.next_run;
info.last_run_endtime = computed_schedule.last_run_endtime; info.last_run_endtime = computed_schedule.last_run_endtime;
info.last_run_state = computed_schedule.last_run_state; info.last_run_state = computed_schedule.last_run_state;

View File

@ -200,7 +200,13 @@ Ext.define('PBS.Utils', {
}, },
render_task_status: function(value, metadata, record, rowIndex, colIndex, store) { render_task_status: function(value, metadata, record, rowIndex, colIndex, store) {
if (!record.data['last-run-upid'] && !store.getById('last-run-upid')?.data.value) { // GC tasks use 'upid' for backwards-compat, rest use 'last-run-upid'
if (
!record.data['last-run-upid'] &&
!store.getById('last-run-upid')?.data.value &&
!record.data.upid &&
!store.getById('upid')?.data.value
) {
return '-'; return '-';
} }

View File

@ -1,14 +1,14 @@
Ext.define('pbs-gc-jobs-status', { Ext.define('pbs-gc-jobs-status', {
extend: 'Ext.data.Model', extend: 'Ext.data.Model',
fields: [ fields: [
'store', 'last-run-upid', 'removed-bytes', 'pending-bytes', 'schedule', 'store', 'upid', 'removed-bytes', 'pending-bytes', 'schedule',
'next-run', 'last-run-endtime', 'last-run-state', 'next-run', 'last-run-endtime', 'last-run-state',
{ {
name: 'duration', name: 'duration',
calculate: function(data) { calculate: function(data) {
let endtime = data['last-run-endtime']; let endtime = data['last-run-endtime'];
if (!endtime) return undefined; if (!endtime) return undefined;
let task = Proxmox.Utils.parse_task_upid(data['last-run-upid']); let task = Proxmox.Utils.parse_task_upid(data['upid']);
return endtime - task.starttime; return endtime - task.starttime;
}, },
}, },
@ -97,7 +97,7 @@ Ext.define('PBS.config.GCJobView', {
showTaskLog: function() { showTaskLog: function() {
let me = this; let me = this;
let upid = this.getData()['last-run-upid']; let upid = this.getData().upid;
if (!upid) return; if (!upid) return;
Ext.create('Proxmox.window.TaskViewer', { upid }).show(); Ext.create('Proxmox.window.TaskViewer', { upid }).show();
@ -147,7 +147,7 @@ Ext.define('PBS.config.GCJobView', {
xtype: 'proxmoxButton', xtype: 'proxmoxButton',
text: gettext('Show Log'), text: gettext('Show Log'),
handler: 'showTaskLog', handler: 'showTaskLog',
enableFn: (rec) => !!rec.data["last-run-upid"], enableFn: (rec) => !!rec.data.upid,
disabled: true, disabled: true,
}, },
{ {
@ -214,7 +214,7 @@ Ext.define('PBS.config.GCJobView', {
{ {
header: gettext('Removed Data'), header: gettext('Removed Data'),
dataIndex: 'removed-bytes', dataIndex: 'removed-bytes',
renderer: (value) => value !== undefined renderer: (value, meta, record) => record.data.upid !== null
? Proxmox.Utils.format_size(value, true) : "-", ? Proxmox.Utils.format_size(value, true) : "-",
sortable: false, sortable: false,
minWidth: 85, minWidth: 85,
@ -223,7 +223,7 @@ Ext.define('PBS.config.GCJobView', {
{ {
header: gettext('Pending Data'), header: gettext('Pending Data'),
dataIndex: 'pending-bytes', dataIndex: 'pending-bytes',
renderer: (value) => value !== undefined renderer: (value, meta, record) => record.data.upid !== null
? Proxmox.Utils.format_size(value, true) : "-", ? Proxmox.Utils.format_size(value, true) : "-",
sortable: false, sortable: false,
minWidth: 80, minWidth: 80,