mirror of git://git.proxmox.com/git/proxmox-backup.git synced 2025-01-07 17:18:03 +03:00

more clippy fixes and annotations

the remaining ones are:
- type complexity
- fns with many arguments
- new() without default()
- false positives for redundant closures (where the closure returns a static
  value)
- expected vs actual length check without match/cmp

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Fabian Grünbichler 2022-07-26 13:36:14 +02:00
parent 3be9106f1c
commit e1db06705e
52 changed files with 188 additions and 225 deletions
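
The remaining categories listed in the commit message would be silenced with the same kind of `#[allow(...)]` annotations this commit applies elsewhere. A minimal, hypothetical sketch (illustrative names, not code from this tree):

use std::collections::HashMap;

// clippy::type_complexity: nested generics in a signature are annotated, not rewritten.
#[allow(clippy::type_complexity)]
fn chunk_index() -> HashMap<[u8; 32], Vec<(usize, u64)>> {
    HashMap::new()
}

// clippy::new_without_default: a `new()` with no matching `Default` impl.
pub struct TaskCounter(u64);

impl TaskCounter {
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
        TaskCounter(0)
    }
}

// clippy::too_many_arguments, silenced the same way as on `verify`/`pull` below.
#[allow(clippy::too_many_arguments)]
fn many_args(a: u8, b: u8, c: u8, d: u8, e: u8, f: u8, g: u8, h: u8) -> u64 {
    [a, b, c, d, e, f, g, h].iter().map(|v| *v as u64).sum()
}

fn main() {
    let _ = TaskCounter::new();
    let _ = chunk_index();
    assert_eq!(many_args(1, 2, 3, 4, 5, 6, 7, 8), 36);
}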

View File

@@ -1190,7 +1190,7 @@ pub struct TypeCounts {
},
},
)]
#[derive(Clone, Serialize, Deserialize)]
#[derive(Clone, Default, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Garbage collection status.
pub struct GarbageCollectionStatus {
@@ -1217,24 +1217,6 @@ pub struct GarbageCollectionStatus {
pub still_bad: usize,
}
impl Default for GarbageCollectionStatus {
fn default() -> Self {
GarbageCollectionStatus {
upid: None,
index_file_count: 0,
index_data_bytes: 0,
disk_bytes: 0,
disk_chunks: 0,
removed_bytes: 0,
removed_chunks: 0,
pending_bytes: 0,
pending_chunks: 0,
removed_bad: 0,
still_bad: 0,
}
}
}
#[api(
properties: {
"gc-status": {

View File

@@ -542,7 +542,7 @@ where
let file = root
.lookup(&path)
.await?
.ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
.ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
let mut components = file.entry().path().components();
components.next_back(); // discard last
@@ -586,7 +586,7 @@ where
let entry = root
.lookup(&path)
.await?
.ok_or(format_err!("error looking up '{:?}'", path))?;
.ok_or_else(|| format_err!("error looking up '{:?}'", path))?;
let realfile = accessor.follow_hardlink(&entry).await?;
let metadata = realfile.entry().metadata();
let realpath = Path::new(link);
@@ -705,7 +705,7 @@ where
let file = root
.lookup(&path)
.await?
.ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
.ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
let prefix = {
let mut components = file.entry().path().components();
@@ -753,7 +753,7 @@ where
let entry = root
.lookup(&path)
.await?
.ok_or(format_err!("error looking up '{:?}'", path))?;
.ok_or_else(|| format_err!("error looking up '{:?}'", path))?;
let realfile = accessor.follow_hardlink(&entry).await?;
let metadata = realfile.entry().metadata();
log::debug!("adding '{}' to zip", path.display());
@@ -841,7 +841,7 @@ where
let file = root
.lookup(&path)
.await?
.ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
.ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
recurse_files_extractor(&mut extractor, file).await
}
@@ -917,8 +917,8 @@ fn get_filename(entry: &Entry) -> Result<(OsString, CString), Error> {
Ok((file_name_os, file_name))
}
async fn recurse_files_extractor<'a, T>(
extractor: &'a mut Extractor,
async fn recurse_files_extractor<T>(
extractor: &mut Extractor,
file: FileEntry<T>,
) -> Result<(), Error>
where
@@ -960,8 +960,8 @@ where
Ok(())
}
async fn seq_files_extractor<'a, T>(
extractor: &'a mut Extractor,
async fn seq_files_extractor<T>(
extractor: &mut Extractor,
mut decoder: pxar::decoder::aio::Decoder<T>,
) -> Result<(), Error>
where
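
The `ok_or` → `ok_or_else` changes above address eager argument evaluation: `ok_or` builds its `format_err!` argument even when the lookup succeeds, while `ok_or_else` defers that work to the error path. A stand-alone illustration, using plain `String` errors and a hypothetical path for brevity:

fn main() {
    let found: Option<u32> = Some(7);

    // Eager: the error String is allocated although `found` is Some.
    let _eager: Result<u32, String> = found.ok_or(format!("error opening '{}'", "/some/path"));

    // Lazy: the closure only runs if the Option is None.
    let _lazy: Result<u32, String> = found.ok_or_else(|| format!("error opening '{}'", "/some/path"));
}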

View File

@@ -134,10 +134,12 @@ pub fn format_single_line_entry(entry: &Entry) -> String {
_ => ("0".to_string(), String::new()),
};
let owner_string = format!("{}/{}", meta.stat.uid, meta.stat.gid);
format!(
"{} {:<13} {} {:>8} {:?}{}",
mode_string,
format!("{}/{}", meta.stat.uid, meta.stat.gid),
owner_string,
format_mtime(&meta.stat.mtime),
size,
entry.path(),

View File

@@ -619,7 +619,7 @@ impl BackupInfo {
})
}
pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
pub fn sort_list(list: &mut [BackupInfo], ascendending: bool) {
if ascendending {
// oldest first
list.sort_unstable_by(|a, b| a.backup_dir.dir.time.cmp(&b.backup_dir.dir.time));

View File

@@ -1181,7 +1181,7 @@ impl DataStore {
/// stat'ed are placed at the end of the list
pub fn get_chunks_in_order<F, A>(
&self,
index: &Box<dyn IndexFile + Send>,
index: &(dyn IndexFile + Send),
skip_chunk: F,
check_abort: A,
) -> Result<Vec<(usize, u64)>, Error>

View File

@@ -209,7 +209,7 @@ impl Iterator for ListGroups {
group_type,
) {
Ok(ty) => self.id_state = Some(ty),
Err(err) => return Some(Err(err.into())),
Err(err) => return Some(Err(err)),
}
}
}

View File

@@ -146,7 +146,7 @@ impl<'a, F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'a, F> {
Some(Operation::Read),
)?;
let order =
datastore.get_chunks_in_order(&index, &self.skip_fn, |_| Ok(()))?;
datastore.get_chunks_in_order(&*index, &self.skip_fn, |_| Ok(()))?;
self.current_index = Some((Arc::new(index), 0, order));
} else {

View File

@@ -262,8 +262,8 @@ pub fn lookup_device<'a>(devices: &'a [TapeDeviceInfo], path: &str) -> Option<&'
}
/// Lookup optional drive identification attributes
pub fn lookup_device_identification<'a>(
devices: &'a [TapeDeviceInfo],
pub fn lookup_device_identification(
devices: &[TapeDeviceInfo],
path: &str,
) -> OptionalDeviceIdentification {
if let Some(info) = lookup_device(devices, path) {

View File

@@ -27,11 +27,10 @@ pub fn initialize_element_status<F: AsRawFd>(file: &mut F) -> Result<(), Error>
// like mtx(1), set a very long timeout (30 minutes)
sg_raw.set_timeout(30 * 60);
let mut cmd = Vec::new();
cmd.extend(&[0x07, 0, 0, 0, 0, 0]); // INITIALIZE ELEMENT STATUS (07h)
let cmd = &[0x07, 0, 0, 0, 0, 0]; // INITIALIZE ELEMENT STATUS (07h)
sg_raw
.do_command(&cmd)
.do_command(cmd)
.map_err(|err| format_err!("initializte element status (07h) failed - {}", err))?;
Ok(())
@@ -123,15 +122,16 @@ fn read_element_address_assignment<F: AsRawFd>(
let mut sg_raw = SgRaw::new(file, allocation_len as usize)?;
sg_raw.set_timeout(SCSI_CHANGER_DEFAULT_TIMEOUT);
let mut cmd = Vec::new();
cmd.push(0x1A); // MODE SENSE6 (1Ah)
cmd.push(0x08); // DBD=1 (The Disable Block Descriptors)
cmd.push(0x1D); // Element Address Assignment Page
cmd.push(0);
cmd.push(allocation_len); // allocation len
cmd.push(0); //control
let cmd = &[
0x1A, // MODE SENSE6 (1Ah)
0x08, // DBD=1 (The Disable Block Descriptors)
0x1D, // Element Address Assignment Page
0,
allocation_len, // allocation len
0, //control
];
let data = execute_scsi_command(&mut sg_raw, &cmd, "read element address assignment", true)?;
let data = execute_scsi_command(&mut sg_raw, cmd, "read element address assignment", true)?;
proxmox_lang::try_block!({
let mut reader = &data[..];
@@ -146,6 +146,7 @@ fn read_element_address_assignment<F: AsRawFd>(
.map_err(|err: Error| format_err!("decode element address assignment page failed - {}", err))
}
#[allow(clippy::vec_init_then_push)]
fn scsi_move_medium_cdb(
medium_transport_address: u16,
source_element_address: u16,
@@ -276,6 +277,7 @@ impl ElementType {
}
}
#[allow(clippy::vec_init_then_push)]
fn scsi_read_element_status_cdb(
start_element_address: u16,
number_of_elements: u16,
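
Two patterns show up in this file: command descriptor blocks whose contents are fully static become plain arrays, while CDBs that are built conditionally keep the `Vec` + `push` construction and silence `clippy::vec_init_then_push` instead. A hypothetical sketch of both cases, mirroring the MODE SENSE(6) bytes above with a fixed allocation length:

fn static_cdb() -> &'static [u8] {
    // Fully static contents: a plain array, no Vec needed.
    &[0x1A, 0x08, 0x1D, 0, 0xFF, 0]
}

#[allow(clippy::vec_init_then_push)]
fn conditional_cdb(immediate: bool) -> Vec<u8> {
    // Contents depend on a flag, so Vec + push stays and the lint is allowed.
    let mut cmd = Vec::new();
    cmd.push(0x10); // opcode
    cmd.push(if immediate { 1 } else { 0 }); // IMMED bit
    cmd
}

fn main() {
    assert_eq!(static_cdb()[4], 0xFF);
    assert_eq!(conditional_cdb(true), vec![0x10, 1]);
}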

View File

@@ -175,6 +175,7 @@ impl SgTape {
/// of data. After the command is successfully completed, the
/// drive is positioned immediately before End Of Data (not End Of
/// Tape).
#[allow(clippy::vec_init_then_push)]
pub fn erase_media(&mut self, fast: bool) -> Result<(), Error> {
let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
@@ -273,6 +274,7 @@ impl SgTape {
Ok(())
}
#[allow(clippy::unusual_byte_groupings)]
pub fn locate_file(&mut self, position: u64) -> Result<(), Error> {
if position == 0 {
return self.rewind();
@@ -534,8 +536,7 @@ impl SgTape {
})?;
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
let mut cmd = Vec::new();
cmd.push(0x10);
let mut cmd = vec![0x10];
if immediate {
cmd.push(1); // IMMED=1
} else {
@@ -668,16 +669,17 @@ impl SgTape {
let mut sg_raw = SgRaw::new(&mut self.file, 0).unwrap(); // cannot fail with size 0
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
let mut cmd = Vec::new();
cmd.push(0x08); // READ
cmd.push(0x02); // VARIABLE SIZED BLOCKS, SILI=1
//cmd.push(0x00); // VARIABLE SIZED BLOCKS, SILI=0
cmd.push(((transfer_len >> 16) & 0xff) as u8);
cmd.push(((transfer_len >> 8) & 0xff) as u8);
cmd.push((transfer_len & 0xff) as u8);
cmd.push(0); // control byte
let cmd = &[
0x08, // READ
0x02, // VARIABLE SIZED BLOCKS, SILI=1
//0x00, // VARIABLE SIZED BLOCKS, SILI=0
((transfer_len >> 16) & 0xff) as u8,
((transfer_len >> 8) & 0xff) as u8,
(transfer_len & 0xff) as u8,
0, // control byte
];
let data = match sg_raw.do_in_command(&cmd, buffer) {
let data = match sg_raw.do_in_command(cmd, buffer) {
Ok(data) => data,
Err(ScsiError::Sense(SenseInfo {
sense_key: 0,
@@ -734,6 +736,7 @@ impl SgTape {
}
/// Set important drive options
#[allow(clippy::vec_init_then_push)]
pub fn set_drive_options(
&mut self,
compression: Option<bool>,

View File

@@ -73,6 +73,7 @@ struct SspSetDataEncryptionPage {
/* key follows */
}
#[allow(clippy::vec_init_then_push)]
fn sg_spout_set_encryption<F: AsRawFd>(
file: &mut F,
algorythm_index: u8,
@@ -129,6 +130,7 @@ fn sg_spout_set_encryption<F: AsRawFd>(
}
// Warning: this blocks and fails if there is no media loaded
#[allow(clippy::vec_init_then_push)]
fn sg_spin_data_encryption_status<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
let allocation_len: u32 = 8192 + 4;
@@ -157,6 +159,7 @@ fn sg_spin_data_encryption_status<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, E
}
// Warning: this blocks and fails if there is no media loaded
#[allow(clippy::vec_init_then_push)]
fn sg_spin_data_encryption_caps<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
let allocation_len: u32 = 8192 + 4;

View File

@@ -24,6 +24,7 @@ struct MamAttributeHeader {
len: u16,
}
#[allow(clippy::upper_case_acronyms)]
enum MamFormat {
BINARY,
ASCII,

View File

@@ -79,6 +79,7 @@ pub fn read_tape_alert_flags<F: AsRawFd>(file: &mut F) -> Result<TapeAlertFlags,
decode_tape_alert_flags(&data)
}
#[allow(clippy::vec_init_then_push)]
fn sg_read_tape_alert_flags<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
let mut sg_raw = SgRaw::new(file, 512)?;

View File

@@ -22,6 +22,7 @@ pub fn read_volume_statistics<F: AsRawFd>(file: &mut F) -> Result<Lp17VolumeStat
decode_volume_statistics(&data)
}
#[allow(clippy::vec_init_then_push)]
fn sg_read_volume_statistics<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
let alloc_len: u16 = 8192;
let mut sg_raw = SgRaw::new(file, alloc_len as usize)?;

View File

@@ -236,10 +236,12 @@ pub struct ModeParameterHeader {
}
impl ModeParameterHeader {
#[allow(clippy::unusual_byte_groupings)]
pub fn buffer_mode(&self) -> u8 {
(self.flags3 & 0b0111_0000) >> 4
(self.flags3 & 0b0_111_0000) >> 4
}
#[allow(clippy::unusual_byte_groupings)]
pub fn set_buffer_mode(&mut self, buffer_mode: bool) {
let mut mode = self.flags3 & 0b1_000_1111;
if buffer_mode {
@@ -248,8 +250,9 @@ impl ModeParameterHeader {
self.flags3 = mode;
}
#[allow(clippy::unusual_byte_groupings)]
pub fn write_protect(&self) -> bool {
(self.flags3 & 0b1000_0000) != 0
(self.flags3 & 0b1_000_0000) != 0
}
}
@@ -380,13 +383,11 @@ impl<'a, F: AsRawFd> SgRaw<'a, F> {
///
/// The file must be a handle to a SCSI device.
pub fn new(file: &'a mut F, buffer_size: usize) -> Result<Self, Error> {
let buffer;
if buffer_size > 0 {
buffer = alloc_page_aligned_buffer(buffer_size)?;
let buffer = if buffer_size > 0 {
alloc_page_aligned_buffer(buffer_size)?
} else {
buffer = Box::new([]);
}
Box::new([])
};
let sense_buffer = [0u8; 32];
@@ -683,8 +684,7 @@ pub fn scsi_mode_sense<F: AsRawFd, P: Endian>(
let allocation_len: u16 = 4096;
let mut sg_raw = SgRaw::new(file, allocation_len as usize)?;
let mut cmd = Vec::new();
cmd.push(0x5A); // MODE SENSE(10)
let mut cmd = vec![0x5A]; // MODE SENSE(10)
if disable_block_descriptor {
cmd.push(8); // DBD=1 (Disable Block Descriptors)
} else {
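
The regrouped masks above (`0b1_000_0000`, `0b0_111_0000`) split the flags byte along what appear to be its field boundaries (a 1-bit flag, a 3-bit mode, a 4-bit remainder) rather than into conventional groups of four, which is exactly what `clippy::unusual_byte_groupings` complains about, hence the paired `#[allow]`s. A sketch, assuming that 1/3/4-bit layout:

#[allow(clippy::unusual_byte_groupings)]
fn main() {
    let flags3: u8 = 0b1_010_0001; // 1-bit flag | 3-bit mode | 4-bit rest

    let write_protect = (flags3 & 0b1_000_0000) != 0;
    let buffer_mode = (flags3 & 0b0_111_0000) >> 4;

    assert!(write_protect);
    assert_eq!(buffer_mode, 0b010);
}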

View File

@@ -702,7 +702,7 @@ async fn create_backup(
let backup_id = param["backup-id"]
.as_str()
.unwrap_or(proxmox_sys::nodename());
.unwrap_or_else(|| proxmox_sys::nodename());
let backup_ns = optional_ns_param(&param)?;

View File

@@ -59,7 +59,7 @@ impl ServerAdapter for StaticAuthAdapter {
> {
Box::pin(async move {
match headers.get(hyper::header::AUTHORIZATION) {
Some(header) if header.to_str().unwrap_or("") == &self.ticket => {
Some(header) if header.to_str().unwrap_or("") == self.ticket => {
let user_info: Box<dyn UserInformation + Send + Sync> =
Box::new(SimpleUserInformation {});
Ok((String::from("root@pam"), user_info))

View File

@@ -82,6 +82,7 @@ struct LVMBucketData {
/// more subdirectories
/// e.g.: "/drive-scsi0/part/0/etc/passwd"
#[derive(Clone)]
#[allow(clippy::upper_case_acronyms)]
enum Bucket {
Partition(PartitionBucketData),
RawFs(PartitionBucketData),
@@ -91,7 +92,7 @@ enum Bucket {
impl Bucket {
fn filter_mut<'a, A: AsRef<str>, B: AsRef<str>>(
haystack: &'a mut Vec<Bucket>,
haystack: &'a mut [Bucket],
ty: A,
comp: &[B],
) -> Option<&'a mut Bucket> {

View File

@@ -676,7 +676,7 @@ const fn retry() -> Retry {
impl Retry {
fn tick(&mut self) -> Result<(), Error> {
if self.0 >= 3 {
Err(Error::Client(format!("kept getting a badNonce error!")))
Err(Error::Client("kept getting a badNonce error!".to_string()))
} else {
self.0 += 1;
Ok(())

View File

@@ -257,7 +257,7 @@ fn openid_auth_url(
let url = open_id.authorize_url(PROXMOX_BACKUP_RUN_DIR_M!(), &realm)?;
Ok(url.into())
Ok(url)
}
#[sortable]

View File

@@ -100,7 +100,7 @@ fn check_privs_and_load_store(
if limited {
let owner = datastore.get_owner(ns, backup_group)?;
check_backup_owner(&owner, &auth_id)?;
check_backup_owner(&owner, auth_id)?;
}
Ok(datastore)
@@ -778,6 +778,7 @@ pub async fn status(
///
/// This function can verify a single backup snapshot, all backup from a backup group,
/// or all backups in the datastore.
#[allow(clippy::too_many_arguments)]
pub fn verify(
store: String,
ns: Option<BackupNamespace>,
@@ -1287,7 +1288,7 @@ pub fn download_file(
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store,
store,
&backup_ns,
&auth_id,
PRIV_DATASTORE_READ,
@@ -1301,7 +1302,7 @@ pub fn download_file(
println!(
"Download {} from {} ({}/{})",
file_name,
print_store_and_ns(&store, &backup_ns),
print_store_and_ns(store, &backup_ns),
backup_dir,
file_name
);
@@ -1372,7 +1373,7 @@ pub fn download_file_decoded(
let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store,
store,
&backup_ns,
&auth_id,
PRIV_DATASTORE_READ,
@@ -1394,7 +1395,7 @@ pub fn download_file_decoded(
println!(
"Download {} from {} ({}/{})",
file_name,
print_store_and_ns(&store, &backup_ns),
print_store_and_ns(store, &backup_ns),
backup_dir_api,
file_name
);
@@ -1403,7 +1404,7 @@ pub fn download_file_decoded(
path.push(backup_dir.relative_path());
path.push(&file_name);
let extension = file_name.rsplitn(2, '.').next().unwrap();
let (_, extension) = file_name.rsplit_once('.').unwrap();
let body = match extension {
"didx" => {
@@ -1503,7 +1504,7 @@ pub fn upload_backup_log(
let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store,
store,
&backup_ns,
&auth_id,
0,
@@ -1524,7 +1525,7 @@ pub fn upload_backup_log(
println!(
"Upload backup log to {} {backup_dir_api}/{file_name}",
print_store_and_ns(&store, &backup_ns),
print_store_and_ns(store, &backup_ns),
);
let data = req_body
@@ -1667,7 +1668,7 @@ pub fn pxar_file_download(
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store,
store,
&ns,
&auth_id,
PRIV_DATASTORE_READ,

View File

@@ -255,7 +255,7 @@ impl BackupEnvironment {
pub fn lookup_chunk(&self, digest: &[u8; 32]) -> Option<u32> {
let state = self.state.lock().unwrap();
state.known_chunks.get(digest).map(|len| *len)
state.known_chunks.get(digest).copied()
}
/// Store the writer with an unique ID

View File

@@ -246,9 +246,9 @@ pub async fn do_register_account<'a>(
rsa_bits: Option<u32>,
) -> Result<&'a Account, Error> {
let contact = account_contact_from_string(&contact);
Ok(client
client
.new_account(name, agree_to_tos, contact, rsa_bits)
.await?)
.await
}
#[api(

View File

@@ -481,11 +481,7 @@ pub async fn scan_remote_groups(
let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
let args = if let Some(ns) = namespace {
Some(json!({ "ns": ns }))
} else {
None
};
let args = namespace.map(|ns| json!({ "ns": ns }));
let api_res = client
.get(&format!("api2/json/admin/datastore/{}/groups", store), args)

View File

@@ -90,6 +90,7 @@ pub fn list_keys(
},
)]
/// Change the encryption key's password (and password hint).
#[allow(clippy::too_many_arguments)]
pub fn change_passphrase(
kdf: Option<Kdf>,
password: Option<String>,

View File

@@ -397,7 +397,7 @@ async fn order_certificate(
.ok_or_else(|| format_err!("missing 'finalize' URL in order"))?;
if let Err(err) = acme.finalize(finalize, &csr.data).await {
if finalize_error_cnt >= 5 {
return Err(err.into());
return Err(err);
}
finalize_error_cnt += 1;

View File

@@ -88,7 +88,7 @@ pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
let name = data
.Where
.strip_prefix(BASE_MOUNT_DIR)
.unwrap_or_else(|| &data.Where)
.unwrap_or(&data.Where)
.to_string();
list.push(DatastoreMountInfo {

View File

@@ -59,6 +59,7 @@ use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT};
},
)]
/// Read syslog entries.
#[allow(clippy::too_many_arguments)]
fn get_journal(
since: Option<i64>,
until: Option<i64>,

View File

@@ -41,9 +41,9 @@ pub fn create_value_from_rrd(
let mut t = start;
for index in 0..data.len() {
let entry = timemap.entry(t).or_insert(json!({ "time": t }));
if let Some(value) = data[index] {
for value in data {
let entry = timemap.entry(t).or_insert_with(|| json!({ "time": t }));
if let Some(value) = value {
entry[*name] = value.into();
}
t += reso;

View File

@@ -144,7 +144,7 @@ fn get_syslog(
) -> Result<Value, Error> {
let service = param["service"]
.as_str()
.map(|service| crate::api2::node::services::real_service_name(service));
.map(crate::api2::node::services::real_service_name);
let (count, lines) = dump_journal(
param["start"].as_u64(),

View File

@@ -216,6 +216,7 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datasto
},
)]
/// Sync store from other repository
#[allow(clippy::too_many_arguments)]
async fn pull(
store: String,
ns: Option<BackupNamespace>,

View File

@@ -49,7 +49,7 @@ pub async fn datastore_status(
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
if !allowed {
if let Ok(datastore) = DataStore::lookup_datastore(&store, Some(Operation::Lookup)) {
if let Ok(datastore) = DataStore::lookup_datastore(store, Some(Operation::Lookup)) {
if can_access_any_namespace(datastore, &auth_id, &user_info) {
list.push(DataStoreStatusListItem::empty(store, None));
}
@@ -57,7 +57,7 @@ pub async fn datastore_status(
continue;
}
let datastore = match DataStore::lookup_datastore(&store, Some(Operation::Read)) {
let datastore = match DataStore::lookup_datastore(store, Some(Operation::Read)) {
Ok(datastore) => datastore,
Err(err) => {
list.push(DataStoreStatusListItem::empty(store, Some(err.to_string())));
@@ -127,7 +127,7 @@ pub async fn datastore_status(
list.push(entry);
}
Ok(list.into())
Ok(list)
}
const SUBDIRS: SubdirMap = &[(

View File

@@ -214,30 +214,28 @@ pub async fn list_media(
let inventory = Inventory::load(status_path)?;
let privs = user_info.lookup_privs(&auth_id, &["tape", "pool"]);
if (privs & PRIV_TAPE_AUDIT) != 0 {
if pool.is_none() {
for media_id in inventory.list_unassigned_media() {
let (mut status, location) = inventory.status_and_location(&media_id.label.uuid);
if (privs & PRIV_TAPE_AUDIT) != 0 && pool.is_none() {
for media_id in inventory.list_unassigned_media() {
let (mut status, location) = inventory.status_and_location(&media_id.label.uuid);
if status == MediaStatus::Unknown {
status = MediaStatus::Writable;
}
list.push(MediaListEntry {
uuid: media_id.label.uuid.clone(),
ctime: media_id.label.ctime,
label_text: media_id.label.label_text.to_string(),
location,
status,
catalog: true, // empty, so we do not need a catalog
expired: false,
media_set_uuid: None,
media_set_name: None,
media_set_ctime: None,
seq_nr: None,
pool: None,
});
if status == MediaStatus::Unknown {
status = MediaStatus::Writable;
}
list.push(MediaListEntry {
uuid: media_id.label.uuid.clone(),
ctime: media_id.label.ctime,
label_text: media_id.label.label_text.to_string(),
location,
status,
catalog: true, // empty, so we do not need a catalog
expired: false,
media_set_uuid: None,
media_set_name: None,
media_set_ctime: None,
seq_nr: None,
pool: None,
});
}
}

View File

@@ -316,6 +316,7 @@ pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
},
)]
/// Restore data from media-set. Namespaces will be automatically created if necessary.
#[allow(clippy::too_many_arguments)]
pub fn restore(
store: String,
drive: String,
@@ -631,7 +632,7 @@ fn restore_list_worker(
let mut restorable = Vec::new();
// restore source namespaces
for (store, snapshot) in catalog.list_snapshots() {
let (ns, dir) = match parse_ns_and_snapshot(&snapshot) {
let (ns, dir) = match parse_ns_and_snapshot(snapshot) {
Ok((ns, dir)) if store_map.has_full_mapping(store, &ns) => (ns, dir),
Err(err) => {
task_warn!(worker, "couldn't parse snapshot {snapshot} - {err}");
@@ -1194,7 +1195,6 @@ fn restore_partial_chunk_archive<'a>(
let verify_and_write_channel = writer_pool.channel();
while let Some((digest, blob)) = decoder.next_chunk()? {
worker.check_abort()?;
if chunk_list.remove(&digest) {
@@ -1878,13 +1878,10 @@ pub fn fast_catalog_restore(
let catalog_uuid = &archive_header.uuid;
let wanted = media_set
.media_list()
.iter()
.any(|e| match e {
None => false,
Some(uuid) => uuid == catalog_uuid,
});
let wanted = media_set.media_list().iter().any(|e| match e {
None => false,
Some(uuid) => uuid == catalog_uuid,
});
if !wanted {
task_log!(

View File

@@ -17,6 +17,7 @@ pub trait ProxmoxAuthenticator {
fn remove_password(&self, username: &UsernameRef) -> Result<(), Error>;
}
#[allow(clippy::upper_case_acronyms)]
struct PAM();
impl ProxmoxAuthenticator for PAM {
@@ -70,6 +71,7 @@ impl ProxmoxAuthenticator for PAM {
}
}
#[allow(clippy::upper_case_acronyms)]
struct PBS();
const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json");

View File

@@ -85,7 +85,7 @@ pub fn can_access_any_namespace(
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
let name = store.name();
iter.any(|ns| -> bool {
let user_privs = user_info.lookup_privs(&auth_id, &["datastore", name, &ns.to_string()]);
let user_privs = user_info.lookup_privs(auth_id, &["datastore", name, &ns.to_string()]);
user_privs & wanted != 0
})
}
@@ -136,7 +136,7 @@ impl<'a> ListAccessibleBackupGroups<'a> {
override_owner_priv: override_owner_priv.unwrap_or(0),
owner_and_priv: owner_and_priv.unwrap_or(0),
state: None,
store: store,
store,
user_info: CachedUserInfo::new()?,
})
}
@@ -157,11 +157,10 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
return Some(Ok(group));
}
if let Some(auth_id) = &self.auth_id {
match self.store.owns_backup(
&group.backup_ns(),
group.group(),
&auth_id,
) {
match self
.store
.owns_backup(group.backup_ns(), group.group(), auth_id)
{
Ok(is_owner) if is_owner => return Some(Ok(group)),
Ok(_) => continue,
Err(err) => return Some(Err(err)),
@@ -182,8 +181,7 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
if let Some(auth_id) = &self.auth_id {
let info = &self.user_info;
let privs =
info.lookup_privs(&auth_id, &ns.acl_path(self.store.name()));
let privs = info.lookup_privs(auth_id, &ns.acl_path(self.store.name()));
if privs & NS_PRIVS_OK == 0 {
continue;
@@ -196,7 +194,7 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
continue; // no owner override and no extra privs -> nothing visible
}
}
self.state = match ListGroups::new(Arc::clone(&self.store), ns) {
self.state = match ListGroups::new(Arc::clone(self.store), ns) {
Ok(iter) => Some((iter, override_owner)),
Err(err) => return Some(Err(err)),
};

View File

@@ -198,7 +198,7 @@ fn verify_index_chunks(
let chunk_list =
verify_worker
.datastore
.get_chunks_in_order(&index, skip_chunk, check_abort)?;
.get_chunks_in_order(&*index, skip_chunk, check_abort)?;
for (pos, _) in chunk_list {
verify_worker.worker.check_abort()?;

View File

@@ -276,6 +276,7 @@ fn task_mgmt_cli() -> CommandLineInterface {
}
)]
/// Sync datastore from another repository
#[allow(clippy::too_many_arguments)]
async fn pull_datastore(
remote: String,
remote_store: String,

View File

@@ -231,10 +231,7 @@ pub struct NodeConfig {
impl NodeConfig {
pub fn acme_config(&self) -> Option<Result<AcmeConfig, Error>> {
self.acme.as_deref().map(|config| -> Result<_, Error> {
Ok(crate::tools::config::from_property_string(
config,
&AcmeConfig::API_SCHEMA,
)?)
crate::tools::config::from_property_string(config, &AcmeConfig::API_SCHEMA)
})
}

View File

@@ -538,7 +538,7 @@ pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> {
/// send email on certificate renewal failure.
pub fn send_certificate_renewal_mail(result: &Result<(), Error>) -> Result<(), Error> {
let error: String = match result {
Err(e) => e.to_string().into(),
Err(e) => e.to_string(),
_ => return Ok(()),
};
@@ -620,8 +620,7 @@ fn handlebars_humam_bytes_helper(
) -> HelperResult {
let param = h
.param(0)
.map(|v| v.value().as_u64())
.flatten()
.and_then(|v| v.value().as_u64())
.ok_or_else(|| RenderError::new("human-bytes: param not found"))?;
out.write(&HumanByte::from(param).to_string())?;
@@ -638,13 +637,11 @@ fn handlebars_relative_percentage_helper(
) -> HelperResult {
let param0 = h
.param(0)
.map(|v| v.value().as_f64())
.flatten()
.and_then(|v| v.value().as_f64())
.ok_or_else(|| RenderError::new("relative-percentage: param0 not found"))?;
let param1 = h
.param(1)
.map(|v| v.value().as_f64())
.flatten()
.and_then(|v| v.value().as_f64())
.ok_or_else(|| RenderError::new("relative-percentage: param1 not found"))?;
if param1 == 0.0 {
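
`.map(f).flatten()` on an `Option` is clippy's `map_flatten` lint; `and_then` expresses the same thing in one step. The equivalence, with hypothetical values:

fn main() {
    let v: Option<&str> = Some("12");

    let a: Option<u64> = v.map(|s| s.parse().ok()).flatten();
    let b: Option<u64> = v.and_then(|s| s.parse().ok());

    assert_eq!(a, b);
}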

View File

@@ -324,10 +324,12 @@ pub fn compute_schedule_status(
}
};
let mut status = JobScheduleStatus::default();
status.last_run_upid = upid.map(String::from);
status.last_run_state = state;
status.last_run_endtime = endtime;
let mut status = JobScheduleStatus {
last_run_upid: upid.map(String::from),
last_run_state: state,
last_run_endtime: endtime,
..Default::default()
};
if let Some(schedule) = schedule {
if let Ok(event) = schedule.parse::<CalendarEvent>() {

View File

@@ -164,9 +164,9 @@ pub fn do_prune_job(
let worker_type = job.jobtype().to_string();
let auth_id = auth_id.clone();
let worker_id = match &prune_options.ns {
Some(ns) if ns.is_root() => format!("{store}"),
Some(ns) if ns.is_root() => store,
Some(ns) => format!("{store}:{ns}"),
None => format!("{store}"),
None => store,
};
let upid_str = WorkerTask::new_thread(

View File

@@ -788,7 +788,7 @@ fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result<
let mut created = false;
let store_ns_str = print_store_and_ns(params.store.name(), ns);
if !ns.is_root() && !params.store.namespace_path(&ns).exists() {
if !ns.is_root() && !params.store.namespace_path(ns).exists() {
check_ns_modification_privs(params.store.name(), ns, &params.owner)
.map_err(|err| format_err!("Creating {ns} not allowed - {err}"))?;
@@ -817,7 +817,7 @@ fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result<
}
fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> Result<bool, Error> {
check_ns_modification_privs(&params.store.name(), local_ns, &params.owner)
check_ns_modification_privs(params.store.name(), local_ns, &params.owner)
.map_err(|err| format_err!("Removing {local_ns} not allowed - {err}"))?;
params.store.remove_namespace_recursive(local_ns, true)

View File

@@ -116,13 +116,11 @@ impl<'a> ChunkArchiveWriter<'a> {
} else {
self.write_all(&blob_data[start..end])?
};
if leom {
if self.close_on_leom {
let mut writer = self.writer.take().unwrap();
writer.finish(false)?;
self.bytes_written = writer.bytes_written();
return Ok(chunk_is_complete);
}
if leom && self.close_on_leom {
let mut writer = self.writer.take().unwrap();
writer.finish(false)?;
self.bytes_written = writer.bytes_written();
return Ok(chunk_is_complete);
}
start = end;
}

View File

@@ -610,9 +610,7 @@ impl MediaCatalog {
}
self.pending.extend(store.as_bytes());
self.content
.entry(store.to_string())
.or_default();
self.content.entry(store.to_string()).or_default();
self.current_archive = Some((uuid, file_number, store.to_string()));
@@ -726,10 +724,7 @@ impl MediaCatalog {
self.pending.push(b':');
self.pending.extend(path.as_bytes());
let content = self
.content
.entry(store.to_string())
.or_default();
let content = self.content.entry(store.to_string()).or_default();
content.snapshot_index.insert(path, file_number);
@@ -857,9 +852,7 @@ impl MediaCatalog {
self.check_start_chunk_archive(file_number)?;
self.content
.entry(store.to_string())
.or_default();
self.content.entry(store.to_string()).or_default();
self.current_archive = Some((uuid, file_number, store.to_string()));
}
@@ -893,10 +886,7 @@ impl MediaCatalog {
let _ = parse_ns_and_snapshot(snapshot)?;
self.check_register_snapshot(file_number)?;
let content = self
.content
.entry(store.to_string())
.or_default();
let content = self.content.entry(store.to_string()).or_default();
content
.snapshot_index
@@ -1015,19 +1005,14 @@ impl MediaSetCatalog {
/// as (datastore, snapshot).
/// The snapshot contains namespaces in the format 'ns/namespace'.
pub fn list_snapshots(&self) -> impl Iterator<Item = (&str, &str)> {
self.catalog_list
.values()
.flat_map(|catalog| {
catalog
.content
.iter()
.flat_map(|(store, content)| {
content
.snapshot_index
.keys()
.map(move |key| (store.as_str(), key.as_str()))
})
self.catalog_list.values().flat_map(|catalog| {
catalog.content.iter().flat_map(|(store, content)| {
content
.snapshot_index
.keys()
.map(move |key| (store.as_str(), key.as_str()))
})
})
}
}

View File

@@ -344,13 +344,11 @@ impl MediaPool {
MediaLocation::Online(name) => {
if self.force_media_availability {
true
} else if let Some(ref changer_name) = self.changer_name {
name == changer_name
} else {
if let Some(ref changer_name) = self.changer_name {
name == changer_name
} else {
// a standalone drive cannot use media currently inside a library
false
}
// a standalone drive cannot use media currently inside a library
false
}
}
MediaLocation::Offline => {
@@ -686,10 +684,8 @@ impl MediaPool {
let media_location = media.location();
if self.location_is_available(media_location) {
last_is_writable = true;
} else {
if let MediaLocation::Vault(vault) = media_location {
bail!("writable media offsite in vault '{}'", vault);
}
} else if let MediaLocation::Vault(vault) = media_location {
bail!("writable media offsite in vault '{}'", vault);
}
}
_ => bail!(

View File

@@ -110,13 +110,12 @@ fn get_changelog_url(
command.arg("--print-uris");
command.arg(package);
let output = proxmox_sys::command::run_command(command, None)?; // format: 'http://foo/bar' package.changelog
let output = match output.splitn(2, ' ').next() {
Some(output) => {
if output.len() < 2 {
bail!("invalid output (URI part too short) from 'apt-get changelog --print-uris': {}", output)
}
output[1..output.len() - 1].to_owned()
}
let output = match output.split_once(' ') {
Some((uri, _file_name)) if uri.len() > 2 => uri[1..uri.len() - 1].to_owned(),
Some((uri, _file_name)) => bail!(
"invalid output (URI part too short) from 'apt-get changelog --print-uris': {}",
uri
),
None => bail!(
"invalid output from 'apt-get changelog --print-uris': {}",
output

View File

@@ -478,7 +478,7 @@ impl Disk {
let stat = unsafe { std::str::from_utf8_unchecked(&stat) };
let stat: Vec<u64> = stat
.split_ascii_whitespace()
.map(|s| u64::from_str_radix(s, 10).unwrap_or(0))
.map(|s| s.parse().unwrap_or_default())
.collect();
if stat.len() < 15 {
@@ -821,7 +821,7 @@ fn get_partitions_info(
let mut used = PartitionUsageType::Unused;
if let Some(devnum) = disk.devnum().ok() {
if let Ok(devnum) = disk.devnum() {
if lvm_devices.contains(&devnum) {
used = PartitionUsageType::LVM;
} else if zfs_devices.contains(&devnum) {

View File

@@ -203,7 +203,7 @@ pub fn get_smart_data(disk: &super::Disk, health_only: bool) -> Result<SmartData
})
}
static WEAROUT_FIELD_ORDER: &[&'static str] = &[
static WEAROUT_FIELD_ORDER: &[&str] = &[
"Media_Wearout_Indicator",
"SSD_Life_Left",
"Wear_Leveling_Count",

View File

@@ -21,9 +21,9 @@ lazy_static! {
fn get_pool_from_dataset(dataset: &str) -> &str {
if let Some(idx) = dataset.find('/') {
&dataset[0..idx].as_ref()
dataset[0..idx].as_ref()
} else {
dataset.as_ref()
dataset
}
}
@@ -53,7 +53,7 @@ pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
// All times are nanoseconds
let stat: Vec<u64> = lines[2]
.split_ascii_whitespace()
.map(|s| u64::from_str_radix(s, 10).unwrap_or(0))
.map(|s| s.parse().unwrap_or_default())
.collect();
let ticks = (stat[4] + stat[7]) / 1_000_000; // convert to milisec
@@ -147,12 +147,10 @@ fn parse_objset_stat(pool: &str, objset_id: &str) -> Result<(String, BlockDevSta
let value = parts.next().ok_or_else(|| format_err!("no value found"))?;
match name {
Some("dataset_name") => dataset_name = value.to_string(),
Some("writes") => stat.write_ios = u64::from_str_radix(value, 10).unwrap_or(0),
Some("nwritten") => {
stat.write_sectors = u64::from_str_radix(value, 10).unwrap_or(0) / 512
}
Some("reads") => stat.read_ios = u64::from_str_radix(value, 10).unwrap_or(0),
Some("nread") => stat.read_sectors = u64::from_str_radix(value, 10).unwrap_or(0) / 512,
Some("writes") => stat.write_ios = value.parse().unwrap_or_default(),
Some("nwritten") => stat.write_sectors = value.parse::<u64>().unwrap_or_default() / 512,
Some("reads") => stat.read_ios = value.parse().unwrap_or_default(),
Some("nread") => stat.read_sectors = value.parse::<u64>().unwrap_or_default() / 512,
_ => {}
}
}
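
`u64::from_str_radix(s, 10)` triggers clippy's `from_str_radix_10` lint: with base ten it is just `str::parse`, and `unwrap_or_default()` yields the same `0` fallback as the previous `unwrap_or(0)`. For example:

fn main() {
    assert_eq!("42".parse::<u64>().unwrap_or_default(), 42);
    assert_eq!("junk".parse::<u64>().unwrap_or_default(), 0);
}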

View File

@@ -1,4 +1,4 @@
use std::mem::replace;
use std::mem::{replace, take};
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
@@ -255,7 +255,7 @@
stack.push(StackItem {
node: replace(&mut cur.node, node),
level: replace(&mut cur.level, vdev_level),
children_of_parent: replace(&mut cur.children_of_parent, Vec::new()),
children_of_parent: take(&mut cur.children_of_parent),
});
} else {
// same indentation level, add to children of the previous level:

View File

@@ -56,7 +56,7 @@ impl<T: Any> AsAny for T {
/// The default 2 hours are far too long for PBS
pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
pub const DEFAULT_USER_AGENT_STRING: &'static str = "proxmox-backup-client/1.0";
pub const DEFAULT_USER_AGENT_STRING: &str = "proxmox-backup-client/1.0";
/// Returns a new instance of `SimpleHttp` configured for PBS usage.
pub fn pbs_simple_http(proxy_config: Option<ProxyConfig>) -> SimpleHttp {
@@ -64,7 +64,6 @@ pub fn pbs_simple_http(proxy_config: Option<ProxyConfig>) -> SimpleHttp {
proxy_config,
user_agent: Some(DEFAULT_USER_AGENT_STRING.to_string()),
tcp_keepalive: Some(PROXMOX_BACKUP_TCP_KEEPALIVE_TIME),
..Default::default()
};
SimpleHttp::with_options(options)