diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 170d248fc..fbd01fc42 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -1190,7 +1190,7 @@ pub struct TypeCounts {
         },
     },
 )]
-#[derive(Clone, Serialize, Deserialize)]
+#[derive(Clone, Default, Serialize, Deserialize)]
 #[serde(rename_all = "kebab-case")]
 /// Garbage collection status.
 pub struct GarbageCollectionStatus {
@@ -1217,24 +1217,6 @@ pub struct GarbageCollectionStatus {
     pub still_bad: usize,
 }
 
-impl Default for GarbageCollectionStatus {
-    fn default() -> Self {
-        GarbageCollectionStatus {
-            upid: None,
-            index_file_count: 0,
-            index_data_bytes: 0,
-            disk_bytes: 0,
-            disk_chunks: 0,
-            removed_bytes: 0,
-            removed_chunks: 0,
-            pending_bytes: 0,
-            pending_chunks: 0,
-            removed_bad: 0,
-            still_bad: 0,
-        }
-    }
-}
-
 #[api(
     properties: {
         "gc-status": {
diff --git a/pbs-client/src/pxar/extract.rs b/pbs-client/src/pxar/extract.rs
index 95530e48a..161d2cef1 100644
--- a/pbs-client/src/pxar/extract.rs
+++ b/pbs-client/src/pxar/extract.rs
@@ -542,7 +542,7 @@ where
     let file = root
         .lookup(&path)
         .await?
-        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
+        .ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
 
     let mut components = file.entry().path().components();
     components.next_back(); // discard last
@@ -586,7 +586,7 @@ where
                 let entry = root
                     .lookup(&path)
                     .await?
-                    .ok_or(format_err!("error looking up '{:?}'", path))?;
+                    .ok_or_else(|| format_err!("error looking up '{:?}'", path))?;
                 let realfile = accessor.follow_hardlink(&entry).await?;
                 let metadata = realfile.entry().metadata();
                 let realpath = Path::new(link);
@@ -705,7 +705,7 @@ where
     let file = root
         .lookup(&path)
        .await?
-        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
+        .ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
 
     let prefix = {
         let mut components = file.entry().path().components();
@@ -753,7 +753,7 @@ where
                 let entry = root
                     .lookup(&path)
                     .await?
-                    .ok_or(format_err!("error looking up '{:?}'", path))?;
+                    .ok_or_else(|| format_err!("error looking up '{:?}'", path))?;
                 let realfile = accessor.follow_hardlink(&entry).await?;
                 let metadata = realfile.entry().metadata();
                 log::debug!("adding '{}' to zip", path.display());
@@ -841,7 +841,7 @@ where
     let file = root
         .lookup(&path)
         .await?
-        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
+        .ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
 
     recurse_files_extractor(&mut extractor, file).await
 }
@@ -917,8 +917,8 @@ fn get_filename(entry: &Entry) -> Result<(OsString, CString), Error> {
     Ok((file_name_os, file_name))
 }
 
-async fn recurse_files_extractor<'a, T>(
-    extractor: &'a mut Extractor,
+async fn recurse_files_extractor<T>(
+    extractor: &mut Extractor,
     file: FileEntry<T>,
 ) -> Result<(), Error>
 where
@@ -960,8 +960,8 @@ where
     Ok(())
 }
 
-async fn seq_files_extractor<'a, T>(
-    extractor: &'a mut Extractor,
+async fn seq_files_extractor<T>(
+    extractor: &mut Extractor,
     mut decoder: pxar::decoder::aio::Decoder<T>,
 ) -> Result<(), Error>
 where
diff --git a/pbs-client/src/pxar/tools.rs b/pbs-client/src/pxar/tools.rs
index 0a7da6ed3..ddb14edaf 100644
--- a/pbs-client/src/pxar/tools.rs
+++ b/pbs-client/src/pxar/tools.rs
@@ -134,10 +134,12 @@ pub fn format_single_line_entry(entry: &Entry) -> String {
         _ => ("0".to_string(), String::new()),
     };
 
+    let owner_string = format!("{}/{}", meta.stat.uid, meta.stat.gid);
+
     format!(
         "{} {:<13} {} {:>8} {:?}{}",
         mode_string,
-        format!("{}/{}", meta.stat.uid, meta.stat.gid),
+        owner_string,
         format_mtime(&meta.stat.mtime),
         size,
         entry.path(),
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 1e5390926..7dce606f6 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -619,7 +619,7 @@ impl BackupInfo {
         })
     }
 
-    pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
+    pub fn sort_list(list: &mut [BackupInfo], ascendending: bool) {
        if ascendending {
            // oldest first
            list.sort_unstable_by(|a, b| a.backup_dir.dir.time.cmp(&b.backup_dir.dir.time));
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index c871eae51..a539ddc5d 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1181,7 +1181,7 @@ impl DataStore {
     /// stat'ed are placed at the end of the list
     pub fn get_chunks_in_order<F, A>(
         &self,
-        index: &Box<dyn IndexFile + Send>,
+        index: &(dyn IndexFile + Send),
         skip_chunk: F,
         check_abort: A,
     ) -> Result<Vec<(usize, u64)>, Error>
diff --git a/pbs-datastore/src/hierarchy.rs b/pbs-datastore/src/hierarchy.rs
index 260d54069..8b7af038b 100644
--- a/pbs-datastore/src/hierarchy.rs
+++ b/pbs-datastore/src/hierarchy.rs
@@ -209,7 +209,7 @@ impl Iterator for ListGroups {
                     group_type,
                 ) {
                     Ok(ty) => self.id_state = Some(ty),
-                    Err(err) => return Some(Err(err.into())),
+                    Err(err) => return Some(Err(err)),
                 }
             }
         }
diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs
index a2bb6aa1d..5cc508815 100644
--- a/pbs-datastore/src/snapshot_reader.rs
+++ b/pbs-datastore/src/snapshot_reader.rs
@@ -146,7 +146,7 @@ impl<'a, F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'a, F> {
                         Some(Operation::Read),
                     )?;
                     let order =
-                        datastore.get_chunks_in_order(&index, &self.skip_fn, |_| Ok(()))?;
+                        datastore.get_chunks_in_order(&*index, &self.skip_fn, |_| Ok(()))?;
 
                     self.current_index = Some((Arc::new(index), 0, order));
                 } else {
diff --git a/pbs-tape/src/linux_list_drives.rs b/pbs-tape/src/linux_list_drives.rs
index cc7765d34..39d2aac79 100644
--- a/pbs-tape/src/linux_list_drives.rs
+++ b/pbs-tape/src/linux_list_drives.rs
@@ -262,8 +262,8 @@ pub fn lookup_device<'a>(devices: &'a [TapeDeviceInfo], path: &str) -> Option<&'a TapeDeviceInfo> {
 }
 
 /// Lookup optional drive identification attributes
-pub fn lookup_device_identification<'a>(
-    devices: &'a [TapeDeviceInfo],
+pub fn lookup_device_identification(
+    devices: &[TapeDeviceInfo],
     path: &str,
 ) -> OptionalDeviceIdentification {
     if let Some(info) = lookup_device(devices, path) {
diff --git a/pbs-tape/src/sg_pt_changer.rs b/pbs-tape/src/sg_pt_changer.rs
index d413c1e02..d5c1c5641 100644
--- a/pbs-tape/src/sg_pt_changer.rs
+++ b/pbs-tape/src/sg_pt_changer.rs
@@ -27,11 +27,10 @@ pub fn initialize_element_status<F: AsRawFd>(file: &mut F) -> Result<(), Error>
     // like mtx(1), set a very long timeout (30 minutes)
     sg_raw.set_timeout(30 * 60);
 
-    let mut cmd = Vec::new();
-    cmd.extend(&[0x07, 0, 0, 0, 0, 0]); // INITIALIZE ELEMENT STATUS (07h)
+    let cmd = &[0x07, 0, 0, 0, 0, 0]; // INITIALIZE ELEMENT STATUS (07h)
 
     sg_raw
-        .do_command(&cmd)
+        .do_command(cmd)
         .map_err(|err| format_err!("initializte element status (07h) failed - {}", err))?;
 
     Ok(())
@@ -123,15 +122,16 @@ fn read_element_address_assignment<F: AsRawFd>(
     let mut sg_raw = SgRaw::new(file, allocation_len as usize)?;
     sg_raw.set_timeout(SCSI_CHANGER_DEFAULT_TIMEOUT);
 
-    let mut cmd = Vec::new();
-    cmd.push(0x1A); // MODE SENSE6 (1Ah)
-    cmd.push(0x08); // DBD=1 (The Disable Block Descriptors)
-    cmd.push(0x1D); // Element Address Assignment Page
-    cmd.push(0);
-    cmd.push(allocation_len); // allocation len
-    cmd.push(0); //control
+    let cmd = &[
+        0x1A, // MODE SENSE6 (1Ah)
+        0x08, // DBD=1 (The Disable Block Descriptors)
+        0x1D, // Element Address Assignment Page
+        0,
+        allocation_len, // allocation len
+        0, //control
+    ];
 
-    let data = execute_scsi_command(&mut sg_raw, &cmd, "read element address assignment", true)?;
+    let data = execute_scsi_command(&mut sg_raw, cmd, "read element address assignment", true)?;
 
     proxmox_lang::try_block!({
         let mut reader = &data[..];
@@ -146,6 +146,7 @@ fn read_element_address_assignment<F: AsRawFd>(
     .map_err(|err: Error| format_err!("decode element address assignment page failed - {}", err))
 }
 
+#[allow(clippy::vec_init_then_push)]
 fn scsi_move_medium_cdb(
     medium_transport_address: u16,
     source_element_address: u16,
@@ -276,6 +277,7 @@ impl ElementType {
     }
 }
 
+#[allow(clippy::vec_init_then_push)]
 fn scsi_read_element_status_cdb(
     start_element_address: u16,
     number_of_elements: u16,
diff --git a/pbs-tape/src/sg_tape.rs b/pbs-tape/src/sg_tape.rs
index 8ffd02dd8..784eb40ff 100644
--- a/pbs-tape/src/sg_tape.rs
+++ b/pbs-tape/src/sg_tape.rs
@@ -175,6 +175,7 @@ impl SgTape {
     /// of data. After the command is successfully completed, the
     /// drive is positioned immediately before End Of Data (not End Of
     /// Tape).
+ #[allow(clippy::vec_init_then_push)] pub fn erase_media(&mut self, fast: bool) -> Result<(), Error> { let mut sg_raw = SgRaw::new(&mut self.file, 16)?; sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT); @@ -273,6 +274,7 @@ impl SgTape { Ok(()) } + #[allow(clippy::unusual_byte_groupings)] pub fn locate_file(&mut self, position: u64) -> Result<(), Error> { if position == 0 { return self.rewind(); @@ -534,8 +536,7 @@ impl SgTape { })?; sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT); - let mut cmd = Vec::new(); - cmd.push(0x10); + let mut cmd = vec![0x10]; if immediate { cmd.push(1); // IMMED=1 } else { @@ -668,16 +669,17 @@ impl SgTape { let mut sg_raw = SgRaw::new(&mut self.file, 0).unwrap(); // cannot fail with size 0 sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT); - let mut cmd = Vec::new(); - cmd.push(0x08); // READ - cmd.push(0x02); // VARIABLE SIZED BLOCKS, SILI=1 - //cmd.push(0x00); // VARIABLE SIZED BLOCKS, SILI=0 - cmd.push(((transfer_len >> 16) & 0xff) as u8); - cmd.push(((transfer_len >> 8) & 0xff) as u8); - cmd.push((transfer_len & 0xff) as u8); - cmd.push(0); // control byte + let cmd = &[ + 0x08, // READ + 0x02, // VARIABLE SIZED BLOCKS, SILI=1 + //0x00, // VARIABLE SIZED BLOCKS, SILI=0 + ((transfer_len >> 16) & 0xff) as u8, + ((transfer_len >> 8) & 0xff) as u8, + (transfer_len & 0xff) as u8, + 0, // control byte + ]; - let data = match sg_raw.do_in_command(&cmd, buffer) { + let data = match sg_raw.do_in_command(cmd, buffer) { Ok(data) => data, Err(ScsiError::Sense(SenseInfo { sense_key: 0, @@ -734,6 +736,7 @@ impl SgTape { } /// Set important drive options + #[allow(clippy::vec_init_then_push)] pub fn set_drive_options( &mut self, compression: Option, diff --git a/pbs-tape/src/sg_tape/encryption.rs b/pbs-tape/src/sg_tape/encryption.rs index bd841ba0a..20177d561 100644 --- a/pbs-tape/src/sg_tape/encryption.rs +++ b/pbs-tape/src/sg_tape/encryption.rs @@ -73,6 +73,7 @@ struct SspSetDataEncryptionPage { /* key follows */ } +#[allow(clippy::vec_init_then_push)] fn sg_spout_set_encryption( file: &mut F, algorythm_index: u8, @@ -129,6 +130,7 @@ fn sg_spout_set_encryption( } // Warning: this blocks and fails if there is no media loaded +#[allow(clippy::vec_init_then_push)] fn sg_spin_data_encryption_status(file: &mut F) -> Result, Error> { let allocation_len: u32 = 8192 + 4; @@ -157,6 +159,7 @@ fn sg_spin_data_encryption_status(file: &mut F) -> Result, E } // Warning: this blocks and fails if there is no media loaded +#[allow(clippy::vec_init_then_push)] fn sg_spin_data_encryption_caps(file: &mut F) -> Result, Error> { let allocation_len: u32 = 8192 + 4; diff --git a/pbs-tape/src/sg_tape/mam.rs b/pbs-tape/src/sg_tape/mam.rs index 4c9c5956a..01f62eb04 100644 --- a/pbs-tape/src/sg_tape/mam.rs +++ b/pbs-tape/src/sg_tape/mam.rs @@ -24,6 +24,7 @@ struct MamAttributeHeader { len: u16, } +#[allow(clippy::upper_case_acronyms)] enum MamFormat { BINARY, ASCII, diff --git a/pbs-tape/src/sg_tape/tape_alert_flags.rs b/pbs-tape/src/sg_tape/tape_alert_flags.rs index 8b25d4bdb..1483d6255 100644 --- a/pbs-tape/src/sg_tape/tape_alert_flags.rs +++ b/pbs-tape/src/sg_tape/tape_alert_flags.rs @@ -79,6 +79,7 @@ pub fn read_tape_alert_flags(file: &mut F) -> Result(file: &mut F) -> Result, Error> { let mut sg_raw = SgRaw::new(file, 512)?; diff --git a/pbs-tape/src/sg_tape/volume_statistics.rs b/pbs-tape/src/sg_tape/volume_statistics.rs index 3bacfdc4e..002c011ea 100644 --- a/pbs-tape/src/sg_tape/volume_statistics.rs +++ b/pbs-tape/src/sg_tape/volume_statistics.rs @@ -22,6 +22,7 @@ pub fn 
read_volume_statistics(file: &mut F) -> Result(file: &mut F) -> Result, Error> { let alloc_len: u16 = 8192; let mut sg_raw = SgRaw::new(file, alloc_len as usize)?; diff --git a/pbs-tape/src/sgutils2.rs b/pbs-tape/src/sgutils2.rs index db92277b8..f74fb0e68 100644 --- a/pbs-tape/src/sgutils2.rs +++ b/pbs-tape/src/sgutils2.rs @@ -236,10 +236,12 @@ pub struct ModeParameterHeader { } impl ModeParameterHeader { + #[allow(clippy::unusual_byte_groupings)] pub fn buffer_mode(&self) -> u8 { - (self.flags3 & 0b0111_0000) >> 4 + (self.flags3 & 0b0_111_0000) >> 4 } + #[allow(clippy::unusual_byte_groupings)] pub fn set_buffer_mode(&mut self, buffer_mode: bool) { let mut mode = self.flags3 & 0b1_000_1111; if buffer_mode { @@ -248,8 +250,9 @@ impl ModeParameterHeader { self.flags3 = mode; } + #[allow(clippy::unusual_byte_groupings)] pub fn write_protect(&self) -> bool { - (self.flags3 & 0b1000_0000) != 0 + (self.flags3 & 0b1_000_0000) != 0 } } @@ -380,13 +383,11 @@ impl<'a, F: AsRawFd> SgRaw<'a, F> { /// /// The file must be a handle to a SCSI device. pub fn new(file: &'a mut F, buffer_size: usize) -> Result { - let buffer; - - if buffer_size > 0 { - buffer = alloc_page_aligned_buffer(buffer_size)?; + let buffer = if buffer_size > 0 { + alloc_page_aligned_buffer(buffer_size)? } else { - buffer = Box::new([]); - } + Box::new([]) + }; let sense_buffer = [0u8; 32]; @@ -683,8 +684,7 @@ pub fn scsi_mode_sense( let allocation_len: u16 = 4096; let mut sg_raw = SgRaw::new(file, allocation_len as usize)?; - let mut cmd = Vec::new(); - cmd.push(0x5A); // MODE SENSE(10) + let mut cmd = vec![0x5A]; // MODE SENSE(10) if disable_block_descriptor { cmd.push(8); // DBD=1 (Disable Block Descriptors) } else { diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index fc7dc1faa..4bb9aa5e3 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -702,7 +702,7 @@ async fn create_backup( let backup_id = param["backup-id"] .as_str() - .unwrap_or(proxmox_sys::nodename()); + .unwrap_or_else(|| proxmox_sys::nodename()); let backup_ns = optional_ns_param(¶m)?; diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/auth.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/auth.rs index 1d6e2a8f1..6a3614d3d 100644 --- a/proxmox-restore-daemon/src/proxmox_restore_daemon/auth.rs +++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/auth.rs @@ -59,7 +59,7 @@ impl ServerAdapter for StaticAuthAdapter { > { Box::pin(async move { match headers.get(hyper::header::AUTHORIZATION) { - Some(header) if header.to_str().unwrap_or("") == &self.ticket => { + Some(header) if header.to_str().unwrap_or("") == self.ticket => { let user_info: Box = Box::new(SimpleUserInformation {}); Ok((String::from("root@pam"), user_info)) diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs index a3b7cfa4c..0aef30ea7 100644 --- a/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs +++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/disk.rs @@ -82,6 +82,7 @@ struct LVMBucketData { /// more subdirectories /// e.g.: "/drive-scsi0/part/0/etc/passwd" #[derive(Clone)] +#[allow(clippy::upper_case_acronyms)] enum Bucket { Partition(PartitionBucketData), RawFs(PartitionBucketData), @@ -91,7 +92,7 @@ enum Bucket { impl Bucket { fn filter_mut<'a, A: AsRef, B: AsRef>( - haystack: &'a mut Vec, + haystack: &'a mut [Bucket], ty: A, comp: &[B], ) -> Option<&'a mut Bucket> { diff --git a/src/acme/client.rs 
b/src/acme/client.rs index 743712073..886be3e35 100644 --- a/src/acme/client.rs +++ b/src/acme/client.rs @@ -676,7 +676,7 @@ const fn retry() -> Retry { impl Retry { fn tick(&mut self) -> Result<(), Error> { if self.0 >= 3 { - Err(Error::Client(format!("kept getting a badNonce error!"))) + Err(Error::Client("kept getting a badNonce error!".to_string())) } else { self.0 += 1; Ok(()) diff --git a/src/api2/access/openid.rs b/src/api2/access/openid.rs index e75047f76..4a12e5710 100644 --- a/src/api2/access/openid.rs +++ b/src/api2/access/openid.rs @@ -257,7 +257,7 @@ fn openid_auth_url( let url = open_id.authorize_url(PROXMOX_BACKUP_RUN_DIR_M!(), &realm)?; - Ok(url.into()) + Ok(url) } #[sortable] diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 11a22e2a1..ff2f1f871 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -100,7 +100,7 @@ fn check_privs_and_load_store( if limited { let owner = datastore.get_owner(ns, backup_group)?; - check_backup_owner(&owner, &auth_id)?; + check_backup_owner(&owner, auth_id)?; } Ok(datastore) @@ -778,6 +778,7 @@ pub async fn status( /// /// This function can verify a single backup snapshot, all backup from a backup group, /// or all backups in the datastore. +#[allow(clippy::too_many_arguments)] pub fn verify( store: String, ns: Option, @@ -1287,7 +1288,7 @@ pub fn download_file( let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(¶m)?; let datastore = check_privs_and_load_store( - &store, + store, &backup_ns, &auth_id, PRIV_DATASTORE_READ, @@ -1301,7 +1302,7 @@ pub fn download_file( println!( "Download {} from {} ({}/{})", file_name, - print_store_and_ns(&store, &backup_ns), + print_store_and_ns(store, &backup_ns), backup_dir, file_name ); @@ -1372,7 +1373,7 @@ pub fn download_file_decoded( let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(¶m)?; let datastore = check_privs_and_load_store( - &store, + store, &backup_ns, &auth_id, PRIV_DATASTORE_READ, @@ -1394,7 +1395,7 @@ pub fn download_file_decoded( println!( "Download {} from {} ({}/{})", file_name, - print_store_and_ns(&store, &backup_ns), + print_store_and_ns(store, &backup_ns), backup_dir_api, file_name ); @@ -1403,7 +1404,7 @@ pub fn download_file_decoded( path.push(backup_dir.relative_path()); path.push(&file_name); - let extension = file_name.rsplitn(2, '.').next().unwrap(); + let (_, extension) = file_name.rsplit_once('.').unwrap(); let body = match extension { "didx" => { @@ -1503,7 +1504,7 @@ pub fn upload_backup_log( let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(¶m)?; let datastore = check_privs_and_load_store( - &store, + store, &backup_ns, &auth_id, 0, @@ -1524,7 +1525,7 @@ pub fn upload_backup_log( println!( "Upload backup log to {} {backup_dir_api}/{file_name}", - print_store_and_ns(&store, &backup_ns), + print_store_and_ns(store, &backup_ns), ); let data = req_body @@ -1667,7 +1668,7 @@ pub fn pxar_file_download( let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(¶m)?; let datastore = check_privs_and_load_store( - &store, + store, &ns, &auth_id, PRIV_DATASTORE_READ, diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs index 8c1c42dbb..e9a5cbc87 100644 --- a/src/api2/backup/environment.rs +++ b/src/api2/backup/environment.rs @@ -255,7 +255,7 @@ impl BackupEnvironment { pub fn lookup_chunk(&self, digest: &[u8; 32]) -> Option { let state = self.state.lock().unwrap(); - state.known_chunks.get(digest).map(|len| *len) + 
state.known_chunks.get(digest).copied() } /// Store the writer with an unique ID diff --git a/src/api2/config/acme.rs b/src/api2/config/acme.rs index 81455b47f..0f96986c1 100644 --- a/src/api2/config/acme.rs +++ b/src/api2/config/acme.rs @@ -246,9 +246,9 @@ pub async fn do_register_account<'a>( rsa_bits: Option, ) -> Result<&'a Account, Error> { let contact = account_contact_from_string(&contact); - Ok(client + client .new_account(name, agree_to_tos, contact, rsa_bits) - .await?) + .await } #[api( diff --git a/src/api2/config/remote.rs b/src/api2/config/remote.rs index a86a74a76..8c51f3d93 100644 --- a/src/api2/config/remote.rs +++ b/src/api2/config/remote.rs @@ -481,11 +481,7 @@ pub async fn scan_remote_groups( let client = remote_client(&remote, None).await.map_err(map_remote_err)?; - let args = if let Some(ns) = namespace { - Some(json!({ "ns": ns })) - } else { - None - }; + let args = namespace.map(|ns| json!({ "ns": ns })); let api_res = client .get(&format!("api2/json/admin/datastore/{}/groups", store), args) diff --git a/src/api2/config/tape_encryption_keys.rs b/src/api2/config/tape_encryption_keys.rs index 9dc4d26ba..9a9f1b8dd 100644 --- a/src/api2/config/tape_encryption_keys.rs +++ b/src/api2/config/tape_encryption_keys.rs @@ -90,6 +90,7 @@ pub fn list_keys( }, )] /// Change the encryption key's password (and password hint). +#[allow(clippy::too_many_arguments)] pub fn change_passphrase( kdf: Option, password: Option, diff --git a/src/api2/node/certificates.rs b/src/api2/node/certificates.rs index 1b9a88504..b3b468a01 100644 --- a/src/api2/node/certificates.rs +++ b/src/api2/node/certificates.rs @@ -397,7 +397,7 @@ async fn order_certificate( .ok_or_else(|| format_err!("missing 'finalize' URL in order"))?; if let Err(err) = acme.finalize(finalize, &csr.data).await { if finalize_error_cnt >= 5 { - return Err(err.into()); + return Err(err); } finalize_error_cnt += 1; diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index cada95cdb..1d3114ef3 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -88,7 +88,7 @@ pub fn list_datastore_mounts() -> Result, Error> { let name = data .Where .strip_prefix(BASE_MOUNT_DIR) - .unwrap_or_else(|| &data.Where) + .unwrap_or(&data.Where) .to_string(); list.push(DatastoreMountInfo { diff --git a/src/api2/node/journal.rs b/src/api2/node/journal.rs index 1bedc8dc9..1ef302c7e 100644 --- a/src/api2/node/journal.rs +++ b/src/api2/node/journal.rs @@ -59,6 +59,7 @@ use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT}; }, )] /// Read syslog entries. 
+#[allow(clippy::too_many_arguments)] fn get_journal( since: Option, until: Option, diff --git a/src/api2/node/rrd.rs b/src/api2/node/rrd.rs index 55e6099de..c43754b9a 100644 --- a/src/api2/node/rrd.rs +++ b/src/api2/node/rrd.rs @@ -41,9 +41,9 @@ pub fn create_value_from_rrd( let mut t = start; - for index in 0..data.len() { - let entry = timemap.entry(t).or_insert(json!({ "time": t })); - if let Some(value) = data[index] { + for value in data { + let entry = timemap.entry(t).or_insert_with(|| json!({ "time": t })); + if let Some(value) = value { entry[*name] = value.into(); } t += reso; diff --git a/src/api2/node/syslog.rs b/src/api2/node/syslog.rs index 15d3c2d1d..54b34334a 100644 --- a/src/api2/node/syslog.rs +++ b/src/api2/node/syslog.rs @@ -144,7 +144,7 @@ fn get_syslog( ) -> Result { let service = param["service"] .as_str() - .map(|service| crate::api2::node::services::real_service_name(service)); + .map(crate::api2::node::services::real_service_name); let (count, lines) = dump_journal( param["start"].as_u64(), diff --git a/src/api2/pull.rs b/src/api2/pull.rs index e05e946ec..8876aa5d9 100644 --- a/src/api2/pull.rs +++ b/src/api2/pull.rs @@ -216,6 +216,7 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datasto }, )] /// Sync store from other repository +#[allow(clippy::too_many_arguments)] async fn pull( store: String, ns: Option, diff --git a/src/api2/status.rs b/src/api2/status.rs index 0f2e42d63..d6dec9d22 100644 --- a/src/api2/status.rs +++ b/src/api2/status.rs @@ -49,7 +49,7 @@ pub async fn datastore_status( let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]); let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; if !allowed { - if let Ok(datastore) = DataStore::lookup_datastore(&store, Some(Operation::Lookup)) { + if let Ok(datastore) = DataStore::lookup_datastore(store, Some(Operation::Lookup)) { if can_access_any_namespace(datastore, &auth_id, &user_info) { list.push(DataStoreStatusListItem::empty(store, None)); } @@ -57,7 +57,7 @@ pub async fn datastore_status( continue; } - let datastore = match DataStore::lookup_datastore(&store, Some(Operation::Read)) { + let datastore = match DataStore::lookup_datastore(store, Some(Operation::Read)) { Ok(datastore) => datastore, Err(err) => { list.push(DataStoreStatusListItem::empty(store, Some(err.to_string()))); @@ -127,7 +127,7 @@ pub async fn datastore_status( list.push(entry); } - Ok(list.into()) + Ok(list) } const SUBDIRS: SubdirMap = &[( diff --git a/src/api2/tape/media.rs b/src/api2/tape/media.rs index c6f6b52e7..ad7c43544 100644 --- a/src/api2/tape/media.rs +++ b/src/api2/tape/media.rs @@ -214,30 +214,28 @@ pub async fn list_media( let inventory = Inventory::load(status_path)?; let privs = user_info.lookup_privs(&auth_id, &["tape", "pool"]); - if (privs & PRIV_TAPE_AUDIT) != 0 { - if pool.is_none() { - for media_id in inventory.list_unassigned_media() { - let (mut status, location) = inventory.status_and_location(&media_id.label.uuid); + if (privs & PRIV_TAPE_AUDIT) != 0 && pool.is_none() { + for media_id in inventory.list_unassigned_media() { + let (mut status, location) = inventory.status_and_location(&media_id.label.uuid); - if status == MediaStatus::Unknown { - status = MediaStatus::Writable; - } - - list.push(MediaListEntry { - uuid: media_id.label.uuid.clone(), - ctime: media_id.label.ctime, - label_text: media_id.label.label_text.to_string(), - location, - status, - catalog: true, // empty, so we do not need a catalog - expired: false, - 
media_set_uuid: None, - media_set_name: None, - media_set_ctime: None, - seq_nr: None, - pool: None, - }); + if status == MediaStatus::Unknown { + status = MediaStatus::Writable; } + + list.push(MediaListEntry { + uuid: media_id.label.uuid.clone(), + ctime: media_id.label.ctime, + label_text: media_id.label.label_text.to_string(), + location, + status, + catalog: true, // empty, so we do not need a catalog + expired: false, + media_set_uuid: None, + media_set_name: None, + media_set_ctime: None, + seq_nr: None, + pool: None, + }); } } diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs index ba17bb6fd..2c8b08fc8 100644 --- a/src/api2/tape/restore.rs +++ b/src/api2/tape/restore.rs @@ -316,6 +316,7 @@ pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE); }, )] /// Restore data from media-set. Namespaces will be automatically created if necessary. +#[allow(clippy::too_many_arguments)] pub fn restore( store: String, drive: String, @@ -631,7 +632,7 @@ fn restore_list_worker( let mut restorable = Vec::new(); // restore source namespaces for (store, snapshot) in catalog.list_snapshots() { - let (ns, dir) = match parse_ns_and_snapshot(&snapshot) { + let (ns, dir) = match parse_ns_and_snapshot(snapshot) { Ok((ns, dir)) if store_map.has_full_mapping(store, &ns) => (ns, dir), Err(err) => { task_warn!(worker, "couldn't parse snapshot {snapshot} - {err}"); @@ -1194,7 +1195,6 @@ fn restore_partial_chunk_archive<'a>( let verify_and_write_channel = writer_pool.channel(); while let Some((digest, blob)) = decoder.next_chunk()? { - worker.check_abort()?; if chunk_list.remove(&digest) { @@ -1878,13 +1878,10 @@ pub fn fast_catalog_restore( let catalog_uuid = &archive_header.uuid; - let wanted = media_set - .media_list() - .iter() - .any(|e| match e { - None => false, - Some(uuid) => uuid == catalog_uuid, - }); + let wanted = media_set.media_list().iter().any(|e| match e { + None => false, + Some(uuid) => uuid == catalog_uuid, + }); if !wanted { task_log!( diff --git a/src/auth.rs b/src/auth.rs index bd57fe87b..f1d5c0a19 100644 --- a/src/auth.rs +++ b/src/auth.rs @@ -17,6 +17,7 @@ pub trait ProxmoxAuthenticator { fn remove_password(&self, username: &UsernameRef) -> Result<(), Error>; } +#[allow(clippy::upper_case_acronyms)] struct PAM(); impl ProxmoxAuthenticator for PAM { @@ -70,6 +71,7 @@ impl ProxmoxAuthenticator for PAM { } } +#[allow(clippy::upper_case_acronyms)] struct PBS(); const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json"); diff --git a/src/backup/hierarchy.rs b/src/backup/hierarchy.rs index 0f05505af..640a7762d 100644 --- a/src/backup/hierarchy.rs +++ b/src/backup/hierarchy.rs @@ -85,7 +85,7 @@ pub fn can_access_any_namespace( PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP; let name = store.name(); iter.any(|ns| -> bool { - let user_privs = user_info.lookup_privs(&auth_id, &["datastore", name, &ns.to_string()]); + let user_privs = user_info.lookup_privs(auth_id, &["datastore", name, &ns.to_string()]); user_privs & wanted != 0 }) } @@ -136,7 +136,7 @@ impl<'a> ListAccessibleBackupGroups<'a> { override_owner_priv: override_owner_priv.unwrap_or(0), owner_and_priv: owner_and_priv.unwrap_or(0), state: None, - store: store, + store, user_info: CachedUserInfo::new()?, }) } @@ -157,11 +157,10 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> { return Some(Ok(group)); } if let Some(auth_id) = &self.auth_id { - match self.store.owns_backup( - &group.backup_ns(), - group.group(), - &auth_id, - ) { + match self + .store + 
.owns_backup(group.backup_ns(), group.group(), auth_id) + { Ok(is_owner) if is_owner => return Some(Ok(group)), Ok(_) => continue, Err(err) => return Some(Err(err)), @@ -182,8 +181,7 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> { if let Some(auth_id) = &self.auth_id { let info = &self.user_info; - let privs = - info.lookup_privs(&auth_id, &ns.acl_path(self.store.name())); + let privs = info.lookup_privs(auth_id, &ns.acl_path(self.store.name())); if privs & NS_PRIVS_OK == 0 { continue; @@ -196,7 +194,7 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> { continue; // no owner override and no extra privs -> nothing visible } } - self.state = match ListGroups::new(Arc::clone(&self.store), ns) { + self.state = match ListGroups::new(Arc::clone(self.store), ns) { Ok(iter) => Some((iter, override_owner)), Err(err) => return Some(Err(err)), }; diff --git a/src/backup/verify.rs b/src/backup/verify.rs index 9ad45e5e3..3984e28dc 100644 --- a/src/backup/verify.rs +++ b/src/backup/verify.rs @@ -198,7 +198,7 @@ fn verify_index_chunks( let chunk_list = verify_worker .datastore - .get_chunks_in_order(&index, skip_chunk, check_abort)?; + .get_chunks_in_order(&*index, skip_chunk, check_abort)?; for (pos, _) in chunk_list { verify_worker.worker.check_abort()?; diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs index 0ca3e990d..9857d7017 100644 --- a/src/bin/proxmox-backup-manager.rs +++ b/src/bin/proxmox-backup-manager.rs @@ -276,6 +276,7 @@ fn task_mgmt_cli() -> CommandLineInterface { } )] /// Sync datastore from another repository +#[allow(clippy::too_many_arguments)] async fn pull_datastore( remote: String, remote_store: String, diff --git a/src/config/node.rs b/src/config/node.rs index 5a6a48d4e..881800a7c 100644 --- a/src/config/node.rs +++ b/src/config/node.rs @@ -231,10 +231,7 @@ pub struct NodeConfig { impl NodeConfig { pub fn acme_config(&self) -> Option> { self.acme.as_deref().map(|config| -> Result<_, Error> { - Ok(crate::tools::config::from_property_string( - config, - &AcmeConfig::API_SCHEMA, - )?) + crate::tools::config::from_property_string(config, &AcmeConfig::API_SCHEMA) }) } diff --git a/src/server/email_notifications.rs b/src/server/email_notifications.rs index 6fca41333..898bc8f31 100644 --- a/src/server/email_notifications.rs +++ b/src/server/email_notifications.rs @@ -538,7 +538,7 @@ pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> { /// send email on certificate renewal failure. 
pub fn send_certificate_renewal_mail(result: &Result<(), Error>) -> Result<(), Error> { let error: String = match result { - Err(e) => e.to_string().into(), + Err(e) => e.to_string(), _ => return Ok(()), }; @@ -620,8 +620,7 @@ fn handlebars_humam_bytes_helper( ) -> HelperResult { let param = h .param(0) - .map(|v| v.value().as_u64()) - .flatten() + .and_then(|v| v.value().as_u64()) .ok_or_else(|| RenderError::new("human-bytes: param not found"))?; out.write(&HumanByte::from(param).to_string())?; @@ -638,13 +637,11 @@ fn handlebars_relative_percentage_helper( ) -> HelperResult { let param0 = h .param(0) - .map(|v| v.value().as_f64()) - .flatten() + .and_then(|v| v.value().as_f64()) .ok_or_else(|| RenderError::new("relative-percentage: param0 not found"))?; let param1 = h .param(1) - .map(|v| v.value().as_f64()) - .flatten() + .and_then(|v| v.value().as_f64()) .ok_or_else(|| RenderError::new("relative-percentage: param1 not found"))?; if param1 == 0.0 { diff --git a/src/server/jobstate.rs b/src/server/jobstate.rs index 5cd43c848..4f347ad1a 100644 --- a/src/server/jobstate.rs +++ b/src/server/jobstate.rs @@ -324,10 +324,12 @@ pub fn compute_schedule_status( } }; - let mut status = JobScheduleStatus::default(); - status.last_run_upid = upid.map(String::from); - status.last_run_state = state; - status.last_run_endtime = endtime; + let mut status = JobScheduleStatus { + last_run_upid: upid.map(String::from), + last_run_state: state, + last_run_endtime: endtime, + ..Default::default() + }; if let Some(schedule) = schedule { if let Ok(event) = schedule.parse::() { diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs index a62177e4f..4e261b489 100644 --- a/src/server/prune_job.rs +++ b/src/server/prune_job.rs @@ -164,9 +164,9 @@ pub fn do_prune_job( let worker_type = job.jobtype().to_string(); let auth_id = auth_id.clone(); let worker_id = match &prune_options.ns { - Some(ns) if ns.is_root() => format!("{store}"), + Some(ns) if ns.is_root() => store, Some(ns) => format!("{store}:{ns}"), - None => format!("{store}"), + None => store, }; let upid_str = WorkerTask::new_thread( diff --git a/src/server/pull.rs b/src/server/pull.rs index b159c75d3..95d3b981a 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -788,7 +788,7 @@ fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result< let mut created = false; let store_ns_str = print_store_and_ns(params.store.name(), ns); - if !ns.is_root() && !params.store.namespace_path(&ns).exists() { + if !ns.is_root() && !params.store.namespace_path(ns).exists() { check_ns_modification_privs(params.store.name(), ns, ¶ms.owner) .map_err(|err| format_err!("Creating {ns} not allowed - {err}"))?; @@ -817,7 +817,7 @@ fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result< } fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> Result { - check_ns_modification_privs(¶ms.store.name(), local_ns, ¶ms.owner) + check_ns_modification_privs(params.store.name(), local_ns, ¶ms.owner) .map_err(|err| format_err!("Removing {local_ns} not allowed - {err}"))?; params.store.remove_namespace_recursive(local_ns, true) diff --git a/src/tape/file_formats/chunk_archive.rs b/src/tape/file_formats/chunk_archive.rs index 8ed2f61d8..708470125 100644 --- a/src/tape/file_formats/chunk_archive.rs +++ b/src/tape/file_formats/chunk_archive.rs @@ -116,13 +116,11 @@ impl<'a> ChunkArchiveWriter<'a> { } else { self.write_all(&blob_data[start..end])? 
}; - if leom { - if self.close_on_leom { - let mut writer = self.writer.take().unwrap(); - writer.finish(false)?; - self.bytes_written = writer.bytes_written(); - return Ok(chunk_is_complete); - } + if leom && self.close_on_leom { + let mut writer = self.writer.take().unwrap(); + writer.finish(false)?; + self.bytes_written = writer.bytes_written(); + return Ok(chunk_is_complete); } start = end; } diff --git a/src/tape/media_catalog.rs b/src/tape/media_catalog.rs index 7fd007be9..25c752526 100644 --- a/src/tape/media_catalog.rs +++ b/src/tape/media_catalog.rs @@ -610,9 +610,7 @@ impl MediaCatalog { } self.pending.extend(store.as_bytes()); - self.content - .entry(store.to_string()) - .or_default(); + self.content.entry(store.to_string()).or_default(); self.current_archive = Some((uuid, file_number, store.to_string())); @@ -726,10 +724,7 @@ impl MediaCatalog { self.pending.push(b':'); self.pending.extend(path.as_bytes()); - let content = self - .content - .entry(store.to_string()) - .or_default(); + let content = self.content.entry(store.to_string()).or_default(); content.snapshot_index.insert(path, file_number); @@ -857,9 +852,7 @@ impl MediaCatalog { self.check_start_chunk_archive(file_number)?; - self.content - .entry(store.to_string()) - .or_default(); + self.content.entry(store.to_string()).or_default(); self.current_archive = Some((uuid, file_number, store.to_string())); } @@ -893,10 +886,7 @@ impl MediaCatalog { let _ = parse_ns_and_snapshot(snapshot)?; self.check_register_snapshot(file_number)?; - let content = self - .content - .entry(store.to_string()) - .or_default(); + let content = self.content.entry(store.to_string()).or_default(); content .snapshot_index @@ -1015,19 +1005,14 @@ impl MediaSetCatalog { /// as (datastore, snapshot). /// The snapshot contains namespaces in the format 'ns/namespace'. 
pub fn list_snapshots(&self) -> impl Iterator { - self.catalog_list - .values() - .flat_map(|catalog| { - catalog - .content - .iter() - .flat_map(|(store, content)| { - content - .snapshot_index - .keys() - .map(move |key| (store.as_str(), key.as_str())) - }) + self.catalog_list.values().flat_map(|catalog| { + catalog.content.iter().flat_map(|(store, content)| { + content + .snapshot_index + .keys() + .map(move |key| (store.as_str(), key.as_str())) }) + }) } } diff --git a/src/tape/media_pool.rs b/src/tape/media_pool.rs index 0593feb4d..07413f1ff 100644 --- a/src/tape/media_pool.rs +++ b/src/tape/media_pool.rs @@ -344,13 +344,11 @@ impl MediaPool { MediaLocation::Online(name) => { if self.force_media_availability { true + } else if let Some(ref changer_name) = self.changer_name { + name == changer_name } else { - if let Some(ref changer_name) = self.changer_name { - name == changer_name - } else { - // a standalone drive cannot use media currently inside a library - false - } + // a standalone drive cannot use media currently inside a library + false } } MediaLocation::Offline => { @@ -686,10 +684,8 @@ impl MediaPool { let media_location = media.location(); if self.location_is_available(media_location) { last_is_writable = true; - } else { - if let MediaLocation::Vault(vault) = media_location { - bail!("writable media offsite in vault '{}'", vault); - } + } else if let MediaLocation::Vault(vault) = media_location { + bail!("writable media offsite in vault '{}'", vault); } } _ => bail!( diff --git a/src/tools/apt.rs b/src/tools/apt.rs index 58cd605be..45b64cee7 100644 --- a/src/tools/apt.rs +++ b/src/tools/apt.rs @@ -110,13 +110,12 @@ fn get_changelog_url( command.arg("--print-uris"); command.arg(package); let output = proxmox_sys::command::run_command(command, None)?; // format: 'http://foo/bar' package.changelog - let output = match output.splitn(2, ' ').next() { - Some(output) => { - if output.len() < 2 { - bail!("invalid output (URI part too short) from 'apt-get changelog --print-uris': {}", output) - } - output[1..output.len() - 1].to_owned() - } + let output = match output.split_once(' ') { + Some((uri, _file_name)) if uri.len() > 2 => uri[1..uri.len() - 1].to_owned(), + Some((uri, _file_name)) => bail!( + "invalid output (URI part too short) from 'apt-get changelog --print-uris': {}", + uri + ), None => bail!( "invalid output from 'apt-get changelog --print-uris': {}", output diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs index 35ec99963..6d070cd4c 100644 --- a/src/tools/disks/mod.rs +++ b/src/tools/disks/mod.rs @@ -478,7 +478,7 @@ impl Disk { let stat = unsafe { std::str::from_utf8_unchecked(&stat) }; let stat: Vec = stat .split_ascii_whitespace() - .map(|s| u64::from_str_radix(s, 10).unwrap_or(0)) + .map(|s| s.parse().unwrap_or_default()) .collect(); if stat.len() < 15 { @@ -821,7 +821,7 @@ fn get_partitions_info( let mut used = PartitionUsageType::Unused; - if let Some(devnum) = disk.devnum().ok() { + if let Ok(devnum) = disk.devnum() { if lvm_devices.contains(&devnum) { used = PartitionUsageType::LVM; } else if zfs_devices.contains(&devnum) { diff --git a/src/tools/disks/smart.rs b/src/tools/disks/smart.rs index 3738cdfd7..621527e99 100644 --- a/src/tools/disks/smart.rs +++ b/src/tools/disks/smart.rs @@ -203,7 +203,7 @@ pub fn get_smart_data(disk: &super::Disk, health_only: bool) -> Result &str { if let Some(idx) = dataset.find('/') { - &dataset[0..idx].as_ref() + dataset[0..idx].as_ref() } else { - dataset.as_ref() + dataset } } @@ -53,7 +53,7 @@ pub fn 
zfs_pool_stats(pool: &OsStr) -> Result, Error> { // All times are nanoseconds let stat: Vec = lines[2] .split_ascii_whitespace() - .map(|s| u64::from_str_radix(s, 10).unwrap_or(0)) + .map(|s| s.parse().unwrap_or_default()) .collect(); let ticks = (stat[4] + stat[7]) / 1_000_000; // convert to milisec @@ -147,12 +147,10 @@ fn parse_objset_stat(pool: &str, objset_id: &str) -> Result<(String, BlockDevSta let value = parts.next().ok_or_else(|| format_err!("no value found"))?; match name { Some("dataset_name") => dataset_name = value.to_string(), - Some("writes") => stat.write_ios = u64::from_str_radix(value, 10).unwrap_or(0), - Some("nwritten") => { - stat.write_sectors = u64::from_str_radix(value, 10).unwrap_or(0) / 512 - } - Some("reads") => stat.read_ios = u64::from_str_radix(value, 10).unwrap_or(0), - Some("nread") => stat.read_sectors = u64::from_str_radix(value, 10).unwrap_or(0) / 512, + Some("writes") => stat.write_ios = value.parse().unwrap_or_default(), + Some("nwritten") => stat.write_sectors = value.parse::().unwrap_or_default() / 512, + Some("reads") => stat.read_ios = value.parse().unwrap_or_default(), + Some("nread") => stat.read_sectors = value.parse::().unwrap_or_default() / 512, _ => {} } } diff --git a/src/tools/disks/zpool_status.rs b/src/tools/disks/zpool_status.rs index cb87c81f6..679ed1a6f 100644 --- a/src/tools/disks/zpool_status.rs +++ b/src/tools/disks/zpool_status.rs @@ -1,4 +1,4 @@ -use std::mem::replace; +use std::mem::{replace, take}; use anyhow::{bail, Error}; use serde::{Deserialize, Serialize}; @@ -255,7 +255,7 @@ where stack.push(StackItem { node: replace(&mut cur.node, node), level: replace(&mut cur.level, vdev_level), - children_of_parent: replace(&mut cur.children_of_parent, Vec::new()), + children_of_parent: take(&mut cur.children_of_parent), }); } else { // same indentation level, add to children of the previous level: diff --git a/src/tools/mod.rs b/src/tools/mod.rs index 408249c2d..fadb55ef3 100644 --- a/src/tools/mod.rs +++ b/src/tools/mod.rs @@ -56,7 +56,7 @@ impl AsAny for T { /// The default 2 hours are far too long for PBS pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120; -pub const DEFAULT_USER_AGENT_STRING: &'static str = "proxmox-backup-client/1.0"; +pub const DEFAULT_USER_AGENT_STRING: &str = "proxmox-backup-client/1.0"; /// Returns a new instance of `SimpleHttp` configured for PBS usage. pub fn pbs_simple_http(proxy_config: Option) -> SimpleHttp { @@ -64,7 +64,6 @@ pub fn pbs_simple_http(proxy_config: Option) -> SimpleHttp { proxy_config, user_agent: Some(DEFAULT_USER_AGENT_STRING.to_string()), tcp_keepalive: Some(PROXMOX_BACKUP_TCP_KEEPALIVE_TIME), - ..Default::default() }; SimpleHttp::with_options(options)
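
The most frequent change in this series swaps `ok_or(format_err!(...))` for `ok_or_else(|| format_err!(...))`. The motivation is that `ok_or` evaluates its argument eagerly, so the error value (and the formatting work behind `format_err!`) is produced even when the `Option` is `Some`; `ok_or_else` defers that work to a closure that only runs in the `None` case. Below is a minimal, self-contained sketch of the difference using only the standard library; the `expensive_error` helper is purely illustrative and not taken from the patch:

```rust
fn expensive_error(tag: &str) -> String {
    // imagine an allocation or formatting step we'd rather skip on the happy path
    println!("building error value for {tag}");
    format!("lookup of '{tag}' failed")
}

fn main() {
    let found: Option<u32> = Some(42);

    // eager: the argument is evaluated before ok_or runs, even though `found` is Some
    let _ = found.ok_or(expensive_error("eager"));

    // lazy: the closure is only invoked when the Option is None
    let _ = found.ok_or_else(|| expensive_error("lazy"));

    // output: only "building error value for eager" is printed
}
```

The same reasoning drives the `unwrap_or_else`/`or_insert_with` call sites touched above, and the reverse change in `disks/directory.rs` (`unwrap_or_else(|| &data.Where)` to `unwrap_or(&data.Where)`) follows from it too: when the fallback is already a cheap reference, the closure only adds noise.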