clippy 1.65 fixes

and rustfmt

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
This commit is contained in:
Fabian Grünbichler 2022-12-05 11:27:40 +01:00
parent b0e3095594
commit 16f6766a68
89 changed files with 184 additions and 185 deletions

View File

@ -5,7 +5,7 @@ use std::process::Command;
fn main() {
let repoid = match env::var("REPOID") {
Ok(repoid) => repoid,
Err(_) => match Command::new("git").args(&["rev-parse", "HEAD"]).output() {
Err(_) => match Command::new("git").args(["rev-parse", "HEAD"]).output() {
Ok(output) => String::from_utf8(output.stdout).unwrap(),
Err(err) => {
panic!("git rev-parse failed: {}", err);

View File

@ -393,7 +393,7 @@ impl BackupWriter {
"wid": wid ,
"chunk-count": upload_stats.chunk_count,
"size": upload_stats.size,
"csum": hex::encode(&upload_stats.csum),
"csum": hex::encode(upload_stats.csum),
});
let _value = self.h2.post(&close_path, Some(param)).await?;
Ok(BackupStats {
@ -478,7 +478,7 @@ impl BackupWriter {
let mut digest_list = vec![];
let mut offset_list = vec![];
for (offset, digest) in chunk_list {
digest_list.push(hex::encode(&digest));
digest_list.push(hex::encode(digest));
offset_list.push(offset);
}
log::debug!("append chunks list len ({})", digest_list.len());
@ -704,7 +704,7 @@ impl BackupWriter {
if let MergedChunkInfo::New(chunk_info) = merged_chunk_info {
let offset = chunk_info.offset;
let digest = chunk_info.digest;
let digest_str = hex::encode(&digest);
let digest_str = hex::encode(digest);
log::trace!(
"upload new chunk {} ({} bytes, offset {})",

View File

@ -573,7 +573,7 @@ impl HttpClient {
Ok(fp) => fp,
Err(err) => bail!("failed to calculate certificate FP - {}", err), // should not happen
};
let fp_string = hex::encode(&fp);
let fp_string = hex::encode(fp);
let fp_string = fp_string
.as_bytes()
.chunks(2)

View File

@ -30,7 +30,7 @@ impl Future for PipeToSendStream {
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = self.get_mut();
if this.data != None {
if this.data.is_some() {
// just reserve 1 byte to make sure there's some
// capacity available. h2 will handle the capacity
// management for the actual body chunk.

View File

@ -1003,7 +1003,7 @@ fn get_acl_do(
// In order to be able to get ACLs with type ACL_TYPE_DEFAULT, we have
// to create a path for acl_get_file(). acl_get_fd() only allows to get
// ACL_TYPE_ACCESS attributes.
let acl = match acl::ACL::get_file(&proc_path, acl_type) {
let acl = match acl::ACL::get_file(proc_path, acl_type) {
Ok(acl) => acl,
// Don't bail if underlying endpoint does not support acls
Err(Errno::EOPNOTSUPP) => {
@ -1079,10 +1079,10 @@ fn process_acl(
metadata.acl.group_obj = acl_group_obj;
}
acl::ACL_TYPE_DEFAULT => {
if user_obj_permissions != None
|| group_obj_permissions != None
|| other_permissions != None
|| mask_permissions != None
if user_obj_permissions.is_some()
|| group_obj_permissions.is_some()
|| other_permissions.is_some()
|| mask_permissions.is_some()
{
acl_default = Some(pxar_acl::Default {
// The value is set to UINT64_MAX as placeholder if one

View File

@ -64,7 +64,7 @@ where
}
create_path(
&destination,
destination,
None,
Some(CreateOptions::new().perm(Mode::from_bits_truncate(0o700))),
)

View File

@ -36,8 +36,8 @@ pub fn lock_config() -> Result<BackupLockGuard, Error> {
}
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let content =
proxmox_sys::fs::file_read_optional_string(DATASTORE_CFG_FILENAME)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(DATASTORE_CFG_FILENAME, &content)?;

View File

@ -39,8 +39,8 @@ pub fn lock_config() -> Result<BackupLockGuard, Error> {
}
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(DOMAINS_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let content =
proxmox_sys::fs::file_read_optional_string(DOMAINS_CFG_FILENAME)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(DOMAINS_CFG_FILENAME, &content)?;

View File

@ -68,8 +68,8 @@ pub fn lock() -> Result<BackupLockGuard, Error> {
/// Read and parse the configuration file
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(DRIVE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let content =
proxmox_sys::fs::file_read_optional_string(DRIVE_CFG_FILENAME)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(DRIVE_CFG_FILENAME, &content)?;

View File

@ -251,7 +251,7 @@ impl KeyConfig {
.write(true)
.mode(0o0600)
.create_new(true)
.open(&path)?;
.open(path)?;
file.write_all(data.as_bytes())?;
}
@ -269,7 +269,7 @@ pub fn load_and_decrypt_key(
path: &std::path::Path,
passphrase: &dyn Fn() -> Result<Vec<u8>, Error>,
) -> Result<([u8; 32], i64, Fingerprint), Error> {
decrypt_key(&file_get_contents(&path)?, passphrase)
decrypt_key(&file_get_contents(path)?, passphrase)
.with_context(|| format!("failed to load decryption key from {:?}", path))
}

View File

@ -48,8 +48,8 @@ pub fn lock() -> Result<BackupLockGuard, Error> {
/// Read and parse the configuration file
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(MEDIA_POOL_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let content =
proxmox_sys::fs::file_read_optional_string(MEDIA_POOL_CFG_FILENAME)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(MEDIA_POOL_CFG_FILENAME, &content)?;

View File

@ -37,8 +37,8 @@ pub fn lock_config() -> Result<BackupLockGuard, Error> {
}
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(REMOTE_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let content =
proxmox_sys::fs::file_read_optional_string(REMOTE_CFG_FILENAME)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(REMOTE_CFG_FILENAME, &content)?;

View File

@ -36,8 +36,8 @@ pub fn lock_config() -> Result<BackupLockGuard, Error> {
}
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(SYNC_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let content =
proxmox_sys::fs::file_read_optional_string(SYNC_CFG_FILENAME)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(SYNC_CFG_FILENAME, &content)?;

View File

@ -36,8 +36,8 @@ pub fn lock() -> Result<BackupLockGuard, Error> {
}
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(TAPE_JOB_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let content =
proxmox_sys::fs::file_read_optional_string(TAPE_JOB_CFG_FILENAME)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(TAPE_JOB_CFG_FILENAME, &content)?;

View File

@ -44,7 +44,7 @@ pub fn lock_config() -> Result<BackupLockGuard, Error> {
/// Read and parse the configuration file
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(TRAFFIC_CONTROL_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let data = CONFIG.parse(TRAFFIC_CONTROL_CFG_FILENAME, &content)?;

View File

@ -51,8 +51,8 @@ pub fn lock_config() -> Result<BackupLockGuard, Error> {
}
pub fn config() -> Result<(SectionConfigData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(USER_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let content =
proxmox_sys::fs::file_read_optional_string(USER_CFG_FILENAME)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let mut data = CONFIG.parse(USER_CFG_FILENAME, &content)?;

View File

@ -364,7 +364,7 @@ impl DataStore {
self.stat_chunk(&info.digest).map_err(|err| {
format_err!(
"fast_index_verification error, stat_chunk {} failed - {}",
hex::encode(&info.digest),
hex::encode(info.digest),
err,
)
})?;

View File

@ -471,7 +471,7 @@ impl DynamicChunkWriter {
chunk_size,
(compressed_size * 100) / (chunk_size as u64),
is_duplicate,
hex::encode(&digest)
hex::encode(digest)
);
self.index.add_chunk(self.chunk_offset as u64, &digest)?;
self.chunk_buffer.truncate(0);

View File

@ -148,7 +148,7 @@ impl BackupManifest {
/// By generating an HMAC SHA256 over the canonical JSON
/// representation. The 'unprotected' property is excluded.
pub fn signature(&self, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
Self::json_signature(&serde_json::to_value(&self)?, crypt_config)
Self::json_signature(&serde_json::to_value(self)?, crypt_config)
}
fn json_signature(data: &Value, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
@ -166,11 +166,11 @@ impl BackupManifest {
/// Converts the Manifest into json string, and add a signature if there is a crypt_config.
pub fn to_string(&self, crypt_config: Option<&CryptConfig>) -> Result<String, Error> {
let mut manifest = serde_json::to_value(&self)?;
let mut manifest = serde_json::to_value(self)?;
if let Some(crypt_config) = crypt_config {
let sig = self.signature(crypt_config)?;
manifest["signature"] = hex::encode(&sig).into();
manifest["signature"] = hex::encode(sig).into();
let fingerprint = &Fingerprint::new(crypt_config.fingerprint());
manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
}
@ -223,7 +223,7 @@ impl BackupManifest {
if let Some(crypt_config) = crypt_config {
if let Some(signature) = signature {
let expected_signature = hex::encode(&Self::json_signature(&json, crypt_config)?);
let expected_signature = hex::encode(Self::json_signature(&json, crypt_config)?);
let fingerprint = &json["unprotected"]["key-fingerprint"];
if fingerprint != &Value::Null {
@ -300,7 +300,7 @@ fn test_manifest_signature() -> Result<(), Error> {
);
let manifest: BackupManifest = serde_json::from_value(manifest)?;
let expected_signature = hex::encode(&manifest.signature(&crypt_config)?);
let expected_signature = hex::encode(manifest.signature(&crypt_config)?);
assert_eq!(signature, expected_signature);

View File

@ -227,7 +227,7 @@ fn paperkey_text<W: Write>(
fn generate_qr_code(output_type: &str, lines: &[String]) -> Result<Vec<u8>, Error> {
let mut child = Command::new("qrencode")
.args(&["-t", output_type, "-m0", "-s1", "-lm", "--output", "-"])
.args(["-t", output_type, "-m0", "-s1", "-lm", "--output", "-"])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?;

View File

@ -155,9 +155,9 @@ fn scsi_move_medium_cdb(
let mut cmd = Vec::new();
cmd.push(0xA5); // MOVE MEDIUM (A5h)
cmd.push(0); // reserved
cmd.extend(&medium_transport_address.to_be_bytes());
cmd.extend(&source_element_address.to_be_bytes());
cmd.extend(&destination_element_address.to_be_bytes());
cmd.extend(medium_transport_address.to_be_bytes());
cmd.extend(source_element_address.to_be_bytes());
cmd.extend(destination_element_address.to_be_bytes());
cmd.push(0); // reserved
cmd.push(0); // reserved
cmd.push(0); // Invert=0
@ -287,9 +287,9 @@ fn scsi_read_element_status_cdb(
let mut cmd = Vec::new();
cmd.push(0xB8); // READ ELEMENT STATUS (B8h)
cmd.push(element_type.byte1());
cmd.extend(&start_element_address.to_be_bytes());
cmd.extend(start_element_address.to_be_bytes());
cmd.extend(&number_of_elements.to_be_bytes());
cmd.extend(number_of_elements.to_be_bytes());
cmd.push(element_type.byte6());
cmd.extend(&allocation_len.to_be_bytes()[1..4]);
cmd.push(0);

View File

@ -186,7 +186,7 @@ impl SgTape {
} else {
cmd.push(1); // LONG=1
}
cmd.extend(&[0, 0, 0, 0]);
cmd.extend([0, 0, 0, 0]);
sg_raw
.do_command(&cmd)
@ -229,7 +229,7 @@ impl SgTape {
let mut cmd = Vec::new();
if has_format {
cmd.extend(&[0x04, 0, 0, 0, 0, 0]); // FORMAT
cmd.extend([0x04, 0, 0, 0, 0, 0]); // FORMAT
sg_raw.do_command(&cmd)?;
if !fast {
self.erase_media(false)?; // overwrite everything
@ -248,7 +248,7 @@ impl SgTape {
let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
let mut cmd = Vec::new();
cmd.extend(&[0x1E, 0, 0, 0]);
cmd.extend([0x1E, 0, 0, 0]);
if allow {
cmd.push(0);
} else {
@ -265,7 +265,7 @@ impl SgTape {
let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
let mut cmd = Vec::new();
cmd.extend(&[0x01, 0, 0, 0, 0, 0]); // REWIND
cmd.extend([0x01, 0, 0, 0, 0, 0]); // REWIND
sg_raw
.do_command(&cmd)
@ -315,9 +315,9 @@ impl SgTape {
let fixed_position = fixed_position.saturating_sub(1);
let mut cmd = Vec::new();
cmd.extend(&[0x92, 0b000_01_000, 0, 0]); // LOCATE(16) filemarks
cmd.extend(&fixed_position.to_be_bytes());
cmd.extend(&[0, 0, 0, 0]);
cmd.extend([0x92, 0b000_01_000, 0, 0]); // LOCATE(16) filemarks
cmd.extend(fixed_position.to_be_bytes());
cmd.extend([0, 0, 0, 0]);
sg_raw
.do_command(&cmd)
@ -365,7 +365,7 @@ impl SgTape {
// READ POSITION LONG FORM works on LTO4 or newer (with recent
// firmware), although it is missing in the IBM LTO4 SSCI
// reference manual.
cmd.extend(&[0x34, 0x06, 0, 0, 0, 0, 0, 0, 0, 0]); // READ POSITION LONG FORM
cmd.extend([0x34, 0x06, 0, 0, 0, 0, 0, 0, 0, 0]); // READ POSITION LONG FORM
let data = sg_raw
.do_command(&cmd)
@ -442,7 +442,7 @@ impl SgTape {
let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
let mut cmd = Vec::new();
cmd.extend(&[0x11, 0x03, 0, 0, 0, 0]); // SPACE(6) move to EOD
cmd.extend([0x11, 0x03, 0, 0, 0, 0]); // SPACE(6) move to EOD
sg_raw
.do_command(&cmd)
@ -479,10 +479,10 @@ impl SgTape {
} else {
cmd.push(1); // filemarks
}
cmd.extend(&[0, 0]); // reserved
cmd.extend([0, 0]); // reserved
let count: i64 = count as i64;
cmd.extend(&count.to_be_bytes());
cmd.extend(&[0, 0, 0, 0]); // reserved
cmd.extend(count.to_be_bytes());
cmd.extend([0, 0, 0, 0]); // reserved
}
sg_raw.do_command(&cmd)?;
@ -504,7 +504,7 @@ impl SgTape {
let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
let mut cmd = Vec::new();
cmd.extend(&[0x1B, 0, 0, 0, 0, 0]); // LOAD/UNLOAD HOLD=0, LOAD=0
cmd.extend([0x1B, 0, 0, 0, 0, 0]); // LOAD/UNLOAD HOLD=0, LOAD=0
sg_raw
.do_command(&cmd)
@ -517,7 +517,7 @@ impl SgTape {
let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
let mut cmd = Vec::new();
cmd.extend(&[0x1B, 0, 0, 0, 0b0000_0001, 0]); // LOAD/UNLOAD HOLD=0, LOAD=1
cmd.extend([0x1B, 0, 0, 0, 0b0000_0001, 0]); // LOAD/UNLOAD HOLD=0, LOAD=1
sg_raw
.do_command(&cmd)
@ -542,7 +542,7 @@ impl SgTape {
} else {
cmd.push(0); // IMMED=0
}
cmd.extend(&[0, 0, count as u8]); // COUNT
cmd.extend([0, 0, count as u8]); // COUNT
cmd.push(0); // control byte
match sg_raw.do_command(&cmd) {
@ -570,7 +570,7 @@ impl SgTape {
let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
sg_raw.set_timeout(30); // use short timeout
let mut cmd = Vec::new();
cmd.extend(&[0x00, 0, 0, 0, 0, 0]); // TEST UNIT READY
cmd.extend([0x00, 0, 0, 0, 0, 0]); // TEST UNIT READY
match sg_raw.do_command(&cmd) {
Ok(_) => Ok(()),
@ -774,10 +774,10 @@ impl SgTape {
let mut cmd = Vec::new();
cmd.push(0x55); // MODE SELECT(10)
cmd.push(0b0001_0000); // PF=1
cmd.extend(&[0, 0, 0, 0, 0]); //reserved
cmd.extend([0, 0, 0, 0, 0]); //reserved
let param_list_len: u16 = data.len() as u16;
cmd.extend(&param_list_len.to_be_bytes());
cmd.extend(param_list_len.to_be_bytes());
cmd.push(0); // control
let mut buffer = alloc_page_aligned_buffer(4096)?;

View File

@ -120,7 +120,7 @@ fn sg_spout_set_encryption<F: AsRawFd>(
cmd.push(0x10); // Set Data Encryption page
cmd.push(0);
cmd.push(0);
cmd.extend(&(outbuf_len as u32).to_be_bytes()); // data out len
cmd.extend((outbuf_len as u32).to_be_bytes()); // data out len
cmd.push(0);
cmd.push(0);
@ -143,7 +143,7 @@ fn sg_spin_data_encryption_status<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, E
cmd.push(0x20); // Data Encryption Status page
cmd.push(0);
cmd.push(0);
cmd.extend(&allocation_len.to_be_bytes());
cmd.extend(allocation_len.to_be_bytes());
cmd.push(0);
cmd.push(0);
@ -172,7 +172,7 @@ fn sg_spin_data_encryption_caps<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Err
cmd.push(0x10); // Data Encryption Capabilities page
cmd.push(0);
cmd.push(0);
cmd.extend(&allocation_len.to_be_bytes());
cmd.extend(allocation_len.to_be_bytes());
cmd.push(0);
cmd.push(0);

View File

@ -182,10 +182,10 @@ fn read_tape_mam<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
let mut sg_raw = SgRaw::new(file, alloc_len as usize)?;
let mut cmd = Vec::new();
cmd.extend(&[0x8c, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8]);
cmd.extend(&[0u8, 0u8]); // first attribute
cmd.extend(&alloc_len.to_be_bytes()); // alloc len
cmd.extend(&[0u8, 0u8]);
cmd.extend([0x8c, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8]);
cmd.extend([0u8, 0u8]); // first attribute
cmd.extend(alloc_len.to_be_bytes()); // alloc len
cmd.extend([0u8, 0u8]);
sg_raw
.do_command(&cmd)

View File

@ -29,8 +29,8 @@ pub fn report_density<F: AsRawFd>(file: &mut F) -> Result<u8, Error> {
let mut sg_raw = SgRaw::new(file, alloc_len as usize)?;
let mut cmd = Vec::new();
cmd.extend(&[0x44, 0, 0, 0, 0, 0, 0]); // REPORT DENSITY SUPPORT (MEDIA=0)
cmd.extend(&alloc_len.to_be_bytes()); // alloc len
cmd.extend([0x44, 0, 0, 0, 0, 0, 0]); // REPORT DENSITY SUPPORT (MEDIA=0)
cmd.extend(alloc_len.to_be_bytes()); // alloc len
cmd.push(0u8); // control byte
let data = sg_raw

View File

@ -94,7 +94,7 @@ fn sg_read_tape_alert_flags<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error>
cmd.push(0);
cmd.push(0);
cmd.push(0);
cmd.extend(&[2u8, 0u8]); // alloc len
cmd.extend([2u8, 0u8]); // alloc len
cmd.push(0u8); // control byte
sg_raw

View File

@ -35,7 +35,7 @@ fn sg_read_volume_statistics<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error>
cmd.push(0);
cmd.push(0);
cmd.push(0);
cmd.extend(&alloc_len.to_be_bytes()); // alloc len
cmd.extend(alloc_len.to_be_bytes()); // alloc len
cmd.push(0u8); // control byte
sg_raw

View File

@ -644,7 +644,7 @@ pub fn scsi_inquiry<F: AsRawFd>(file: &mut F) -> Result<InquiryInfo, Error> {
sg_raw.set_timeout(30); // use short timeout
let mut cmd = Vec::new();
cmd.extend(&[0x12, 0, 0, 0, allocation_len, 0]); // INQUIRY
cmd.extend([0x12, 0, 0, 0, allocation_len, 0]); // INQUIRY
let data = sg_raw
.do_command(&cmd)
@ -691,8 +691,8 @@ pub fn scsi_mode_sense<F: AsRawFd, P: Endian>(
cmd.push(page_code & 63); // report current values for page_code
cmd.push(sub_page_code);
cmd.extend(&[0, 0, 0]); // reserved
cmd.extend(&allocation_len.to_be_bytes()); // allocation len
cmd.extend([0, 0, 0]); // reserved
cmd.extend(allocation_len.to_be_bytes()); // allocation len
cmd.push(0); //control
let data = sg_raw
@ -752,7 +752,7 @@ pub fn scsi_request_sense<F: AsRawFd>(file: &mut F) -> Result<RequestSenseFixed,
let mut sg_raw = SgRaw::new(file, allocation_len as usize)?;
sg_raw.set_timeout(30); // use short timeout
let mut cmd = Vec::new();
cmd.extend(&[0x03, 0, 0, 0, allocation_len, 0]); // REQUEST SENSE FIXED FORMAT
cmd.extend([0x03, 0, 0, 0, allocation_len, 0]); // REQUEST SENSE FIXED FORMAT
let data = sg_raw
.do_command(&cmd)

View File

@ -68,7 +68,7 @@ impl CertInfo {
pub fn fingerprint(&self) -> Result<String, Error> {
let fp = self.x509.digest(openssl::hash::MessageDigest::sha256())?;
Ok(hex::encode(&fp)
Ok(hex::encode(fp)
.as_bytes()
.chunks(2)
.map(|v| std::str::from_utf8(v).unwrap())

View File

@ -430,7 +430,7 @@ fn commit_journal_impl(
for rel_path in files.iter() {
let mut path = config.basedir.clone();
path.push(&rel_path);
path.push(rel_path);
fsync_file_or_dir(&path)
.map_err(|err| format_err!("fsync rrd file {} failed - {}", rel_path, err))?;
}

View File

@ -425,12 +425,12 @@ impl RRD {
options: CreateOptions,
avoid_page_cache: bool,
) -> Result<(), Error> {
let (fd, tmp_path) = make_tmp_file(&path, options)?;
let (fd, tmp_path) = make_tmp_file(path, options)?;
let mut file = unsafe { std::fs::File::from_raw_fd(fd.into_raw_fd()) };
let mut try_block = || -> Result<(), Error> {
let mut data: Vec<u8> = Vec::new();
data.extend(&PROXMOX_RRD_MAGIC_2_0);
data.extend(PROXMOX_RRD_MAGIC_2_0);
serde_cbor::to_writer(&mut data, self)?;
file.write_all(&data)?;
@ -454,7 +454,7 @@ impl RRD {
}
}
if let Err(err) = std::fs::rename(&tmp_path, &path) {
if let Err(err) = std::fs::rename(&tmp_path, path) {
let _ = nix::unistd::unlink(&tmp_path);
bail!("Atomic rename failed - {}", err);
}

View File

@ -114,7 +114,7 @@ impl DnsPlugin {
let mut command = Command::new("/usr/bin/setpriv");
#[rustfmt::skip]
command.args(&[
command.args([
"--reuid", "nobody",
"--regid", "nogroup",
"--clear-groups",

View File

@ -133,7 +133,7 @@ pub fn read_acl(
extract_acl_node_data(&tree.root, "", &mut list, exact, &auth_id_filter);
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -45,7 +45,7 @@ fn list_domains(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<BasicRealmInfo>,
list.push(serde_json::from_value(entry)?);
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -76,7 +76,7 @@ pub fn list_users(
let list: Vec<User> = config.convert_to_typed_array("user")?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
let iter = list.into_iter().filter(filter_by_privs);
let list = if include_tokens {
@ -182,7 +182,7 @@ pub fn create_user(
pub fn read_user(userid: Userid, rpcenv: &mut dyn RpcEnvironment) -> Result<User, Error> {
let (config, digest) = pbs_config::user::config()?;
let user = config.lookup("user", userid.as_str())?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(user)
}
@ -423,7 +423,7 @@ pub fn read_token(
let tokenid = Authid::from((userid, Some(token_name)));
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
config.lookup("token", &tokenid.to_string())
}
@ -711,7 +711,7 @@ pub fn list_tokens(
let list: Vec<ApiToken> = config.convert_to_typed_array("token")?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
let filter_by_owner = |token: ApiToken| {
if token.tokenid.is_token() && token.tokenid.user() == &userid {

View File

@ -1516,7 +1516,7 @@ pub fn upload_backup_log(
let file_name = CLIENT_LOG_BLOB_NAME;
let mut path = backup_dir.full_path();
path.push(&file_name);
path.push(file_name);
if path.exists() {
bail!("backup already contains a log.");

View File

@ -83,7 +83,7 @@ pub fn list_metric_servers(
list.push(serde_json::from_value(entry)?);
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -87,7 +87,7 @@ pub fn list_prune_jobs(
});
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -78,7 +78,7 @@ pub fn list_sync_jobs(
});
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -67,7 +67,7 @@ pub fn show_current_traffic(
}
// also return the configuration digest
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -84,7 +84,7 @@ pub fn list_verification_jobs(
});
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -484,7 +484,7 @@ fn create_fixed_index(
};
let (old_csum, _) = index.compute_csum();
let old_csum = hex::encode(&old_csum);
let old_csum = hex::encode(old_csum);
if old_csum != csum {
bail!(
"expected csum ({}) doesn't match last backup's ({}), cannot do incremental backup",

View File

@ -165,7 +165,7 @@ fn upload_fixed_chunk(
UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size).await?;
env.register_fixed_chunk(wid, digest, size, compressed_size, is_duplicate)?;
let digest_str = hex::encode(&digest);
let digest_str = hex::encode(digest);
env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));
let result = Ok(json!(digest_str));
@ -234,7 +234,7 @@ fn upload_dynamic_chunk(
UploadChunk::new(req_body, env.datastore.clone(), digest, size, encoded_size).await?;
env.register_dynamic_chunk(wid, digest, size, compressed_size, is_duplicate)?;
let digest_str = hex::encode(&digest);
let digest_str = hex::encode(digest);
env.debug(format!("upload_chunk done: {} bytes, {}", size, digest_str));
let result = Ok(json!(digest_str));

View File

@ -36,7 +36,7 @@ pub fn list_openid_realms(
let list = config.convert_to_typed_array("openid")?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}
@ -138,7 +138,7 @@ pub fn read_openid_realm(
let config = domains.lookup("openid", &realm)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(config)
}

View File

@ -44,7 +44,7 @@ pub fn get_webauthn_config(
Some(c) => c,
None => return Ok(None),
};
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(Some(config))
}

View File

@ -521,7 +521,7 @@ fn modify_cfg_for_api(id: &str, ty: &str, data: &Value) -> PluginConfig {
/// List ACME challenge plugins.
pub fn list_plugins(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<PluginConfig>, Error> {
let (plugins, digest) = plugin::config()?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(plugins
.iter()
.map(|(id, (ty, data))| modify_cfg_for_api(id, ty, data))
@ -543,7 +543,7 @@ pub fn list_plugins(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<PluginConfig>
/// List ACME challenge plugins.
pub fn get_plugin(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<PluginConfig, Error> {
let (plugins, digest) = plugin::config()?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
match plugins.get(&id) {
Some((ty, data)) => Ok(modify_cfg_for_api(&id, ty, data)),

View File

@ -86,7 +86,7 @@ pub fn get_config(
let data: ScsiTapeChanger = config.lookup("changer", &name)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(data)
}
@ -127,7 +127,7 @@ pub fn list_changers(
})
.collect();
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -51,7 +51,7 @@ pub fn list_datastores(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
let list: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
let filter_by_privs = |store: &DataStoreConfig| {
@ -154,7 +154,7 @@ pub fn read_datastore(
let (config, digest) = pbs_config::datastore::config()?;
let store_config = config.lookup("datastore", &name)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(store_config)
}

View File

@ -86,7 +86,7 @@ pub fn get_config(
let data: LtoTapeDrive = config.lookup("lto", &name)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(data)
}
@ -127,7 +127,7 @@ pub fn list_drives(
})
.collect();
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(drive_list)
}

View File

@ -72,7 +72,7 @@ pub fn list_pools(rpcenv: &mut dyn RpcEnvironment) -> Result<Vec<MediaPoolConfig
})
.collect();
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -57,7 +57,7 @@ pub fn list_influxdb_http_servers(
item.token = None;
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}
@ -160,7 +160,7 @@ pub fn read_influxdb_http_server(
config.token = None;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(config)
}

View File

@ -42,7 +42,7 @@ pub fn list_influxdb_udp_servers(
let list = config.convert_to_typed_array("influxdb-udp")?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}
@ -145,7 +145,7 @@ pub fn read_influxdb_udp_server(
let config = metrics.lookup("influxdb-udp", &name)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(config)
}

View File

@ -51,7 +51,7 @@ pub fn list_prune_jobs(
})
.collect();
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}
@ -127,7 +127,7 @@ pub fn read_prune_job(
let required_privs = PRIV_DATASTORE_AUDIT;
user_info.check_privs(&auth_id, &prune_job.acl_path(), required_privs, true)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(prune_job)
}

View File

@ -58,7 +58,7 @@ pub fn list_remotes(
})
.collect();
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}
@ -127,7 +127,7 @@ pub fn read_remote(
) -> Result<RemoteWithoutPassword, Error> {
let (config, digest) = pbs_config::remote::config()?;
let data: RemoteWithoutPassword = config.lookup("remote", &name)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(data)
}

View File

@ -93,7 +93,7 @@ pub fn list_sync_jobs(
let list = config.convert_to_typed_array("sync")?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
let list = list
.into_iter()
@ -181,7 +181,7 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Sync
bail!("permission check failed");
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(sync_job)
}

View File

@ -47,7 +47,7 @@ pub fn list_tape_backup_jobs(
})
.collect();
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}
@ -110,7 +110,7 @@ pub fn read_tape_backup_job(
let job = config.lookup("backup", &id)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(job)
}

View File

@ -45,7 +45,7 @@ pub fn list_keys(
list.push(item.into());
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -34,7 +34,7 @@ pub fn list_traffic_controls(
let list: Vec<TrafficControlRule> = config.convert_to_typed_array("rule")?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}
@ -95,7 +95,7 @@ pub fn read_traffic_control(
) -> Result<TrafficControlRule, Error> {
let (config, digest) = pbs_config::traffic_control::config()?;
let data: TrafficControlRule = config.lookup("rule", &name)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(data)
}

View File

@ -51,7 +51,7 @@ pub fn list_verification_jobs(
})
.collect();
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}
@ -127,7 +127,7 @@ pub fn read_verification_job(
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
user_info.check_privs(&auth_id, &verification_job.acl_path(), required_privs, true)?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(verification_job)
}

View File

@ -479,7 +479,7 @@ pub fn get_versions() -> Result<Vec<APTUpdateInfo>, Error> {
/// Get APT repository information.
pub fn get_repositories() -> Result<Value, Error> {
let (files, errors, digest) = proxmox_apt::repositories::repositories()?;
let digest = hex::encode(&digest);
let digest = hex::encode(digest);
let suite = proxmox_apt::repositories::get_current_release_codename()?;
@ -525,7 +525,7 @@ pub fn add_repository(handle: APTRepositoryHandle, digest: Option<String>) -> Re
let suite = proxmox_apt::repositories::get_current_release_codename()?;
if let Some(expected_digest) = digest {
let current_digest = hex::encode(&current_digest);
let current_digest = hex::encode(current_digest);
crate::tools::assert_if_modified(&expected_digest, &current_digest)?;
}
@ -618,7 +618,7 @@ pub fn change_repository(
let (mut files, errors, current_digest) = proxmox_apt::repositories::repositories()?;
if let Some(expected_digest) = digest {
let current_digest = hex::encode(&current_digest);
let current_digest = hex::encode(current_digest);
crate::tools::assert_if_modified(&expected_digest, &current_digest)?;
}

View File

@ -134,7 +134,7 @@ impl TryFrom<&cert::CertInfo> for CertificateInfo {
fn get_certificate_pem() -> Result<String, Error> {
let cert_path = configdir!("/proxy.pem");
let cert_pem = proxmox_sys::fs::file_get_contents(&cert_path)?;
let cert_pem = proxmox_sys::fs::file_get_contents(cert_path)?;
String::from_utf8(cert_pem)
.map_err(|_| format_err!("certificate in {:?} is not a valid PEM file", cert_path))
}
@ -253,11 +253,11 @@ pub async fn upload_custom_certificate(
pub async fn delete_custom_certificate() -> Result<(), Error> {
let cert_path = configdir!("/proxy.pem");
// Here we fail since if this fails nothing else breaks anyway
std::fs::remove_file(&cert_path)
std::fs::remove_file(cert_path)
.map_err(|err| format_err!("failed to unlink {:?} - {}", cert_path, err))?;
let key_path = configdir!("/proxy.key");
if let Err(err) = std::fs::remove_file(&key_path) {
if let Err(err) = std::fs::remove_file(key_path) {
// Here we just log since the certificate is already gone and we'd rather try to generate
// the self-signed certificate even if this fails:
log::error!(

View File

@ -30,7 +30,7 @@ pub const ROUTER: Router = Router::new()
/// Get the node configuration
pub fn get_node_config(rpcenv: &mut dyn RpcEnvironment) -> Result<NodeConfig, Error> {
let (config, digest) = crate::config::node::config()?;
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(config)
}

View File

@ -261,7 +261,7 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
let full_path = std::path::Path::new(&mount_unit_path);
log::info!("removing systemd mount unit {:?}", full_path);
std::fs::remove_file(&full_path)?;
std::fs::remove_file(full_path)?;
// try to unmount, if that fails tell the user to reboot or unmount manually
let mut command = std::process::Command::new("umount");

View File

@ -238,7 +238,7 @@ pub fn create_zpool(
);
let mut command = std::process::Command::new("zpool");
command.args(&[
command.args([
"create",
"-o",
&format!("ashift={}", ashift),
@ -293,7 +293,7 @@ pub fn create_zpool(
if let Some(compression) = compression {
command.arg(&format!("compression={}", compression));
}
command.args(&["relatime=on", &name]);
command.args(["relatime=on", &name]);
task_log!(worker, "# {:?}", command);
let output = proxmox_sys::command::run_command(command, None)?;
task_log!(worker, "{}", output);

View File

@ -40,7 +40,7 @@ pub fn read_etc_resolv_conf() -> Result<Value, Error> {
let raw = file_get_contents(RESOLV_CONF_FN)?;
result["digest"] = Value::from(hex::encode(&sha::sha256(&raw)));
result["digest"] = Value::from(hex::encode(sha::sha256(&raw)));
let data = String::from_utf8(raw)?;

View File

@ -112,7 +112,7 @@ pub fn list_network_devices(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let (config, digest) = network::config()?;
let digest = hex::encode(&digest);
let digest = hex::encode(digest);
let mut list = Vec::new();
@ -157,7 +157,7 @@ pub fn read_interface(iface: String) -> Result<Value, Error> {
let interface = config.lookup(&iface)?;
let mut data: Value = to_value(interface)?;
data["digest"] = hex::encode(&digest).into();
data["digest"] = hex::encode(digest).into();
Ok(data)
}

View File

@ -37,7 +37,7 @@ fn get_full_service_state(service: &str) -> Result<Value, Error> {
let real_service_name = real_service_name(service);
let mut child = Command::new("systemctl")
.args(&["show", real_service_name])
.args(["show", real_service_name])
.stdout(Stdio::piped())
.spawn()?;
@ -198,7 +198,7 @@ fn run_service_command(service: &str, cmd: &str, auth_id: Authid) -> Result<Valu
let real_service_name = real_service_name(&service);
let status = Command::new("systemctl")
.args(&[&cmd, real_service_name])
.args([&cmd, real_service_name])
.status()?;
if !status.success() {

View File

@ -18,13 +18,13 @@ fn dump_journal(
let mut args = vec!["-o", "short", "--no-pager"];
if let Some(service) = service {
args.extend(&["--unit", service]);
args.extend(["--unit", service]);
}
if let Some(since) = since {
args.extend(&["--since", since]);
args.extend(["--since", since]);
}
if let Some(until) = until {
args.extend(&["--until", until]);
args.extend(["--until", until]);
}
let mut lines: Vec<Value> = vec![];

View File

@ -126,7 +126,7 @@ pub fn list_tape_backup_jobs(
});
}
rpcenv["digest"] = hex::encode(&digest).into();
rpcenv["digest"] = hex::encode(digest).into();
Ok(list)
}

View File

@ -1569,7 +1569,7 @@ fn scan_chunk_archive<'a>(
worker.check_abort()?;
if verbose {
task_log!(worker, "Found chunk: {}", hex::encode(&digest));
task_log!(worker, "Found chunk: {}", hex::encode(digest));
}
chunks.push(digest);
@ -1602,7 +1602,7 @@ fn restore_chunk_archive<'a>(
let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
if !chunk_exists {
if verbose {
task_log!(worker2, "Insert chunk: {}", hex::encode(&digest));
task_log!(worker2, "Insert chunk: {}", hex::encode(digest));
}
bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
// println!("verify and write {}", hex::encode(&digest));
@ -1613,7 +1613,7 @@ fn restore_chunk_archive<'a>(
datastore.insert_chunk(&chunk, &digest)?;
} else if verbose {
task_log!(worker2, "Found existing chunk: {}", hex::encode(&digest));
task_log!(worker2, "Found existing chunk: {}", hex::encode(digest));
}
Ok(())
},
@ -1747,7 +1747,7 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
};
let mut archive_path = snapshot_path.to_owned();
archive_path.push(&filename);
archive_path.push(filename);
let mut tmp_path = archive_path.clone();
tmp_path.set_extension("tmp");

View File

@ -18,7 +18,7 @@ fn compute_csrf_secret_digest(timestamp: i64, secret: &[u8], userid: &Userid) ->
hasher.update(data.as_bytes());
hasher.update(secret);
base64::encode_config(&hasher.finish(), base64::STANDARD_NO_PAD)
base64::encode_config(hasher.finish(), base64::STANDARD_NO_PAD)
}
pub fn assemble_csrf_prevention_token(secret: &[u8], userid: &Userid) -> String {

View File

@ -360,7 +360,7 @@ async fn compare_files<'a>(
let file_a = entries_a.get(&p).context("File entry not in map")?;
let file_b = entries_b.get(&p).context("File entry not in map")?;
if !compare_file(&file_a, &file_b).await {
if !compare_file(file_a, file_b).await {
modified_files.insert(path, entry);
}
}

View File

@ -125,7 +125,7 @@ fn inspect_chunk(
let decode_output_path = decode.as_ref().map(Path::new);
let blob = DataBlob::load_from_reader(
&mut std::fs::File::open(&chunk_path)
&mut std::fs::File::open(chunk_path)
.map_err(|e| format_err!("could not open chunk file - {}", e))?,
)?;

View File

@ -80,7 +80,7 @@ async fn list_media(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(),
fn render_status(_value: &Value, record: &Value) -> Result<String, Error> {
let record = MediaListEntry::deserialize(record)?;
Ok(match record.status {
MediaStatus::Damaged | MediaStatus::Retired => serde_json::to_value(&record.status)?
MediaStatus::Damaged | MediaStatus::Retired => serde_json::to_value(record.status)?
.as_str()
.unwrap()
.to_string(),
@ -88,7 +88,7 @@ async fn list_media(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(),
if record.expired {
String::from("expired")
} else {
serde_json::to_value(&record.status)?
serde_json::to_value(record.status)?
.as_str()
.unwrap()
.to_string()

View File

@ -114,7 +114,7 @@ pub fn mark_account_deactivated(name: &str) -> Result<(), Error> {
}
pub fn load_dns_challenge_schema() -> Result<Vec<AcmeChallengeSchema>, Error> {
let raw = file_read_string(&ACME_DNS_SCHEMA_FN)?;
let raw = file_read_string(ACME_DNS_SCHEMA_FN)?;
let schemas: serde_json::Map<String, Value> = serde_json::from_str(&raw)?;
Ok(schemas

View File

@ -141,8 +141,8 @@ pub fn lock() -> Result<BackupLockGuard, Error> {
}
pub fn config() -> Result<(PluginData, [u8; 32]), Error> {
let content = proxmox_sys::fs::file_read_optional_string(ACME_PLUGIN_CFG_FILENAME)?
.unwrap_or_else(|| "".to_string());
let content =
proxmox_sys::fs::file_read_optional_string(ACME_PLUGIN_CFG_FILENAME)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let mut data = CONFIG.parse(ACME_PLUGIN_CFG_FILENAME, &content)?;

View File

@ -30,8 +30,7 @@ pub fn lock() -> Result<BackupLockGuard, Error> {
/// Read the Node Config.
pub fn config() -> Result<(NodeConfig, [u8; 32]), Error> {
let content =
proxmox_sys::fs::file_read_optional_string(CONF_FILE)?.unwrap_or_else(|| "".to_string());
let content = proxmox_sys::fs::file_read_optional_string(CONF_FILE)?.unwrap_or_default();
let digest = openssl::sha::sha256(content.as_bytes());
let data: NodeConfig = crate::tools::config::from_str(&content, &NodeConfig::API_SCHEMA)?;

View File

@ -221,7 +221,7 @@ async fn download_manifest(
.create(true)
.truncate(true)
.read(true)
.open(&filename)?;
.open(filename)?;
reader
.download(MANIFEST_BLOB_NAME, &mut tmp_manifest_file)
@ -345,7 +345,7 @@ async fn try_client_log_download(
// Note: be silent if there is no log - only log successful download
if let Ok(()) = reader.download(CLIENT_LOG_BLOB_NAME, tmpfile).await {
if let Err(err) = std::fs::rename(&tmp_path, &path) {
if let Err(err) = std::fs::rename(&tmp_path, path) {
bail!("Atomic rename file {:?} failed - {}", path, err);
}
task_log!(worker, "got backup log file {:?}", CLIENT_LOG_BLOB_NAME);

View File

@ -11,7 +11,7 @@ pub fn mtx_status(config: &ScsiTapeChanger) -> Result<MtxStatus, Error> {
let path = &config.path;
let mut command = std::process::Command::new("mtx");
command.args(&["-f", path, "status"]);
command.args(["-f", path, "status"]);
let output = run_command(command, None)?;
@ -25,7 +25,7 @@ pub fn mtx_status(config: &ScsiTapeChanger) -> Result<MtxStatus, Error> {
/// Run 'mtx load'
pub fn mtx_load(path: &str, slot: u64, drivenum: u64) -> Result<(), Error> {
let mut command = std::process::Command::new("mtx");
command.args(&["-f", path, "load", &slot.to_string(), &drivenum.to_string()]);
command.args(["-f", path, "load", &slot.to_string(), &drivenum.to_string()]);
run_command(command, None)?;
Ok(())
@ -34,7 +34,7 @@ pub fn mtx_load(path: &str, slot: u64, drivenum: u64) -> Result<(), Error> {
/// Run 'mtx unload'
pub fn mtx_unload(path: &str, slot: u64, drivenum: u64) -> Result<(), Error> {
let mut command = std::process::Command::new("mtx");
command.args(&[
command.args([
"-f",
path,
"unload",
@ -49,7 +49,7 @@ pub fn mtx_unload(path: &str, slot: u64, drivenum: u64) -> Result<(), Error> {
/// Run 'mtx transfer'
pub fn mtx_transfer(path: &str, from_slot: u64, to_slot: u64) -> Result<(), Error> {
let mut command = std::process::Command::new("mtx");
command.args(&[
command.args([
"-f",
path,
"transfer",

View File

@ -339,8 +339,8 @@ impl TapeDriver for LtoTapeHandle {
fn run_sg_tape_cmd(subcmd: &str, args: &[&str], fd: RawFd) -> Result<String, Error> {
let mut command =
std::process::Command::new("/usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd");
command.args(&[subcmd]);
command.args(&["--stdin"]);
command.args([subcmd]);
command.args(["--stdin"]);
command.args(args);
let device_fd = nix::unistd::dup(fd)?;
command.stdin(unsafe { std::process::Stdio::from_raw_fd(device_fd) });

View File

@ -79,7 +79,7 @@ pub trait TapeDriver {
self.format_media(true)?; // this rewinds the tape
let raw = serde_json::to_string_pretty(&serde_json::to_value(&label)?)?;
let raw = serde_json::to_string_pretty(&serde_json::to_value(label)?)?;
let header =
MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, raw.len() as u32);

View File

@ -228,7 +228,7 @@ impl MediaCatalog {
if !found_magic_number {
me.pending
.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
.extend(Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
}
if write {
@ -301,7 +301,7 @@ impl MediaCatalog {
me.log_to_stdout = log_to_stdout;
me.pending
.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
.extend(Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
me.register_label(&media_id.label.uuid, 0, 0)?;

View File

@ -36,7 +36,7 @@ pub fn write_pkg_cache(state: &PkgState) -> Result<(), Error> {
}
pub fn read_pkg_state() -> Result<Option<PkgState>, Error> {
let serialized_state = match file_read_optional_string(&APT_PKG_STATE_FN) {
let serialized_state = match file_read_optional_string(APT_PKG_STATE_FN) {
Ok(Some(raw)) => raw,
Ok(None) => return Ok(None),
Err(err) => bail!("could not read cached package state file - {}", err),

View File

@ -22,7 +22,7 @@ pub fn get_lvm_devices(lsblk_info: &[LsblkInfo]) -> Result<HashSet<u64>, Error>
const PVS_BIN_PATH: &str = "pvs";
let mut command = std::process::Command::new(PVS_BIN_PATH);
command.args(&[
command.args([
"--reportformat",
"json",
"--noheadings",

View File

@ -559,7 +559,7 @@ pub struct BlockDevStat {
/// Use lsblk to read partition type uuids and file system types.
pub fn get_lsblk_info() -> Result<Vec<LsblkInfo>, Error> {
let mut command = std::process::Command::new("lsblk");
command.args(&["--json", "-o", "path,parttype,fstype"]);
command.args(["--json", "-o", "path,parttype,fstype"]);
let output = proxmox_sys::command::run_command(command, None)?;
@ -1049,7 +1049,7 @@ pub fn inititialize_gpt_disk(disk: &Disk, uuid: Option<&str>) -> Result<(), Erro
let mut command = std::process::Command::new("sgdisk");
command.arg(disk_path);
command.args(&["-U", uuid]);
command.args(["-U", uuid]);
proxmox_sys::command::run_command(command, None)?;
@ -1064,7 +1064,7 @@ pub fn create_single_linux_partition(disk: &Disk) -> Result<Disk, Error> {
};
let mut command = std::process::Command::new("sgdisk");
command.args(&["-n1", "-t1:8300"]);
command.args(["-n1", "-t1:8300"]);
command.arg(disk_path);
proxmox_sys::command::run_command(command, None)?;
@ -1116,7 +1116,7 @@ pub fn create_file_system(disk: &Disk, fs_type: FileSystemType) -> Result<(), Er
let fs_type = fs_type.to_string();
let mut command = std::process::Command::new("mkfs");
command.args(&["-t", &fs_type]);
command.args(["-t", &fs_type]);
command.arg(disk_path);
proxmox_sys::command::run_command(command, None)?;
@ -1146,7 +1146,7 @@ pub fn get_fs_uuid(disk: &Disk) -> Result<String, Error> {
};
let mut command = std::process::Command::new("blkid");
command.args(&["-o", "export"]);
command.args(["-o", "export"]);
command.arg(disk_path);
let output = proxmox_sys::command::run_command(command, None)?;

View File

@ -82,7 +82,7 @@ pub fn get_smart_data(disk: &super::Disk, health_only: bool) -> Result<SmartData
let mut command = std::process::Command::new(SMARTCTL_BIN_PATH);
command.arg("-H");
if !health_only {
command.args(&["-A", "-j"]);
command.args(["-A", "-j"]);
}
let disk_path = match disk.device_path() {

View File

@ -141,7 +141,7 @@ pub fn zpool_list(pool: Option<String>, verbose: bool) -> Result<Vec<ZFSPoolInfo
// and maybe other things.
let mut command = std::process::Command::new("zpool");
command.args(&["list", "-H", "-p", "-P"]);
command.args(["list", "-H", "-p", "-P"]);
// Note: We do not use -o to define output properties, because zpool command ignores
// that completely for special vdevs and devices

View File

@ -369,7 +369,7 @@ fn test_vdev_list_to_tree() {
pub fn zpool_status(pool: &str) -> Result<Vec<(String, String)>, Error> {
let mut command = std::process::Command::new("zpool");
command.args(&["status", "-p", "-P", pool]);
command.args(["status", "-p", "-P", pool]);
let output = proxmox_sys::command::run_command(command, None)?;

View File

@ -74,7 +74,7 @@ fn test_uncompressed_blob_writer() -> Result<(), Error> {
let mut blob_writer = DataBlobWriter::new_uncompressed(tmp)?;
blob_writer.write_all(&TEST_DATA)?;
verify_test_blob(blob_writer.finish()?, &*TEST_DIGEST_PLAIN)
verify_test_blob(blob_writer.finish()?, &TEST_DIGEST_PLAIN)
}
#[test]
@ -83,7 +83,7 @@ fn test_compressed_blob_writer() -> Result<(), Error> {
let mut blob_writer = DataBlobWriter::new_compressed(tmp)?;
blob_writer.write_all(&TEST_DATA)?;
verify_test_blob(blob_writer.finish()?, &*TEST_DIGEST_PLAIN)
verify_test_blob(blob_writer.finish()?, &TEST_DIGEST_PLAIN)
}
#[test]
@ -92,7 +92,7 @@ fn test_encrypted_blob_writer() -> Result<(), Error> {
let mut blob_writer = DataBlobWriter::new_encrypted(tmp, CRYPT_CONFIG.clone())?;
blob_writer.write_all(&TEST_DATA)?;
verify_test_blob(blob_writer.finish()?, &*TEST_DIGEST_ENC)
verify_test_blob(blob_writer.finish()?, &TEST_DIGEST_ENC)
}
#[test]
@ -101,5 +101,5 @@ fn test_encrypted_compressed_blob_writer() -> Result<(), Error> {
let mut blob_writer = DataBlobWriter::new_encrypted_compressed(tmp, CRYPT_CONFIG.clone())?;
blob_writer.write_all(&TEST_DATA)?;
verify_test_blob(blob_writer.finish()?, &*TEST_DIGEST_ENC)
verify_test_blob(blob_writer.finish()?, &TEST_DIGEST_ENC)
}