more clippy fixes and annotations
the remaining ones are:
- type complexity
- fns with many arguments
- new() without default()
- false positives for redundant closures (where closure returns a static value)
- expected vs actual length check without match/cmp

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
commit e1db06705e
parent 3be9106f1c
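The remaining lint categories above can be reproduced in isolation. A minimal, hypothetical sketch (not code from this repository) of two of them, `new()` without a `Default` impl and the redundant-closure case clippy flags even though the closure just returns a static value:

    struct Changer;

    impl Changer {
        // clippy::new_without_default fires here: an argument-less new()
        // suggests also providing (or deriving) Default.
        fn new() -> Self {
            Changer
        }
    }

    fn static_fallback() -> &'static str {
        "default-drive"
    }

    fn main() {
        let _changer = Changer::new();

        // clippy::redundant_closure suggests `unwrap_or_else(static_fallback)`,
        // which is why such spots stay as false positives in this commit.
        let drive: Option<&str> = None;
        let _drive = drive.unwrap_or_else(|| static_fallback());
    }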
@@ -1190,7 +1190,7 @@ pub struct TypeCounts {
         },
     },
 )]
-#[derive(Clone, Serialize, Deserialize)]
+#[derive(Clone, Default, Serialize, Deserialize)]
 #[serde(rename_all = "kebab-case")]
 /// Garbage collection status.
 pub struct GarbageCollectionStatus {
@@ -1217,24 +1217,6 @@ pub struct GarbageCollectionStatus {
     pub still_bad: usize,
 }
 
-impl Default for GarbageCollectionStatus {
-    fn default() -> Self {
-        GarbageCollectionStatus {
-            upid: None,
-            index_file_count: 0,
-            index_data_bytes: 0,
-            disk_bytes: 0,
-            disk_chunks: 0,
-            removed_bytes: 0,
-            removed_chunks: 0,
-            pending_bytes: 0,
-            pending_chunks: 0,
-            removed_bad: 0,
-            still_bad: 0,
-        }
-    }
-}
-
 #[api(
     properties: {
         "gc-status": {
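Every field of `GarbageCollectionStatus` is either `None` or zero, so the removed manual impl is exactly what `#[derive(Default)]` generates. A minimal sketch of the equivalence, with shortened hypothetical fields:

    #[derive(Default, Debug, PartialEq)]
    struct GcStatus {
        upid: Option<String>,
        index_file_count: usize,
        disk_bytes: u64,
    }

    fn main() {
        // The derived impl zero-initializes every field: Option::default()
        // is None and the integer defaults are 0.
        assert_eq!(
            GcStatus::default(),
            GcStatus { upid: None, index_file_count: 0, disk_bytes: 0 }
        );
    }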
@@ -542,7 +542,7 @@ where
     let file = root
         .lookup(&path)
         .await?
-        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
+        .ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
 
     let mut components = file.entry().path().components();
     components.next_back(); // discard last
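The `ok_or` to `ok_or_else` changes in this and the following hunks fix clippy's `or_fun_call` lint: the argument of `ok_or` is built eagerly, so the error message is formatted even on the success path, while `ok_or_else` defers that work. A small sketch with a hypothetical error constructor:

    fn expensive_error() -> String {
        println!("formatting error message");
        "error opening file".to_string()
    }

    fn main() {
        let value = Some(42);
        // Eager: expensive_error() runs although `value` is Some.
        let _ = value.ok_or(expensive_error());
        // Lazy: the function is only called if `value` is None.
        let _ = value.ok_or_else(expensive_error);
    }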
@@ -586,7 +586,7 @@ where
     let entry = root
         .lookup(&path)
         .await?
-        .ok_or(format_err!("error looking up '{:?}'", path))?;
+        .ok_or_else(|| format_err!("error looking up '{:?}'", path))?;
     let realfile = accessor.follow_hardlink(&entry).await?;
     let metadata = realfile.entry().metadata();
     let realpath = Path::new(link);
@@ -705,7 +705,7 @@ where
     let file = root
         .lookup(&path)
         .await?
-        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
+        .ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
 
     let prefix = {
         let mut components = file.entry().path().components();
@@ -753,7 +753,7 @@ where
     let entry = root
         .lookup(&path)
         .await?
-        .ok_or(format_err!("error looking up '{:?}'", path))?;
+        .ok_or_else(|| format_err!("error looking up '{:?}'", path))?;
     let realfile = accessor.follow_hardlink(&entry).await?;
     let metadata = realfile.entry().metadata();
     log::debug!("adding '{}' to zip", path.display());
@@ -841,7 +841,7 @@ where
     let file = root
         .lookup(&path)
         .await?
-        .ok_or(format_err!("error opening '{:?}'", path.as_ref()))?;
+        .ok_or_else(|| format_err!("error opening '{:?}'", path.as_ref()))?;
 
     recurse_files_extractor(&mut extractor, file).await
 }
@@ -917,8 +917,8 @@ fn get_filename(entry: &Entry) -> Result<(OsString, CString), Error> {
     Ok((file_name_os, file_name))
 }
 
-async fn recurse_files_extractor<'a, T>(
-    extractor: &'a mut Extractor,
+async fn recurse_files_extractor<T>(
+    extractor: &mut Extractor,
     file: FileEntry<T>,
 ) -> Result<(), Error>
 where
@@ -960,8 +960,8 @@ where
     Ok(())
 }
 
-async fn seq_files_extractor<'a, T>(
-    extractor: &'a mut Extractor,
+async fn seq_files_extractor<T>(
+    extractor: &mut Extractor,
     mut decoder: pxar::decoder::aio::Decoder<T>,
 ) -> Result<(), Error>
 where
@@ -134,10 +134,12 @@ pub fn format_single_line_entry(entry: &Entry) -> String {
         _ => ("0".to_string(), String::new()),
     };
 
+    let owner_string = format!("{}/{}", meta.stat.uid, meta.stat.gid);
+
     format!(
         "{} {:<13} {} {:>8} {:?}{}",
         mode_string,
-        format!("{}/{}", meta.stat.uid, meta.stat.gid),
+        owner_string,
         format_mtime(&meta.stat.mtime),
         size,
         entry.path(),
@@ -619,7 +619,7 @@ impl BackupInfo {
         })
     }
 
-    pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
+    pub fn sort_list(list: &mut [BackupInfo], ascendending: bool) {
         if ascendending {
             // oldest first
             list.sort_unstable_by(|a, b| a.backup_dir.dir.time.cmp(&b.backup_dir.dir.time));
@@ -1181,7 +1181,7 @@ impl DataStore {
    /// stat'ed are placed at the end of the list
    pub fn get_chunks_in_order<F, A>(
        &self,
-        index: &Box<dyn IndexFile + Send>,
+        index: &(dyn IndexFile + Send),
        skip_chunk: F,
        check_abort: A,
    ) -> Result<Vec<(usize, u64)>, Error>
@@ -209,7 +209,7 @@ impl Iterator for ListGroups {
                 group_type,
             ) {
                 Ok(ty) => self.id_state = Some(ty),
-                Err(err) => return Some(Err(err.into())),
+                Err(err) => return Some(Err(err)),
             }
         }
     }
@@ -146,7 +146,7 @@ impl<'a, F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'a, F> {
                     Some(Operation::Read),
                 )?;
                 let order =
-                    datastore.get_chunks_in_order(&index, &self.skip_fn, |_| Ok(()))?;
+                    datastore.get_chunks_in_order(&*index, &self.skip_fn, |_| Ok(()))?;
 
                 self.current_index = Some((Arc::new(index), 0, order));
             } else {
@@ -262,8 +262,8 @@ pub fn lookup_device<'a>(devices: &'a [TapeDeviceInfo], path: &str) -> Option<&'
 }
 
 /// Lookup optional drive identification attributes
-pub fn lookup_device_identification<'a>(
-    devices: &'a [TapeDeviceInfo],
+pub fn lookup_device_identification(
+    devices: &[TapeDeviceInfo],
     path: &str,
 ) -> OptionalDeviceIdentification {
     if let Some(info) = lookup_device(devices, path) {
@@ -27,11 +27,10 @@ pub fn initialize_element_status<F: AsRawFd>(file: &mut F) -> Result<(), Error>
     // like mtx(1), set a very long timeout (30 minutes)
     sg_raw.set_timeout(30 * 60);
 
-    let mut cmd = Vec::new();
-    cmd.extend(&[0x07, 0, 0, 0, 0, 0]); // INITIALIZE ELEMENT STATUS (07h)
+    let cmd = &[0x07, 0, 0, 0, 0, 0]; // INITIALIZE ELEMENT STATUS (07h)
 
     sg_raw
-        .do_command(&cmd)
+        .do_command(cmd)
         .map_err(|err| format_err!("initializte element status (07h) failed - {}", err))?;
 
     Ok(())
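The SCSI CDB rewrites here and in the following hunks address clippy's `vec_init_then_push`: a `Vec::new()` followed only by pushes of known values can be an array or `vec![...]` literal. A minimal sketch with hypothetical command bytes:

    fn main() {
        // Lint-triggering pattern: construct, then push constants.
        let mut cmd_old: Vec<u8> = Vec::new();
        cmd_old.push(0x1A);
        cmd_old.push(0x08);

        // Preferred: a literal expresses the fixed-size command directly.
        let cmd_new: &[u8] = &[0x1A, 0x08];

        assert_eq!(cmd_old.as_slice(), cmd_new);
    }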
@@ -123,15 +122,16 @@ fn read_element_address_assignment<F: AsRawFd>(
     let mut sg_raw = SgRaw::new(file, allocation_len as usize)?;
     sg_raw.set_timeout(SCSI_CHANGER_DEFAULT_TIMEOUT);
 
-    let mut cmd = Vec::new();
-    cmd.push(0x1A); // MODE SENSE6 (1Ah)
-    cmd.push(0x08); // DBD=1 (The Disable Block Descriptors)
-    cmd.push(0x1D); // Element Address Assignment Page
-    cmd.push(0);
-    cmd.push(allocation_len); // allocation len
-    cmd.push(0); //control
+    let cmd = &[
+        0x1A, // MODE SENSE6 (1Ah)
+        0x08, // DBD=1 (The Disable Block Descriptors)
+        0x1D, // Element Address Assignment Page
+        0,
+        allocation_len, // allocation len
+        0, //control
+    ];
 
-    let data = execute_scsi_command(&mut sg_raw, &cmd, "read element address assignment", true)?;
+    let data = execute_scsi_command(&mut sg_raw, cmd, "read element address assignment", true)?;
 
     proxmox_lang::try_block!({
         let mut reader = &data[..];
@@ -146,6 +146,7 @@ fn read_element_address_assignment<F: AsRawFd>(
     .map_err(|err: Error| format_err!("decode element address assignment page failed - {}", err))
 }
 
+#[allow(clippy::vec_init_then_push)]
 fn scsi_move_medium_cdb(
     medium_transport_address: u16,
     source_element_address: u16,
@@ -276,6 +277,7 @@ impl ElementType {
     }
 }
 
+#[allow(clippy::vec_init_then_push)]
 fn scsi_read_element_status_cdb(
     start_element_address: u16,
     number_of_elements: u16,
@@ -175,6 +175,7 @@ impl SgTape {
     /// of data. After the command is successfully completed, the
     /// drive is positioned immediately before End Of Data (not End Of
     /// Tape).
+    #[allow(clippy::vec_init_then_push)]
     pub fn erase_media(&mut self, fast: bool) -> Result<(), Error> {
         let mut sg_raw = SgRaw::new(&mut self.file, 16)?;
         sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
@@ -273,6 +274,7 @@ impl SgTape {
         Ok(())
     }
 
+    #[allow(clippy::unusual_byte_groupings)]
     pub fn locate_file(&mut self, position: u64) -> Result<(), Error> {
         if position == 0 {
             return self.rewind();
@@ -534,8 +536,7 @@ impl SgTape {
         })?;
 
         sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
-        let mut cmd = Vec::new();
-        cmd.push(0x10);
+        let mut cmd = vec![0x10];
         if immediate {
             cmd.push(1); // IMMED=1
         } else {
@@ -668,16 +669,17 @@ impl SgTape {
         let mut sg_raw = SgRaw::new(&mut self.file, 0).unwrap(); // cannot fail with size 0
 
         sg_raw.set_timeout(Self::SCSI_TAPE_DEFAULT_TIMEOUT);
-        let mut cmd = Vec::new();
-        cmd.push(0x08); // READ
-        cmd.push(0x02); // VARIABLE SIZED BLOCKS, SILI=1
-        //cmd.push(0x00); // VARIABLE SIZED BLOCKS, SILI=0
-        cmd.push(((transfer_len >> 16) & 0xff) as u8);
-        cmd.push(((transfer_len >> 8) & 0xff) as u8);
-        cmd.push((transfer_len & 0xff) as u8);
-        cmd.push(0); // control byte
+        let cmd = &[
+            0x08, // READ
+            0x02, // VARIABLE SIZED BLOCKS, SILI=1
+            //0x00, // VARIABLE SIZED BLOCKS, SILI=0
+            ((transfer_len >> 16) & 0xff) as u8,
+            ((transfer_len >> 8) & 0xff) as u8,
+            (transfer_len & 0xff) as u8,
+            0, // control byte
+        ];
 
-        let data = match sg_raw.do_in_command(&cmd, buffer) {
+        let data = match sg_raw.do_in_command(cmd, buffer) {
             Ok(data) => data,
             Err(ScsiError::Sense(SenseInfo {
                 sense_key: 0,
@@ -734,6 +736,7 @@ impl SgTape {
     }
 
     /// Set important drive options
+    #[allow(clippy::vec_init_then_push)]
     pub fn set_drive_options(
         &mut self,
         compression: Option<bool>,
@@ -73,6 +73,7 @@ struct SspSetDataEncryptionPage {
     /* key follows */
 }
 
+#[allow(clippy::vec_init_then_push)]
 fn sg_spout_set_encryption<F: AsRawFd>(
     file: &mut F,
     algorythm_index: u8,
@@ -129,6 +130,7 @@ fn sg_spout_set_encryption<F: AsRawFd>(
 }
 
 // Warning: this blocks and fails if there is no media loaded
+#[allow(clippy::vec_init_then_push)]
 fn sg_spin_data_encryption_status<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
     let allocation_len: u32 = 8192 + 4;
 
@@ -157,6 +159,7 @@ fn sg_spin_data_encryption_status<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, E
 }
 
 // Warning: this blocks and fails if there is no media loaded
+#[allow(clippy::vec_init_then_push)]
 fn sg_spin_data_encryption_caps<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
     let allocation_len: u32 = 8192 + 4;
 
@@ -24,6 +24,7 @@ struct MamAttributeHeader {
     len: u16,
 }
 
+#[allow(clippy::upper_case_acronyms)]
 enum MamFormat {
     BINARY,
     ASCII,
@@ -79,6 +79,7 @@ pub fn read_tape_alert_flags<F: AsRawFd>(file: &mut F) -> Result<TapeAlertFlags,
     decode_tape_alert_flags(&data)
 }
 
+#[allow(clippy::vec_init_then_push)]
 fn sg_read_tape_alert_flags<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
     let mut sg_raw = SgRaw::new(file, 512)?;
 
@@ -22,6 +22,7 @@ pub fn read_volume_statistics<F: AsRawFd>(file: &mut F) -> Result<Lp17VolumeStat
     decode_volume_statistics(&data)
 }
 
+#[allow(clippy::vec_init_then_push)]
 fn sg_read_volume_statistics<F: AsRawFd>(file: &mut F) -> Result<Vec<u8>, Error> {
     let alloc_len: u16 = 8192;
     let mut sg_raw = SgRaw::new(file, alloc_len as usize)?;
@@ -236,10 +236,12 @@ pub struct ModeParameterHeader {
 }
 
 impl ModeParameterHeader {
+    #[allow(clippy::unusual_byte_groupings)]
     pub fn buffer_mode(&self) -> u8 {
-        (self.flags3 & 0b0111_0000) >> 4
+        (self.flags3 & 0b0_111_0000) >> 4
     }
 
+    #[allow(clippy::unusual_byte_groupings)]
     pub fn set_buffer_mode(&mut self, buffer_mode: bool) {
         let mut mode = self.flags3 & 0b1_000_1111;
         if buffer_mode {
@@ -248,8 +250,9 @@ impl ModeParameterHeader {
         self.flags3 = mode;
     }
 
+    #[allow(clippy::unusual_byte_groupings)]
     pub fn write_protect(&self) -> bool {
-        (self.flags3 & 0b1000_0000) != 0
+        (self.flags3 & 0b1_000_0000) != 0
     }
 }
 
@@ -380,13 +383,11 @@ impl<'a, F: AsRawFd> SgRaw<'a, F> {
     ///
     /// The file must be a handle to a SCSI device.
     pub fn new(file: &'a mut F, buffer_size: usize) -> Result<Self, Error> {
-        let buffer;
-
-        if buffer_size > 0 {
-            buffer = alloc_page_aligned_buffer(buffer_size)?;
+        let buffer = if buffer_size > 0 {
+            alloc_page_aligned_buffer(buffer_size)?
         } else {
-            buffer = Box::new([]);
-        }
+            Box::new([])
+        };
 
         let sense_buffer = [0u8; 32];
 
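The `SgRaw::new` hunk uses `if` as an expression so both arms initialize the binding directly, instead of a forward-declared `let buffer;` with per-branch assignments. A standalone sketch, using `Vec` in place of the page-aligned buffer type:

    fn alloc(n: usize) -> Vec<u8> {
        vec![0u8; n]
    }

    fn main() {
        let buffer_size = 16;
        // Both arms produce the value; no deferred initialization needed.
        let buffer = if buffer_size > 0 {
            alloc(buffer_size)
        } else {
            Vec::new()
        };
        assert_eq!(buffer.len(), 16);
    }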
@@ -683,8 +684,7 @@ pub fn scsi_mode_sense<F: AsRawFd, P: Endian>(
     let allocation_len: u16 = 4096;
     let mut sg_raw = SgRaw::new(file, allocation_len as usize)?;
 
-    let mut cmd = Vec::new();
-    cmd.push(0x5A); // MODE SENSE(10)
+    let mut cmd = vec![0x5A]; // MODE SENSE(10)
     if disable_block_descriptor {
         cmd.push(8); // DBD=1 (Disable Block Descriptors)
     } else {
@@ -702,7 +702,7 @@ async fn create_backup(
 
     let backup_id = param["backup-id"]
         .as_str()
-        .unwrap_or(proxmox_sys::nodename());
+        .unwrap_or_else(|| proxmox_sys::nodename());
 
     let backup_ns = optional_ns_param(&param)?;
 
@@ -59,7 +59,7 @@ impl ServerAdapter for StaticAuthAdapter {
     > {
         Box::pin(async move {
             match headers.get(hyper::header::AUTHORIZATION) {
-                Some(header) if header.to_str().unwrap_or("") == &self.ticket => {
+                Some(header) if header.to_str().unwrap_or("") == self.ticket => {
                     let user_info: Box<dyn UserInformation + Send + Sync> =
                         Box::new(SimpleUserInformation {});
                     Ok((String::from("root@pam"), user_info))
@@ -82,6 +82,7 @@ struct LVMBucketData {
 /// more subdirectories
 /// e.g.: "/drive-scsi0/part/0/etc/passwd"
 #[derive(Clone)]
+#[allow(clippy::upper_case_acronyms)]
 enum Bucket {
     Partition(PartitionBucketData),
     RawFs(PartitionBucketData),
@@ -91,7 +92,7 @@ enum Bucket {
 
 impl Bucket {
     fn filter_mut<'a, A: AsRef<str>, B: AsRef<str>>(
-        haystack: &'a mut Vec<Bucket>,
+        haystack: &'a mut [Bucket],
         ty: A,
         comp: &[B],
     ) -> Option<&'a mut Bucket> {
@@ -676,7 +676,7 @@ const fn retry() -> Retry {
 impl Retry {
     fn tick(&mut self) -> Result<(), Error> {
         if self.0 >= 3 {
-            Err(Error::Client(format!("kept getting a badNonce error!")))
+            Err(Error::Client("kept getting a badNonce error!".to_string()))
         } else {
             self.0 += 1;
             Ok(())
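`format!` without any interpolation is clippy's `useless_format`; `.to_string()` says the same thing without going through the formatting machinery:

    fn main() {
        // Triggers clippy::useless_format - nothing is interpolated.
        let a = format!("kept getting a badNonce error!");
        // Equivalent, and what the lint suggests.
        let b = "kept getting a badNonce error!".to_string();
        assert_eq!(a, b);
    }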
@@ -257,7 +257,7 @@ fn openid_auth_url(
 
     let url = open_id.authorize_url(PROXMOX_BACKUP_RUN_DIR_M!(), &realm)?;
 
-    Ok(url.into())
+    Ok(url)
 }
 
 #[sortable]
@@ -100,7 +100,7 @@ fn check_privs_and_load_store(
 
     if limited {
         let owner = datastore.get_owner(ns, backup_group)?;
-        check_backup_owner(&owner, &auth_id)?;
+        check_backup_owner(&owner, auth_id)?;
     }
 
     Ok(datastore)
@@ -778,6 +778,7 @@ pub async fn status(
 ///
 /// This function can verify a single backup snapshot, all backup from a backup group,
 /// or all backups in the datastore.
+#[allow(clippy::too_many_arguments)]
 pub fn verify(
     store: String,
     ns: Option<BackupNamespace>,
@@ -1287,7 +1288,7 @@ pub fn download_file(
 
     let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
     let datastore = check_privs_and_load_store(
-        &store,
+        store,
         &backup_ns,
         &auth_id,
         PRIV_DATASTORE_READ,
@@ -1301,7 +1302,7 @@ pub fn download_file(
     println!(
         "Download {} from {} ({}/{})",
         file_name,
-        print_store_and_ns(&store, &backup_ns),
+        print_store_and_ns(store, &backup_ns),
         backup_dir,
         file_name
     );
@@ -1372,7 +1373,7 @@ pub fn download_file_decoded(
 
     let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
     let datastore = check_privs_and_load_store(
-        &store,
+        store,
         &backup_ns,
         &auth_id,
         PRIV_DATASTORE_READ,
@@ -1394,7 +1395,7 @@ pub fn download_file_decoded(
     println!(
         "Download {} from {} ({}/{})",
        file_name,
-        print_store_and_ns(&store, &backup_ns),
+        print_store_and_ns(store, &backup_ns),
        backup_dir_api,
        file_name
    );
@@ -1403,7 +1404,7 @@ pub fn download_file_decoded(
     path.push(backup_dir.relative_path());
     path.push(&file_name);
 
-    let extension = file_name.rsplitn(2, '.').next().unwrap();
+    let (_, extension) = file_name.rsplit_once('.').unwrap();
 
     let body = match extension {
         "didx" => {
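Both forms take the extension after the last dot, but `rsplit_once` also hands back the stem and makes the split-in-two-from-the-right intent explicit. A quick sketch:

    fn main() {
        let file_name = "catalog.didx";
        // Old: rsplitn iterates from the right, so next() is the extension.
        let ext_old = file_name.rsplitn(2, '.').next().unwrap();
        // New: split at the last '.', keeping both halves.
        let (stem, ext_new) = file_name.rsplit_once('.').unwrap();
        assert_eq!(ext_old, "didx");
        assert_eq!((stem, ext_new), ("catalog", "didx"));
    }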
@@ -1503,7 +1504,7 @@ pub fn upload_backup_log(
     let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
 
     let datastore = check_privs_and_load_store(
-        &store,
+        store,
         &backup_ns,
         &auth_id,
         0,
@@ -1524,7 +1525,7 @@ pub fn upload_backup_log(
 
     println!(
         "Upload backup log to {} {backup_dir_api}/{file_name}",
-        print_store_and_ns(&store, &backup_ns),
+        print_store_and_ns(store, &backup_ns),
     );
 
     let data = req_body
@@ -1667,7 +1668,7 @@ pub fn pxar_file_download(
 
     let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
     let datastore = check_privs_and_load_store(
-        &store,
+        store,
         &ns,
         &auth_id,
         PRIV_DATASTORE_READ,
@@ -255,7 +255,7 @@ impl BackupEnvironment {
     pub fn lookup_chunk(&self, digest: &[u8; 32]) -> Option<u32> {
         let state = self.state.lock().unwrap();
 
-        state.known_chunks.get(digest).map(|len| *len)
+        state.known_chunks.get(digest).copied()
     }
 
     /// Store the writer with an unique ID
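`Option::copied` replaces the explicit dereferencing closure when the inner type is `Copy` (clippy's `map_clone` family):

    use std::collections::HashMap;

    fn main() {
        let known_chunks: HashMap<u8, u32> = HashMap::from([(1, 42)]);
        // A closure that only dereferences...
        let a = known_chunks.get(&1).map(|len| *len);
        // ...is spelled copied() for Copy types.
        let b = known_chunks.get(&1).copied();
        assert_eq!(a, b);
    }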
@@ -246,9 +246,9 @@ pub async fn do_register_account<'a>(
     rsa_bits: Option<u32>,
 ) -> Result<&'a Account, Error> {
     let contact = account_contact_from_string(&contact);
-    Ok(client
+    client
         .new_account(name, agree_to_tos, contact, rsa_bits)
-        .await?)
+        .await
 }
 
 #[api(
@@ -481,11 +481,7 @@ pub async fn scan_remote_groups(
 
     let client = remote_client(&remote, None).await.map_err(map_remote_err)?;
 
-    let args = if let Some(ns) = namespace {
-        Some(json!({ "ns": ns }))
-    } else {
-        None
-    };
+    let args = namespace.map(|ns| json!({ "ns": ns }));
 
     let api_res = client
         .get(&format!("api2/json/admin/datastore/{}/groups", store), args)
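The `if let Some(x) { Some(...) } else { None }` shape is exactly `Option::map` (clippy's `manual_map`). A sketch using a plain `String` instead of `json!`:

    fn main() {
        let namespace: Option<&str> = Some("prod");
        // Manual match that the lint flags...
        let args_manual = if let Some(ns) = namespace {
            Some(format!("ns={ns}"))
        } else {
            None
        };
        // ...collapses to a single combinator.
        let args = namespace.map(|ns| format!("ns={ns}"));
        assert_eq!(args_manual, args);
    }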
@@ -90,6 +90,7 @@ pub fn list_keys(
     },
 )]
 /// Change the encryption key's password (and password hint).
+#[allow(clippy::too_many_arguments)]
 pub fn change_passphrase(
     kdf: Option<Kdf>,
     password: Option<String>,
@@ -397,7 +397,7 @@ async fn order_certificate(
         .ok_or_else(|| format_err!("missing 'finalize' URL in order"))?;
     if let Err(err) = acme.finalize(finalize, &csr.data).await {
         if finalize_error_cnt >= 5 {
-            return Err(err.into());
+            return Err(err);
         }
 
         finalize_error_cnt += 1;
@@ -88,7 +88,7 @@ pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> {
         let name = data
             .Where
             .strip_prefix(BASE_MOUNT_DIR)
-            .unwrap_or_else(|| &data.Where)
+            .unwrap_or(&data.Where)
             .to_string();
 
         list.push(DatastoreMountInfo {
@@ -59,6 +59,7 @@ use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_AUDIT};
     },
 )]
 /// Read syslog entries.
+#[allow(clippy::too_many_arguments)]
 fn get_journal(
     since: Option<i64>,
     until: Option<i64>,
@@ -41,9 +41,9 @@ pub fn create_value_from_rrd(
 
     let mut t = start;
 
-    for index in 0..data.len() {
-        let entry = timemap.entry(t).or_insert(json!({ "time": t }));
-        if let Some(value) = data[index] {
+    for value in data {
+        let entry = timemap.entry(t).or_insert_with(|| json!({ "time": t }));
+        if let Some(value) = value {
             entry[*name] = value.into();
         }
         t += reso;
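Two lints in one hunk: `needless_range_loop` (iterate the values, not indices) and `or_fun_call` (`or_insert_with` builds the default entry lazily). A reduced sketch of the loop shape with hypothetical data:

    use std::collections::BTreeMap;

    fn main() {
        let data: &[Option<u64>] = &[Some(1), None, Some(3)];
        let (start, reso) = (0u64, 60u64);
        let mut timemap: BTreeMap<u64, String> = BTreeMap::new();

        let mut t = start;
        for value in data {
            // The default depends on t, so or_insert_with keeps it lazy.
            let entry = timemap.entry(t).or_insert_with(|| format!("time={t}"));
            if let Some(value) = value {
                entry.push_str(&format!(" value={value}"));
            }
            t += reso;
        }
        assert_eq!(timemap.len(), 3);
    }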
@@ -144,7 +144,7 @@ fn get_syslog(
 ) -> Result<Value, Error> {
     let service = param["service"]
         .as_str()
-        .map(|service| crate::api2::node::services::real_service_name(service));
+        .map(crate::api2::node::services::real_service_name);
 
     let (count, lines) = dump_journal(
         param["start"].as_u64(),
@@ -216,6 +216,7 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datasto
     },
 )]
 /// Sync store from other repository
+#[allow(clippy::too_many_arguments)]
 async fn pull(
     store: String,
     ns: Option<BackupNamespace>,
@@ -49,7 +49,7 @@ pub async fn datastore_status(
         let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
         if !allowed {
-            if let Ok(datastore) = DataStore::lookup_datastore(&store, Some(Operation::Lookup)) {
+            if let Ok(datastore) = DataStore::lookup_datastore(store, Some(Operation::Lookup)) {
                 if can_access_any_namespace(datastore, &auth_id, &user_info) {
                     list.push(DataStoreStatusListItem::empty(store, None));
                 }
@@ -57,7 +57,7 @@ pub async fn datastore_status(
             continue;
         }
 
-        let datastore = match DataStore::lookup_datastore(&store, Some(Operation::Read)) {
+        let datastore = match DataStore::lookup_datastore(store, Some(Operation::Read)) {
             Ok(datastore) => datastore,
             Err(err) => {
                 list.push(DataStoreStatusListItem::empty(store, Some(err.to_string())));
@@ -127,7 +127,7 @@ pub async fn datastore_status(
         list.push(entry);
     }
 
-    Ok(list.into())
+    Ok(list)
 }
 
 const SUBDIRS: SubdirMap = &[(
@@ -214,30 +214,28 @@ pub async fn list_media(
     let inventory = Inventory::load(status_path)?;
 
     let privs = user_info.lookup_privs(&auth_id, &["tape", "pool"]);
-    if (privs & PRIV_TAPE_AUDIT) != 0 {
-        if pool.is_none() {
-            for media_id in inventory.list_unassigned_media() {
-                let (mut status, location) = inventory.status_and_location(&media_id.label.uuid);
+    if (privs & PRIV_TAPE_AUDIT) != 0 && pool.is_none() {
+        for media_id in inventory.list_unassigned_media() {
+            let (mut status, location) = inventory.status_and_location(&media_id.label.uuid);
 
-                if status == MediaStatus::Unknown {
-                    status = MediaStatus::Writable;
-                }
+            if status == MediaStatus::Unknown {
+                status = MediaStatus::Writable;
+            }
 
-                list.push(MediaListEntry {
-                    uuid: media_id.label.uuid.clone(),
-                    ctime: media_id.label.ctime,
-                    label_text: media_id.label.label_text.to_string(),
-                    location,
-                    status,
-                    catalog: true, // empty, so we do not need a catalog
-                    expired: false,
-                    media_set_uuid: None,
-                    media_set_name: None,
-                    media_set_ctime: None,
-                    seq_nr: None,
-                    pool: None,
-                });
-            }
+            list.push(MediaListEntry {
+                uuid: media_id.label.uuid.clone(),
+                ctime: media_id.label.ctime,
+                label_text: media_id.label.label_text.to_string(),
+                location,
+                status,
+                catalog: true, // empty, so we do not need a catalog
+                expired: false,
+                media_set_uuid: None,
+                media_set_name: None,
+                media_set_ctime: None,
+                seq_nr: None,
+                pool: None,
+            });
         }
     }
 
@@ -316,6 +316,7 @@ pub const ROUTER: Router = Router::new().post(&API_METHOD_RESTORE);
     },
 )]
 /// Restore data from media-set. Namespaces will be automatically created if necessary.
+#[allow(clippy::too_many_arguments)]
 pub fn restore(
     store: String,
     drive: String,
@@ -631,7 +632,7 @@ fn restore_list_worker(
     let mut restorable = Vec::new();
     // restore source namespaces
     for (store, snapshot) in catalog.list_snapshots() {
-        let (ns, dir) = match parse_ns_and_snapshot(&snapshot) {
+        let (ns, dir) = match parse_ns_and_snapshot(snapshot) {
             Ok((ns, dir)) if store_map.has_full_mapping(store, &ns) => (ns, dir),
             Err(err) => {
                 task_warn!(worker, "couldn't parse snapshot {snapshot} - {err}");
@@ -1194,7 +1195,6 @@ fn restore_partial_chunk_archive<'a>(
     let verify_and_write_channel = writer_pool.channel();
 
     while let Some((digest, blob)) = decoder.next_chunk()? {
-
         worker.check_abort()?;
 
         if chunk_list.remove(&digest) {
@@ -1878,13 +1878,10 @@ pub fn fast_catalog_restore(
 
                 let catalog_uuid = &archive_header.uuid;
 
-                let wanted = media_set
-                    .media_list()
-                    .iter()
-                    .any(|e| match e {
-                        None => false,
-                        Some(uuid) => uuid == catalog_uuid,
-                    });
+                let wanted = media_set.media_list().iter().any(|e| match e {
+                    None => false,
+                    Some(uuid) => uuid == catalog_uuid,
+                });
 
                 if !wanted {
                     task_log!(
@@ -17,6 +17,7 @@ pub trait ProxmoxAuthenticator {
     fn remove_password(&self, username: &UsernameRef) -> Result<(), Error>;
 }
 
+#[allow(clippy::upper_case_acronyms)]
 struct PAM();
 
 impl ProxmoxAuthenticator for PAM {
@@ -70,6 +71,7 @@ impl ProxmoxAuthenticator for PAM {
     }
 }
 
+#[allow(clippy::upper_case_acronyms)]
 struct PBS();
 
 const SHADOW_CONFIG_FILENAME: &str = configdir!("/shadow.json");
@@ -85,7 +85,7 @@ pub fn can_access_any_namespace(
         PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
     let name = store.name();
     iter.any(|ns| -> bool {
-        let user_privs = user_info.lookup_privs(&auth_id, &["datastore", name, &ns.to_string()]);
+        let user_privs = user_info.lookup_privs(auth_id, &["datastore", name, &ns.to_string()]);
         user_privs & wanted != 0
     })
 }
@@ -136,7 +136,7 @@ impl<'a> ListAccessibleBackupGroups<'a> {
             override_owner_priv: override_owner_priv.unwrap_or(0),
             owner_and_priv: owner_and_priv.unwrap_or(0),
             state: None,
-            store: store,
+            store,
             user_info: CachedUserInfo::new()?,
         })
     }
@@ -157,11 +157,10 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
                         return Some(Ok(group));
                     }
                     if let Some(auth_id) = &self.auth_id {
-                        match self.store.owns_backup(
-                            &group.backup_ns(),
-                            group.group(),
-                            &auth_id,
-                        ) {
+                        match self
+                            .store
+                            .owns_backup(group.backup_ns(), group.group(), auth_id)
+                        {
                             Ok(is_owner) if is_owner => return Some(Ok(group)),
                             Ok(_) => continue,
                             Err(err) => return Some(Err(err)),
@@ -182,8 +181,7 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
                 if let Some(auth_id) = &self.auth_id {
                     let info = &self.user_info;
 
-                    let privs =
-                        info.lookup_privs(&auth_id, &ns.acl_path(self.store.name()));
+                    let privs = info.lookup_privs(auth_id, &ns.acl_path(self.store.name()));
 
                     if privs & NS_PRIVS_OK == 0 {
                         continue;
@@ -196,7 +194,7 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
                     continue; // no owner override and no extra privs -> nothing visible
                 }
             }
-            self.state = match ListGroups::new(Arc::clone(&self.store), ns) {
+            self.state = match ListGroups::new(Arc::clone(self.store), ns) {
                 Ok(iter) => Some((iter, override_owner)),
                 Err(err) => return Some(Err(err)),
             };
@@ -198,7 +198,7 @@ fn verify_index_chunks(
     let chunk_list =
         verify_worker
             .datastore
-            .get_chunks_in_order(&index, skip_chunk, check_abort)?;
+            .get_chunks_in_order(&*index, skip_chunk, check_abort)?;
 
     for (pos, _) in chunk_list {
         verify_worker.worker.check_abort()?;
@@ -276,6 +276,7 @@ fn task_mgmt_cli() -> CommandLineInterface {
     }
 )]
 /// Sync datastore from another repository
+#[allow(clippy::too_many_arguments)]
 async fn pull_datastore(
     remote: String,
     remote_store: String,
@@ -231,10 +231,7 @@ pub struct NodeConfig {
 impl NodeConfig {
     pub fn acme_config(&self) -> Option<Result<AcmeConfig, Error>> {
         self.acme.as_deref().map(|config| -> Result<_, Error> {
-            Ok(crate::tools::config::from_property_string(
-                config,
-                &AcmeConfig::API_SCHEMA,
-            )?)
+            crate::tools::config::from_property_string(config, &AcmeConfig::API_SCHEMA)
         })
     }
 
@@ -538,7 +538,7 @@ pub fn send_updates_available(updates: &[&APTUpdateInfo]) -> Result<(), Error> {
 /// send email on certificate renewal failure.
 pub fn send_certificate_renewal_mail(result: &Result<(), Error>) -> Result<(), Error> {
     let error: String = match result {
-        Err(e) => e.to_string().into(),
+        Err(e) => e.to_string(),
         _ => return Ok(()),
     };
 
@@ -620,8 +620,7 @@ fn handlebars_humam_bytes_helper(
 ) -> HelperResult {
     let param = h
         .param(0)
-        .map(|v| v.value().as_u64())
-        .flatten()
+        .and_then(|v| v.value().as_u64())
         .ok_or_else(|| RenderError::new("human-bytes: param not found"))?;
 
     out.write(&HumanByte::from(param).to_string())?;
@@ -638,13 +637,11 @@ fn handlebars_relative_percentage_helper(
 ) -> HelperResult {
     let param0 = h
         .param(0)
-        .map(|v| v.value().as_f64())
-        .flatten()
+        .and_then(|v| v.value().as_f64())
         .ok_or_else(|| RenderError::new("relative-percentage: param0 not found"))?;
     let param1 = h
         .param(1)
-        .map(|v| v.value().as_f64())
-        .flatten()
+        .and_then(|v| v.value().as_f64())
         .ok_or_else(|| RenderError::new("relative-percentage: param1 not found"))?;
 
     if param1 == 0.0 {
@@ -324,10 +324,12 @@ pub fn compute_schedule_status(
         }
     };
 
-    let mut status = JobScheduleStatus::default();
-    status.last_run_upid = upid.map(String::from);
-    status.last_run_state = state;
-    status.last_run_endtime = endtime;
+    let mut status = JobScheduleStatus {
+        last_run_upid: upid.map(String::from),
+        last_run_state: state,
+        last_run_endtime: endtime,
+        ..Default::default()
+    };
 
     if let Some(schedule) = schedule {
         if let Ok(event) = schedule.parse::<CalendarEvent>() {
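clippy's `field_reassign_with_default` prefers struct-update syntax over creating a default value and mutating it field by field. A self-contained sketch with a hypothetical status type:

    #[derive(Default, Debug, PartialEq)]
    struct JobStatus {
        last_run_upid: Option<String>,
        last_run_endtime: Option<i64>,
        next_run: Option<i64>,
    }

    fn main() {
        // Flagged: default first, then per-field assignments.
        let mut a = JobStatus::default();
        a.last_run_upid = Some("UPID:...".to_string());
        a.last_run_endtime = Some(0);

        // Preferred: set known fields, fill the rest from Default.
        let b = JobStatus {
            last_run_upid: Some("UPID:...".to_string()),
            last_run_endtime: Some(0),
            ..Default::default()
        };
        assert_eq!(a, b);
    }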
@@ -164,9 +164,9 @@ pub fn do_prune_job(
     let worker_type = job.jobtype().to_string();
     let auth_id = auth_id.clone();
     let worker_id = match &prune_options.ns {
-        Some(ns) if ns.is_root() => format!("{store}"),
+        Some(ns) if ns.is_root() => store,
         Some(ns) => format!("{store}:{ns}"),
-        None => format!("{store}"),
+        None => store,
     };
 
     let upid_str = WorkerTask::new_thread(
@@ -788,7 +788,7 @@ fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result<
     let mut created = false;
     let store_ns_str = print_store_and_ns(params.store.name(), ns);
 
-    if !ns.is_root() && !params.store.namespace_path(&ns).exists() {
+    if !ns.is_root() && !params.store.namespace_path(ns).exists() {
         check_ns_modification_privs(params.store.name(), ns, &params.owner)
             .map_err(|err| format_err!("Creating {ns} not allowed - {err}"))?;
 
@@ -817,7 +817,7 @@ fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result<
 }
 
 fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> Result<bool, Error> {
-    check_ns_modification_privs(&params.store.name(), local_ns, &params.owner)
+    check_ns_modification_privs(params.store.name(), local_ns, &params.owner)
         .map_err(|err| format_err!("Removing {local_ns} not allowed - {err}"))?;
 
     params.store.remove_namespace_recursive(local_ns, true)
@@ -116,13 +116,11 @@ impl<'a> ChunkArchiveWriter<'a> {
             } else {
                 self.write_all(&blob_data[start..end])?
             };
-            if leom {
-                if self.close_on_leom {
-                    let mut writer = self.writer.take().unwrap();
-                    writer.finish(false)?;
-                    self.bytes_written = writer.bytes_written();
-                    return Ok(chunk_is_complete);
-                }
+            if leom && self.close_on_leom {
+                let mut writer = self.writer.take().unwrap();
+                writer.finish(false)?;
+                self.bytes_written = writer.bytes_written();
+                return Ok(chunk_is_complete);
             }
             start = end;
         }
@@ -610,9 +610,7 @@ impl MediaCatalog {
         }
         self.pending.extend(store.as_bytes());
 
-        self.content
-            .entry(store.to_string())
-            .or_default();
+        self.content.entry(store.to_string()).or_default();
 
         self.current_archive = Some((uuid, file_number, store.to_string()));
 
@@ -726,10 +724,7 @@ impl MediaCatalog {
         self.pending.push(b':');
         self.pending.extend(path.as_bytes());
 
-        let content = self
-            .content
-            .entry(store.to_string())
-            .or_default();
+        let content = self.content.entry(store.to_string()).or_default();
 
         content.snapshot_index.insert(path, file_number);
 
@@ -857,9 +852,7 @@ impl MediaCatalog {
 
         self.check_start_chunk_archive(file_number)?;
 
-        self.content
-            .entry(store.to_string())
-            .or_default();
+        self.content.entry(store.to_string()).or_default();
 
         self.current_archive = Some((uuid, file_number, store.to_string()));
     }
@@ -893,10 +886,7 @@ impl MediaCatalog {
         let _ = parse_ns_and_snapshot(snapshot)?;
         self.check_register_snapshot(file_number)?;
 
-        let content = self
-            .content
-            .entry(store.to_string())
-            .or_default();
+        let content = self.content.entry(store.to_string()).or_default();
 
         content
             .snapshot_index
@@ -1015,19 +1005,14 @@ impl MediaSetCatalog {
     /// as (datastore, snapshot).
     /// The snapshot contains namespaces in the format 'ns/namespace'.
     pub fn list_snapshots(&self) -> impl Iterator<Item = (&str, &str)> {
-        self.catalog_list
-            .values()
-            .flat_map(|catalog| {
-                catalog
-                    .content
-                    .iter()
-                    .flat_map(|(store, content)| {
-                        content
-                            .snapshot_index
-                            .keys()
-                            .map(move |key| (store.as_str(), key.as_str()))
-                    })
-            })
+        self.catalog_list.values().flat_map(|catalog| {
+            catalog.content.iter().flat_map(|(store, content)| {
+                content
+                    .snapshot_index
+                    .keys()
+                    .map(move |key| (store.as_str(), key.as_str()))
+            })
+        })
     }
 }
 
@@ -344,13 +344,11 @@ impl MediaPool {
             MediaLocation::Online(name) => {
                 if self.force_media_availability {
                     true
+                } else if let Some(ref changer_name) = self.changer_name {
+                    name == changer_name
                 } else {
-                    if let Some(ref changer_name) = self.changer_name {
-                        name == changer_name
-                    } else {
-                        // a standalone drive cannot use media currently inside a library
-                        false
-                    }
+                    // a standalone drive cannot use media currently inside a library
+                    false
                 }
             }
             MediaLocation::Offline => {
@@ -686,10 +684,8 @@ impl MediaPool {
                 let media_location = media.location();
                 if self.location_is_available(media_location) {
                     last_is_writable = true;
-                } else {
-                    if let MediaLocation::Vault(vault) = media_location {
-                        bail!("writable media offsite in vault '{}'", vault);
-                    }
+                } else if let MediaLocation::Vault(vault) = media_location {
+                    bail!("writable media offsite in vault '{}'", vault);
                 }
             }
             _ => bail!(
@@ -110,13 +110,12 @@ fn get_changelog_url(
     command.arg("--print-uris");
     command.arg(package);
     let output = proxmox_sys::command::run_command(command, None)?; // format: 'http://foo/bar' package.changelog
-    let output = match output.splitn(2, ' ').next() {
-        Some(output) => {
-            if output.len() < 2 {
-                bail!("invalid output (URI part too short) from 'apt-get changelog --print-uris': {}", output)
-            }
-            output[1..output.len() - 1].to_owned()
-        }
+    let output = match output.split_once(' ') {
+        Some((uri, _file_name)) if uri.len() > 2 => uri[1..uri.len() - 1].to_owned(),
+        Some((uri, _file_name)) => bail!(
+            "invalid output (URI part too short) from 'apt-get changelog --print-uris': {}",
+            uri
+        ),
         None => bail!(
             "invalid output from 'apt-get changelog --print-uris': {}",
             output
@@ -478,7 +478,7 @@ impl Disk {
         let stat = unsafe { std::str::from_utf8_unchecked(&stat) };
         let stat: Vec<u64> = stat
             .split_ascii_whitespace()
-            .map(|s| u64::from_str_radix(s, 10).unwrap_or(0))
+            .map(|s| s.parse().unwrap_or_default())
             .collect();
 
         if stat.len() < 15 {
|
|||||||
|
|
||||||
let mut used = PartitionUsageType::Unused;
|
let mut used = PartitionUsageType::Unused;
|
||||||
|
|
||||||
if let Some(devnum) = disk.devnum().ok() {
|
if let Ok(devnum) = disk.devnum() {
|
||||||
if lvm_devices.contains(&devnum) {
|
if lvm_devices.contains(&devnum) {
|
||||||
used = PartitionUsageType::LVM;
|
used = PartitionUsageType::LVM;
|
||||||
} else if zfs_devices.contains(&devnum) {
|
} else if zfs_devices.contains(&devnum) {
|
||||||
|
@@ -203,7 +203,7 @@ pub fn get_smart_data(disk: &super::Disk, health_only: bool) -> Result<SmartData
     })
 }
 
-static WEAROUT_FIELD_ORDER: &[&'static str] = &[
+static WEAROUT_FIELD_ORDER: &[&str] = &[
     "Media_Wearout_Indicator",
     "SSD_Life_Left",
     "Wear_Leveling_Count",
@@ -21,9 +21,9 @@ lazy_static! {
 
 fn get_pool_from_dataset(dataset: &str) -> &str {
     if let Some(idx) = dataset.find('/') {
-        &dataset[0..idx].as_ref()
+        dataset[0..idx].as_ref()
     } else {
-        dataset.as_ref()
+        dataset
     }
 }
@@ -53,7 +53,7 @@ pub fn zfs_pool_stats(pool: &OsStr) -> Result<Option<BlockDevStat>, Error> {
     // All times are nanoseconds
     let stat: Vec<u64> = lines[2]
         .split_ascii_whitespace()
-        .map(|s| u64::from_str_radix(s, 10).unwrap_or(0))
+        .map(|s| s.parse().unwrap_or_default())
         .collect();
 
     let ticks = (stat[4] + stat[7]) / 1_000_000; // convert to milisec
@@ -147,12 +147,10 @@ fn parse_objset_stat(pool: &str, objset_id: &str) -> Result<(String, BlockDevSta
         let value = parts.next().ok_or_else(|| format_err!("no value found"))?;
         match name {
             Some("dataset_name") => dataset_name = value.to_string(),
-            Some("writes") => stat.write_ios = u64::from_str_radix(value, 10).unwrap_or(0),
-            Some("nwritten") => {
-                stat.write_sectors = u64::from_str_radix(value, 10).unwrap_or(0) / 512
-            }
-            Some("reads") => stat.read_ios = u64::from_str_radix(value, 10).unwrap_or(0),
-            Some("nread") => stat.read_sectors = u64::from_str_radix(value, 10).unwrap_or(0) / 512,
+            Some("writes") => stat.write_ios = value.parse().unwrap_or_default(),
+            Some("nwritten") => stat.write_sectors = value.parse::<u64>().unwrap_or_default() / 512,
+            Some("reads") => stat.read_ios = value.parse().unwrap_or_default(),
+            Some("nread") => stat.read_sectors = value.parse::<u64>().unwrap_or_default() / 512,
             _ => {}
         }
     }
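`u64::from_str_radix(s, 10)` is plain base-10 parsing, so clippy's `from_str_radix_10` suggests `str::parse`; for integers, `unwrap_or_default()` is the same fallback as `unwrap_or(0)`:

    fn main() {
        let s = "1234";
        let a = u64::from_str_radix(s, 10).unwrap_or(0);
        let b: u64 = s.parse().unwrap_or_default();
        assert_eq!(a, b);

        // The fallback still kicks in for garbage input.
        let c: u64 = "not-a-number".parse().unwrap_or_default();
        assert_eq!(c, 0);
    }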
@@ -1,4 +1,4 @@
-use std::mem::replace;
+use std::mem::{replace, take};
 
 use anyhow::{bail, Error};
 use serde::{Deserialize, Serialize};
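`std::mem::take(&mut v)` is shorthand for `mem::replace(&mut v, Default::default())`, which is what the next hunk uses it for. A minimal sketch:

    use std::mem::{replace, take};

    fn main() {
        let mut a = vec![1, 2, 3];
        let moved = take(&mut a); // same as replace(&mut a, Vec::new())
        assert_eq!(moved, vec![1, 2, 3]);
        assert!(a.is_empty());

        // The pre-change spelling, kept here for comparison:
        let mut b = vec![4, 5];
        let moved = replace(&mut b, Vec::new());
        assert_eq!(moved, vec![4, 5]);
    }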
@@ -255,7 +255,7 @@ where
             stack.push(StackItem {
                 node: replace(&mut cur.node, node),
                 level: replace(&mut cur.level, vdev_level),
-                children_of_parent: replace(&mut cur.children_of_parent, Vec::new()),
+                children_of_parent: take(&mut cur.children_of_parent),
             });
         } else {
             // same indentation level, add to children of the previous level:
@@ -56,7 +56,7 @@ impl<T: Any> AsAny for T {
 
 /// The default 2 hours are far too long for PBS
 pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120;
-pub const DEFAULT_USER_AGENT_STRING: &'static str = "proxmox-backup-client/1.0";
+pub const DEFAULT_USER_AGENT_STRING: &str = "proxmox-backup-client/1.0";
 
 /// Returns a new instance of `SimpleHttp` configured for PBS usage.
 pub fn pbs_simple_http(proxy_config: Option<ProxyConfig>) -> SimpleHttp {
@@ -64,7 +64,6 @@ pub fn pbs_simple_http(proxy_config: Option<ProxyConfig>) -> SimpleHttp {
         proxy_config,
         user_agent: Some(DEFAULT_USER_AGENT_STRING.to_string()),
         tcp_keepalive: Some(PROXMOX_BACKUP_TCP_KEEPALIVE_TIME),
-        ..Default::default()
     };
 
     SimpleHttp::with_options(options)