backup/verify: improve speed by sorting chunks by inode
Before reading the chunks from disk in the order of the index file, stat them first and sort them by inode number. This can have a very positive impact on read speed on spinning disks, even with the additional stat'ing of the chunks.

The memory footprint should be tolerable: for 1_000_000 chunks we need about 16 MiB of memory (a Vec of 64-bit position + 64-bit inode). Assuming 4 MiB chunks, such an index would reference 4 TiB of data.

Two small benchmarks (single spinner, ext4) showed an improvement from ~430 seconds to ~330 seconds for a 32 GiB fixed index, and from ~160 seconds to ~120 seconds for a 10 GiB dynamic index.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
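The core idea, pulled out of the patch into standalone form: stat every file first, sort by inode number, then read in that order so a spinning disk seeks far less. A minimal sketch; the helper name `sorted_by_inode` and the example paths are illustrative, not from the patch:

    use std::fs;
    use std::io;
    use std::os::unix::fs::MetadataExt;
    use std::path::PathBuf;

    fn sorted_by_inode(paths: Vec<PathBuf>) -> io::Result<Vec<PathBuf>> {
        // One stat per file: cheap compared to reading the file contents.
        let mut list: Vec<(u64, PathBuf)> = paths
            .into_iter()
            .map(|p| fs::metadata(&p).map(|m| (m.ino(), p)))
            .collect::<io::Result<_>>()?;
        // Sorting by inode approximates on-disk layout on ext4/xfs,
        // turning random reads into mostly sequential ones.
        list.sort_unstable_by_key(|&(ino, _)| ino);
        Ok(list.into_iter().map(|(_, p)| p).collect())
    }

    fn main() -> io::Result<()> {
        let paths = vec![PathBuf::from("/etc/hostname"), PathBuf::from("/etc/hosts")];
        for p in sorted_by_inode(paths)? {
            let data = fs::read(&p)?; // read in inode order
            println!("{}: {} bytes", p.display(), data.len());
        }
        Ok(())
    }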
@@ -686,6 +686,11 @@ impl DataStore {
         }
     }
 
+    pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
+        let (chunk_path, _digest_str) = self.chunk_store.chunk_path(digest);
+        std::fs::metadata(chunk_path).map_err(Error::from)
+    }
+
     pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
 
         let (chunk_path, digest_str) = self.chunk_store.chunk_path(digest);
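The new `stat_chunk` is a thin wrapper around `std::fs::metadata`, which issues a single stat(2) and never touches file contents; that is what keeps the pre-pass cheap even for millions of chunks. A standalone sketch of what it returns (the path is arbitrary):

    use std::os::unix::fs::MetadataExt;

    fn main() -> std::io::Result<()> {
        // stat(2) only: inode number and size, no data read.
        let metadata = std::fs::metadata("/etc/hostname")?;
        println!("inode {} size {} bytes", metadata.ino(), metadata.len());
        Ok(())
    }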
@@ -159,13 +159,16 @@ fn verify_index_chunks(
         }
     );
 
-    for pos in 0..index.index_count() {
+    let index_count = index.index_count();
+    let mut chunk_list = Vec::with_capacity(index_count);
+
+    use std::os::unix::fs::MetadataExt;
+
+    for pos in 0..index_count {
         verify_worker.worker.check_abort()?;
         crate::tools::fail_on_shutdown()?;
 
         let info = index.chunk_info(pos).unwrap();
-        let size = info.size();
 
         if verify_worker.verified_chunks.lock().unwrap().contains(&info.digest) {
             continue; // already verified
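A quick check of the commit message's memory estimate: each `chunk_list` entry is a 64-bit position plus a 64-bit inode, i.e. 16 bytes. A back-of-the-envelope sketch, not part of the patch:

    fn main() {
        // One entry per chunk: (u64 position, u64 inode) = 16 bytes.
        let entry = std::mem::size_of::<(u64, u64)>();
        assert_eq!(entry, 16);
        let chunks = 1_000_000usize;
        let mib = (entry * chunks) as f64 / (1024.0 * 1024.0);
        // Prints ~15.3 MiB, matching the "about 16 MiB" figure above;
        // at 4 MiB per chunk, those entries reference ~4 TiB of data.
        println!("{} chunks -> {:.1} MiB of (pos, inode) pairs", chunks, mib);
    }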
@@ -178,15 +181,38 @@ fn verify_index_chunks(
             continue;
         }
 
-        match verify_worker.datastore.load_chunk(&info.digest) {
+        match verify_worker.datastore.stat_chunk(&info.digest) {
             Err(err) => {
                 verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
-                task_log!(verify_worker.worker, "can't verify chunk, load failed - {}", err);
+                task_log!(verify_worker.worker, "can't verify chunk, stat failed - {}", err);
                 errors.fetch_add(1, Ordering::SeqCst);
                 rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest, &verify_worker.worker);
-                continue;
             }
-            Ok(chunk) => {
+            Ok(metadata) => {
+                chunk_list.push((pos, metadata.ino()));
+            }
+        }
+    }
+
+    chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| {
+        ino_a.cmp(&ino_b)
+    });
+
+    for (pos, _) in chunk_list {
+        verify_worker.worker.check_abort()?;
+        crate::tools::fail_on_shutdown()?;
+
+        let info = index.chunk_info(pos).unwrap();
+
+        match verify_worker.datastore.load_chunk(&info.digest) {
+            Err(err) => {
+                verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
+                task_log!(verify_worker.worker, "can't verify chunk, load failed - {}", err);
+                errors.fetch_add(1, Ordering::SeqCst);
+                rename_corrupted_chunk(verify_worker.datastore.clone(), &info.digest, &verify_worker.worker);
+                continue;
+            }
+            Ok(chunk) => {
+                let size = info.size();
                 read_bytes += chunk.raw_size();
                 decoder_pool.send((chunk, info.digest, size))?;
                 decoded_bytes += size;
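The second loop then visits positions in inode order. A tiny standalone demonstration of the sort step, with sample values invented for illustration; `sort_unstable_by_key` would be an equivalent, terser spelling of the patch's comparator:

    fn main() {
        // (pos, inode) pairs as the first pass would collect them.
        let mut chunk_list: Vec<(usize, u64)> = vec![(0, 42), (1, 7), (2, 19)];
        // Equivalent to the patch's sort_unstable_by comparator:
        chunk_list.sort_unstable_by_key(|&(_, ino)| ino);
        assert_eq!(chunk_list, vec![(1, 7), (2, 19), (0, 42)]);
        // Chunks are now read in inode order: mostly sequential on disk.
    }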