Mirror of git://sourceware.org/git/lvm2.git
bcache: rewrite bcache_write_zeros()
It now uses GF_ZERO to avoid reading blocks that are going to be completely zeroed.
commit 8b755f1e04
parent dc30d4b2f2
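For context on the optimisation: with GF_DIRTY, bcache_get() has to populate the block from disk before the caller may modify part of it, so the old implementation presumably paid a read for every block even when the whole block was about to be zeroed. GF_ZERO (as used below) hands back an already-zeroed dirty block without issuing that read. A caller-side sketch of the contrast, using only the bcache calls that appear in this diff; identifiers like cache, fd, bb, offset and plen are placeholders, and error handling is trimmed:

	/* Sketch only, not part of the commit. */
	struct block *b;

	/* Partial overwrite: bytes outside [offset, offset + plen) must
	 * survive, so the block is fetched with GF_DIRTY and the cache
	 * may have to read it from disk first. */
	if (bcache_get(cache, fd, bb, GF_DIRTY, &b, NULL)) {
		memset(((unsigned char *) b->data) + offset, 0, plen);
		bcache_put(b);
	}

	/* Full overwrite: every byte is replaced, so GF_ZERO can supply
	 * an already-zeroed dirty block and no read I/O is needed. */
	if (bcache_get(cache, fd, bb + 1, GF_ZERO, &b, NULL))
		bcache_put(b);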
@@ -48,8 +48,6 @@ static uint64_t _min(uint64_t lhs, uint64_t rhs)
 	return lhs;
 }
 
 // These functions are all utilities, they should only use the public
 // interface to bcache.
-// FIXME: there's common code that can be factored out of these 3
-
 bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
 {
@@ -125,38 +123,68 @@ bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len
 	return errors ? false : true;
 }
 
-bool bcache_write_zeros(struct bcache *cache, int fd, uint64_t start, size_t len)
+//----------------------------------------------------------------
+
+static bool _zero_whole_blocks(struct bcache *cache, int fd, block_address bb, block_address be)
 {
 	struct block *b;
-	block_address bb, be, i;
-	uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
-	int errors = 0;
-
-	byte_range_to_block_range(cache, start, len, &bb, &be);
-	for (i = bb; i < be; i++)
-		bcache_prefetch(cache, fd, i);
 
-	for (i = bb; i < be; i++) {
-		if (!bcache_get(cache, fd, i, GF_DIRTY, &b, NULL)) {
-			errors++;
-			continue;
-		}
-
-		if (i == bb) {
-			uint64_t block_offset = start % block_size;
-			size_t blen = _min(block_size - block_offset, len);
-			memset(((unsigned char *) b->data) + block_offset, 0, blen);
-			len -= blen;
-		} else {
-			size_t blen = _min(block_size, len);
-			memset(b->data, 0, blen);
-			len -= blen;
-		}
-
-		bcache_put(b);
+	for (; bb != be; bb++) {
+		if (!bcache_get(cache, fd, bb, GF_ZERO, &b, NULL))
+			return false;
+		bcache_put(b);
 	}
 
-	return errors ? false : true;
+	return true;
 }
 
+static bool _zero_partial(struct bcache *cache, int fd, block_address bb, uint64_t offset, size_t len)
+{
+	struct block *b;
+
+	if (!bcache_get(cache, fd, bb, GF_DIRTY, &b, NULL))
+		return false;
+
+	memset(((unsigned char *) b->data) + offset, 0, len);
+	bcache_put(b);
+
+	return true;
+}
+
+bool bcache_write_zeros(struct bcache *cache, int fd, uint64_t start, size_t len)
+{
+	block_address bb, be;
+	uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
+	uint64_t block_offset = start % block_size;
+	uint64_t nr_whole;
+
+	byte_range_to_block_range(cache, start, len, &bb, &be);
+
+	// First block may be partial
+	if (block_offset) {
+		size_t blen = _min(block_size - block_offset, len);
+		if (!_zero_partial(cache, fd, bb, block_offset, blen))
+			return false;
+
+		len -= blen;
+		if (!len)
+			return true;
+
+		bb++;
+	}
+
+	// Now we write out a set of whole blocks
+	nr_whole = len / block_size;
+	if (!_zero_whole_blocks(cache, fd, bb, bb + nr_whole))
+		return false;
+	bb += nr_whole;
+	len -= nr_whole * block_size;
+
+	if (!len)
+		return true;
+
+	// Finally we write a partial end block
+	return _zero_partial(cache, fd, bb, 0, len);
+}
+
+//----------------------------------------------------------------
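To make the head/whole/tail split concrete, a hypothetical trace with invented numbers: assume 128-sector cache blocks, so block_size = 128 << SECTOR_SHIFT = 65536 bytes. A call zeroing 200000 bytes starting at byte 1000 then decomposes as follows (block addresses as computed by byte_range_to_block_range()):

	/* Hypothetical example, assuming 64 KiB (128-sector) blocks:
	 *
	 *   block_offset = 1000 % 65536 = 1000
	 *   head:  _zero_partial(cache, fd, 0, 1000, 64536)  len 200000 -> 135464
	 *   whole: nr_whole = 135464 / 65536 = 2
	 *          _zero_whole_blocks(cache, fd, 1, 3)        len -> 4392
	 *   tail:  _zero_partial(cache, fd, 3, 0, 4392)
	 *
	 * Only the head and tail blocks can trigger reads; the two whole
	 * blocks are zeroed via GF_ZERO without touching the disk.
	 */
	if (!bcache_write_zeros(cache, fd, 1000, 200000))
		/* handle error */;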