
bcache-utils: rewrite

They take care to avoid redundant reads now.
Joe Thornber 2018-05-03 11:36:15 +01:00
parent 2688aafefb
commit dfc320f5b8
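The diff below collapses the three near-duplicate byte-range helpers into a single _update_bytes() walker driven by per-operation partial/whole callbacks, and has bcache_read_bytes() prefetch its whole range up front. The caller-side sketch that follows is purely illustrative and not part of the commit: the helper name copy_then_zero, the buffer size, and the setup of `cache` and `fd` are assumptions; only the bcache_*_bytes() signatures are taken from the code below.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include "bcache.h"	// assumed header declaring struct bcache and the bcache_*_bytes() helpers

// Hypothetical caller: copy a small byte range to another offset, then
// zero the source.  `cache` must be an initialised struct bcache * and
// `fd` a file descriptor it manages; both are assumed here.
static bool copy_then_zero(struct bcache *cache, int fd,
			   uint64_t src, uint64_t dst, size_t len)
{
	char buf[512];

	if (len > sizeof(buf))
		return false;

	// bcache_read_bytes() now prefetches the whole range itself, so
	// the underlying blocks are issued as one batch of reads.
	if (!bcache_read_bytes(cache, fd, src, len, buf))
		return false;

	// Whole destination blocks are obtained with GF_ZERO, so blocks
	// that are completely overwritten never trigger a read.
	if (!bcache_write_bytes(cache, fd, dst, len, buf))
		return false;

	return bcache_zero_bytes(cache, fd, src, len);
}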


@@ -29,6 +29,16 @@ static void byte_range_to_block_range(struct bcache *cache, uint64_t start, size
 	*be = (start + len + block_size - 1) / block_size;
 }
 
+static uint64_t _min(uint64_t lhs, uint64_t rhs)
+{
+	if (rhs < lhs)
+		return rhs;
+
+	return lhs;
+}
+
+//----------------------------------------------------------------
+
 void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
 {
 	block_address bb, be;
@@ -40,119 +50,55 @@ void bcache_prefetch_bytes(struct bcache *cache, int fd, uint64_t start, size_t
 	}
 }
 
-static uint64_t _min(uint64_t lhs, uint64_t rhs)
-{
-	if (rhs < lhs)
-		return rhs;
-
-	return lhs;
-}
+//----------------------------------------------------------------
 
-// FIXME: there's common code that can be factored out of these 3
 bool bcache_read_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
 {
 	struct block *b;
-	block_address bb, be, i;
-	unsigned char *udata = data;
+	block_address bb, be;
 	uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
-	int errors = 0;
+	uint64_t block_offset = start % block_size;
+
+	bcache_prefetch_bytes(cache, fd, start, len);
 
 	byte_range_to_block_range(cache, start, len, &bb, &be);
-	for (i = bb; i < be; i++)
-		bcache_prefetch(cache, fd, i);
 
-	for (i = bb; i < be; i++) {
-		if (!bcache_get(cache, fd, i, 0, &b, NULL)) {
-			errors++;
-			continue;
-		}
+	for (; bb != be; bb++) {
+		if (!bcache_get(cache, fd, bb, 0, &b, NULL))
+			return false;
 
-		if (i == bb) {
-			uint64_t block_offset = start % block_size;
-			size_t blen = _min(block_size - block_offset, len);
-			memcpy(udata, ((unsigned char *) b->data) + block_offset, blen);
-			len -= blen;
-			udata += blen;
-		} else {
-			size_t blen = _min(block_size, len);
-			memcpy(udata, b->data, blen);
-			len -= blen;
-			udata += blen;
-		}
-
+		size_t blen = _min(block_size - block_offset, len);
+		memcpy(data, ((unsigned char *) b->data) + block_offset, blen);
 		bcache_put(b);
+
+		block_offset = 0;
+		len -= blen;
+		data = ((unsigned char *) data) + blen;
 	}
 
-	return errors ? false : true;
+	return true;
 }
 
-bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
-{
-	struct block *b;
-	block_address bb, be, i;
-	unsigned char *udata = data;
-	uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
-	int errors = 0;
-
-	byte_range_to_block_range(cache, start, len, &bb, &be);
-	for (i = bb; i < be; i++)
-		bcache_prefetch(cache, fd, i);
-
-	for (i = bb; i < be; i++) {
-		if (!bcache_get(cache, fd, i, GF_DIRTY, &b, NULL)) {
-			errors++;
-			continue;
-		}
-
-		if (i == bb) {
-			uint64_t block_offset = start % block_size;
-			size_t blen = _min(block_size - block_offset, len);
-			memcpy(((unsigned char *) b->data) + block_offset, udata, blen);
-			len -= blen;
-			udata += blen;
-		} else {
-			size_t blen = _min(block_size, len);
-			memcpy(b->data, udata, blen);
-			len -= blen;
-			udata += blen;
-		}
-
-		bcache_put(b);
-	}
-
-	return errors ? false : true;
-}
-
 //----------------------------------------------------------------
 
-static bool _zero_whole_blocks(struct bcache *cache, int fd, block_address bb, block_address be)
-{
-	struct block *b;
-
-	for (; bb != be; bb++) {
-		if (!bcache_get(cache, fd, bb, GF_ZERO, &b, NULL))
-			return false;
-		bcache_put(b);
-	}
-
-	return true;
-}
-
-static bool _zero_partial(struct bcache *cache, int fd, block_address bb, uint64_t offset, size_t len)
-{
-	struct block *b;
-
-	if (!bcache_get(cache, fd, bb, GF_DIRTY, &b, NULL))
-		return false;
-
-	memset(((unsigned char *) b->data) + offset, 0, len);
-	bcache_put(b);
-
-	return true;
-}
-
-bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
+// Writing bytes and zeroing bytes are very similar, so we factor out
+// this common code.
+
+struct updater;
+
+typedef bool (*partial_update_fn)(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len);
+typedef bool (*whole_update_fn)(struct updater *u, int fd, block_address bb, block_address be);
+
+struct updater {
+	struct bcache *cache;
+	partial_update_fn partial_fn;
+	whole_update_fn whole_fn;
+	void *data;
+};
+
+static bool _update_bytes(struct updater *u, int fd, uint64_t start, size_t len)
 {
+	struct bcache *cache = u->cache;
 	block_address bb, be;
 	uint64_t block_size = bcache_block_sectors(cache) << SECTOR_SHIFT;
 	uint64_t block_offset = start % block_size;
@@ -160,10 +106,15 @@ bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
 
 	byte_range_to_block_range(cache, start, len, &bb, &be);
 
+	// If the last block is partial, we will require a read, so let's
+	// prefetch it.
+	if ((start + len) % block_size)
+		bcache_prefetch(cache, fd, (start + len) / block_size);
+
 	// First block may be partial
 	if (block_offset) {
 		size_t blen = _min(block_size - block_offset, len);
-		if (!_zero_partial(cache, fd, bb, block_offset, blen))
+		if (!u->partial_fn(u, fd, bb, block_offset, blen))
 			return false;
 
 		len -= blen;
@@ -175,8 +126,9 @@ bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
 
 	// Now we write out a set of whole blocks
 	nr_whole = len / block_size;
-	if (!_zero_whole_blocks(cache, fd, bb, bb + nr_whole))
+	if (!u->whole_fn(u, fd, bb, bb + nr_whole))
 		return false;
+
 	bb += nr_whole;
 	len -= nr_whole * block_size;
 
@@ -184,7 +136,94 @@ bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
 		return true;
 
 	// Finally we write a partial end block
-	return _zero_partial(cache, fd, bb, 0, len);
+	return u->partial_fn(u, fd, bb, 0, len);
+}
+
+//----------------------------------------------------------------
+
+static bool _write_partial(struct updater *u, int fd, block_address bb,
+			   uint64_t offset, size_t len)
+{
+	struct block *b;
+
+	if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b, NULL))
+		return false;
+
+	memcpy(((unsigned char *) b->data) + offset, u->data, len);
+	u->data = ((unsigned char *) u->data) + len;
+
+	bcache_put(b);
+	return true;
+}
+
+static bool _write_whole(struct updater *u, int fd, block_address bb, block_address be)
+{
+	struct block *b;
+	uint64_t block_size = bcache_block_sectors(u->cache) << SECTOR_SHIFT;
+
+	for (; bb != be; bb++) {
+		// We don't need to read the block since we are overwriting
+		// it completely.
+		if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b, NULL))
+			return false;
+		memcpy(b->data, u->data, block_size);
+		u->data = ((unsigned char *) u->data) + block_size;
+		bcache_put(b);
+	}
+
+	return true;
+}
+
+bool bcache_write_bytes(struct bcache *cache, int fd, uint64_t start, size_t len, void *data)
+{
+	struct updater u;
+
+	u.cache = cache;
+	u.partial_fn = _write_partial;
+	u.whole_fn = _write_whole;
+	u.data = data;
+
+	return _update_bytes(&u, fd, start, len);
+}
+
+//----------------------------------------------------------------
+
+static bool _zero_partial(struct updater *u, int fd, block_address bb, uint64_t offset, size_t len)
+{
+	struct block *b;
+
+	if (!bcache_get(u->cache, fd, bb, GF_DIRTY, &b, NULL))
+		return false;
+
+	memset(((unsigned char *) b->data) + offset, 0, len);
+	bcache_put(b);
+
+	return true;
+}
+
+static bool _zero_whole(struct updater *u, int fd, block_address bb, block_address be)
+{
+	struct block *b;
+
+	for (; bb != be; bb++) {
+		if (!bcache_get(u->cache, fd, bb, GF_ZERO, &b, NULL))
+			return false;
+		bcache_put(b);
+	}
+
+	return true;
+}
+
+bool bcache_zero_bytes(struct bcache *cache, int fd, uint64_t start, size_t len)
+{
+	struct updater u;
+
+	u.cache = cache;
+	u.partial_fn = _zero_partial;
+	u.whole_fn = _zero_whole;
+	u.data = NULL;
+
+	return _update_bytes(&u, fd, start, len);
 }
 
 //----------------------------------------------------------------