bcache: remove for_each_cache()
Since each cache_set now explicitly has a single cache, for_each_cache() is unnecessary. This patch removes the macro, updates all locations where it was used, and makes sure the code logic remains consistent.
Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 08fdb2cddb
parent 697e23495c
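The conversion repeats the same mechanical pattern at every call site: drop the single-iteration loop macro and take the one cache straight from the cache_set. A condensed before/after sketch of that pattern, drawn from the wake_up_allocators() hunk below (not a complete function, shown only to illustrate the shape of the change):

	/* Before: "iterate" over the caches, of which there is now exactly one. */
	struct cache *ca;
	unsigned int i;

	for_each_cache(ca, c, i)
		wake_up_process(ca->alloc_thread);

	/* After: dereference the only cache directly; the iterator variable goes away. */
	struct cache *ca = c->cache;

	wake_up_process(ca->alloc_thread);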
drivers/md/bcache/alloc.c

@@ -88,7 +88,6 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
 	struct cache *ca;
 	struct bucket *b;
 	unsigned long next = c->nbuckets * c->sb.bucket_size / 1024;
-	unsigned int i;
 	int r;
 
 	atomic_sub(sectors, &c->rescale);
@@ -104,14 +103,14 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
 
 	c->min_prio = USHRT_MAX;
 
-	for_each_cache(ca, c, i)
-		for_each_bucket(b, ca)
-			if (b->prio &&
-			    b->prio != BTREE_PRIO &&
-			    !atomic_read(&b->pin)) {
-				b->prio--;
-				c->min_prio = min(c->min_prio, b->prio);
-			}
+	ca = c->cache;
+	for_each_bucket(b, ca)
+		if (b->prio &&
+		    b->prio != BTREE_PRIO &&
+		    !atomic_read(&b->pin)) {
+			b->prio--;
+			c->min_prio = min(c->min_prio, b->prio);
+		}
 
 	mutex_unlock(&c->bucket_lock);
 }

drivers/md/bcache/bcache.h

@@ -887,9 +887,6 @@ do { \
 
 /* Looping macros */
 
-#define for_each_cache(ca, cs, iter) \
-	for (iter = 0; ca = cs->cache, iter < 1; iter++)
-
 #define for_each_bucket(b, ca) \
 	for (b = (ca)->buckets + (ca)->sb.first_bucket; \
 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
@@ -931,11 +928,9 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
 
 static inline void wake_up_allocators(struct cache_set *c)
 {
-	struct cache *ca;
-	unsigned int i;
+	struct cache *ca = c->cache;
 
-	for_each_cache(ca, c, i)
-		wake_up_process(ca->alloc_thread);
+	wake_up_process(ca->alloc_thread);
 }
 
 static inline void closure_bio_submit(struct cache_set *c,

drivers/md/bcache/btree.c

@@ -1167,19 +1167,18 @@ static void make_btree_freeing_key(struct btree *b, struct bkey *k)
 static int btree_check_reserve(struct btree *b, struct btree_op *op)
 {
 	struct cache_set *c = b->c;
-	struct cache *ca;
-	unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
+	struct cache *ca = c->cache;
+	unsigned int reserve = (c->root->level - b->level) * 2 + 1;
 
 	mutex_lock(&c->bucket_lock);
 
-	for_each_cache(ca, c, i)
-		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
-			if (op)
-				prepare_to_wait(&c->btree_cache_wait, &op->wait,
-						TASK_UNINTERRUPTIBLE);
-			mutex_unlock(&c->bucket_lock);
-			return -EINTR;
-		}
+	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
+		if (op)
+			prepare_to_wait(&c->btree_cache_wait, &op->wait,
+					TASK_UNINTERRUPTIBLE);
+		mutex_unlock(&c->bucket_lock);
+		return -EINTR;
+	}
 
 	mutex_unlock(&c->bucket_lock);
 
@@ -1695,7 +1694,6 @@ static void btree_gc_start(struct cache_set *c)
 {
 	struct cache *ca;
 	struct bucket *b;
-	unsigned int i;
 
 	if (!c->gc_mark_valid)
 		return;
@@ -1705,14 +1703,14 @@ static void btree_gc_start(struct cache_set *c)
 	c->gc_mark_valid = 0;
 	c->gc_done = ZERO_KEY;
 
-	for_each_cache(ca, c, i)
-		for_each_bucket(b, ca) {
-			b->last_gc = b->gen;
-			if (!atomic_read(&b->pin)) {
-				SET_GC_MARK(b, 0);
-				SET_GC_SECTORS_USED(b, 0);
-			}
-		}
+	ca = c->cache;
+	for_each_bucket(b, ca) {
+		b->last_gc = b->gen;
+		if (!atomic_read(&b->pin)) {
+			SET_GC_MARK(b, 0);
+			SET_GC_SECTORS_USED(b, 0);
+		}
+	}
 
 	mutex_unlock(&c->bucket_lock);
 }
@@ -1721,7 +1719,8 @@ static void bch_btree_gc_finish(struct cache_set *c)
 {
 	struct bucket *b;
 	struct cache *ca;
-	unsigned int i;
+	unsigned int i, j;
+	uint64_t *k;
 
 	mutex_lock(&c->bucket_lock);
 
@@ -1739,7 +1738,6 @@ static void bch_btree_gc_finish(struct cache_set *c)
 		struct bcache_device *d = c->devices[i];
 		struct cached_dev *dc;
 		struct keybuf_key *w, *n;
-		unsigned int j;
 
 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
 			continue;
@@ -1756,29 +1754,27 @@ static void bch_btree_gc_finish(struct cache_set *c)
 	rcu_read_unlock();
 
 	c->avail_nbuckets = 0;
-	for_each_cache(ca, c, i) {
-		uint64_t *i;
 
-		ca->invalidate_needs_gc = 0;
+	ca = c->cache;
+	ca->invalidate_needs_gc = 0;
 
-		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
-			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+	for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
+		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
 
-		for (i = ca->prio_buckets;
-		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
-			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
+	for (k = ca->prio_buckets;
+	     k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
+		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
 
-		for_each_bucket(b, ca) {
-			c->need_gc = max(c->need_gc, bucket_gc_gen(b));
+	for_each_bucket(b, ca) {
+		c->need_gc = max(c->need_gc, bucket_gc_gen(b));
 
-			if (atomic_read(&b->pin))
-				continue;
+		if (atomic_read(&b->pin))
+			continue;
 
-			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
+		BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
 
-			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-				c->avail_nbuckets++;
-		}
-	}
+		if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
+			c->avail_nbuckets++;
+	}
 
 	mutex_unlock(&c->bucket_lock);
@@ -1830,12 +1826,10 @@ static void bch_btree_gc(struct cache_set *c)
 
 static bool gc_should_run(struct cache_set *c)
 {
-	struct cache *ca;
-	unsigned int i;
+	struct cache *ca = c->cache;
 
-	for_each_cache(ca, c, i)
-		if (ca->invalidate_needs_gc)
-			return true;
+	if (ca->invalidate_needs_gc)
+		return true;
 
 	if (atomic_read(&c->sectors_to_gc) < 0)
 		return true;
@@ -2081,9 +2075,8 @@ out:
 
 void bch_initial_gc_finish(struct cache_set *c)
 {
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	struct bucket *b;
-	unsigned int i;
 
 	bch_btree_gc_finish(c);
 
@@ -2098,20 +2091,18 @@ void bch_initial_gc_finish(struct cache_set *c)
 	 * This is only safe for buckets that have no live data in them, which
 	 * there should always be some of.
 	 */
-	for_each_cache(ca, c, i) {
-		for_each_bucket(b, ca) {
-			if (fifo_full(&ca->free[RESERVE_PRIO]) &&
-			    fifo_full(&ca->free[RESERVE_BTREE]))
-				break;
+	for_each_bucket(b, ca) {
+		if (fifo_full(&ca->free[RESERVE_PRIO]) &&
+		    fifo_full(&ca->free[RESERVE_BTREE]))
+			break;
 
-			if (bch_can_invalidate_bucket(ca, b) &&
-			    !GC_MARK(b)) {
-				__bch_invalidate_one_bucket(ca, b);
-				if (!fifo_push(&ca->free[RESERVE_PRIO],
-				   b - ca->buckets))
-					fifo_push(&ca->free[RESERVE_BTREE],
-						  b - ca->buckets);
-			}
-		}
-	}
+		if (bch_can_invalidate_bucket(ca, b) &&
+		    !GC_MARK(b)) {
+			__bch_invalidate_one_bucket(ca, b);
+			if (!fifo_push(&ca->free[RESERVE_PRIO],
+			   b - ca->buckets))
+				fifo_push(&ca->free[RESERVE_BTREE],
+					  b - ca->buckets);
+		}
+	}
 

drivers/md/bcache/journal.c

@@ -179,112 +179,109 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
 		ret;							\
 	})
 
-	struct cache *ca;
-	unsigned int iter;
+	struct cache *ca = c->cache;
 	int ret = 0;
+	struct journal_device *ja = &ca->journal;
+	DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
+	unsigned int i, l, r, m;
+	uint64_t seq;
 
-	for_each_cache(ca, c, iter) {
-		struct journal_device *ja = &ca->journal;
-		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-		unsigned int i, l, r, m;
-		uint64_t seq;
+	bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
+	pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
 
-		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
-		pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);
-
-		/*
-		 * Read journal buckets ordered by golden ratio hash to quickly
-		 * find a sequence of buckets with valid journal entries
-		 */
-		for (i = 0; i < ca->sb.njournal_buckets; i++) {
-			/*
-			 * We must try the index l with ZERO first for
-			 * correctness due to the scenario that the journal
-			 * bucket is circular buffer which might have wrapped
-			 */
-			l = (i * 2654435769U) % ca->sb.njournal_buckets;
-
-			if (test_bit(l, bitmap))
-				break;
-
-			if (read_bucket(l))
-				goto bsearch;
-		}
-
-		/*
-		 * If that fails, check all the buckets we haven't checked
-		 * already
-		 */
-		pr_debug("falling back to linear search\n");
-
-		for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
-			if (read_bucket(l))
-				goto bsearch;
-
-		/* no journal entries on this device? */
-		if (l == ca->sb.njournal_buckets)
-			continue;
-bsearch:
-		BUG_ON(list_empty(list));
-
-		/* Binary search */
-		m = l;
-		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
-		pr_debug("starting binary search, l %u r %u\n", l, r);
-
-		while (l + 1 < r) {
-			seq = list_entry(list->prev, struct journal_replay,
-					 list)->j.seq;
-
-			m = (l + r) >> 1;
-			read_bucket(m);
-
-			if (seq != list_entry(list->prev, struct journal_replay,
-					      list)->j.seq)
-				l = m;
-			else
-				r = m;
-		}
-
-		/*
-		 * Read buckets in reverse order until we stop finding more
-		 * journal entries
-		 */
-		pr_debug("finishing up: m %u njournal_buckets %u\n",
-			 m, ca->sb.njournal_buckets);
-		l = m;
-
-		while (1) {
-			if (!l--)
-				l = ca->sb.njournal_buckets - 1;
-
-			if (l == m)
-				break;
-
-			if (test_bit(l, bitmap))
-				continue;
-
-			if (!read_bucket(l))
-				break;
-		}
-
-		seq = 0;
-
-		for (i = 0; i < ca->sb.njournal_buckets; i++)
-			if (ja->seq[i] > seq) {
-				seq = ja->seq[i];
-				/*
-				 * When journal_reclaim() goes to allocate for
-				 * the first time, it'll use the bucket after
-				 * ja->cur_idx
-				 */
-				ja->cur_idx = i;
-				ja->last_idx = ja->discard_idx = (i + 1) %
-					ca->sb.njournal_buckets;
-
-			}
-	}
-
+	/*
+	 * Read journal buckets ordered by golden ratio hash to quickly
+	 * find a sequence of buckets with valid journal entries
+	 */
+	for (i = 0; i < ca->sb.njournal_buckets; i++) {
+		/*
+		 * We must try the index l with ZERO first for
+		 * correctness due to the scenario that the journal
+		 * bucket is circular buffer which might have wrapped
+		 */
+		l = (i * 2654435769U) % ca->sb.njournal_buckets;
+
+		if (test_bit(l, bitmap))
+			break;
+
+		if (read_bucket(l))
+			goto bsearch;
+	}
+
+	/*
+	 * If that fails, check all the buckets we haven't checked
+	 * already
+	 */
+	pr_debug("falling back to linear search\n");
+
+	for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
+		if (read_bucket(l))
+			goto bsearch;
+
+	/* no journal entries on this device? */
+	if (l == ca->sb.njournal_buckets)
+		goto out;
+bsearch:
+	BUG_ON(list_empty(list));
+
+	/* Binary search */
+	m = l;
+	r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
+	pr_debug("starting binary search, l %u r %u\n", l, r);
+
+	while (l + 1 < r) {
+		seq = list_entry(list->prev, struct journal_replay,
+				 list)->j.seq;
+
+		m = (l + r) >> 1;
+		read_bucket(m);
+
+		if (seq != list_entry(list->prev, struct journal_replay,
+				      list)->j.seq)
+			l = m;
+		else
+			r = m;
+	}
+
+	/*
+	 * Read buckets in reverse order until we stop finding more
+	 * journal entries
+	 */
+	pr_debug("finishing up: m %u njournal_buckets %u\n",
+		 m, ca->sb.njournal_buckets);
+	l = m;
+
+	while (1) {
+		if (!l--)
+			l = ca->sb.njournal_buckets - 1;
+
+		if (l == m)
+			break;
+
+		if (test_bit(l, bitmap))
+			continue;
+
+		if (!read_bucket(l))
+			break;
+	}
+
+	seq = 0;
+
+	for (i = 0; i < ca->sb.njournal_buckets; i++)
+		if (ja->seq[i] > seq) {
+			seq = ja->seq[i];
+			/*
+			 * When journal_reclaim() goes to allocate for
+			 * the first time, it'll use the bucket after
+			 * ja->cur_idx
+			 */
+			ja->cur_idx = i;
+			ja->last_idx = ja->discard_idx = (i + 1) %
+				ca->sb.njournal_buckets;
+
+		}
+
+out:
 	if (!list_empty(list))
 		c->journal.seq = list_entry(list->prev,
 					    struct journal_replay,
@@ -342,12 +339,10 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 
 static bool is_discard_enabled(struct cache_set *s)
 {
-	struct cache *ca;
-	unsigned int i;
+	struct cache *ca = s->cache;
 
-	for_each_cache(ca, s, i)
-		if (ca->discard)
-			return true;
+	if (ca->discard)
+		return true;
 
 	return false;
 }
@@ -633,9 +628,10 @@ static void do_journal_discard(struct cache *ca)
 static void journal_reclaim(struct cache_set *c)
 {
 	struct bkey *k = &c->journal.key;
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	uint64_t last_seq;
-	unsigned int iter, n = 0;
+	unsigned int next;
+	struct journal_device *ja = &ca->journal;
 	atomic_t p __maybe_unused;
 
 	atomic_long_inc(&c->reclaim);
@@ -647,46 +643,31 @@ static void journal_reclaim(struct cache_set *c)
 
 	/* Update last_idx */
 
-	for_each_cache(ca, c, iter) {
-		struct journal_device *ja = &ca->journal;
-
-		while (ja->last_idx != ja->cur_idx &&
-		       ja->seq[ja->last_idx] < last_seq)
-			ja->last_idx = (ja->last_idx + 1) %
-				ca->sb.njournal_buckets;
-	}
-
-	for_each_cache(ca, c, iter)
-		do_journal_discard(ca);
+	while (ja->last_idx != ja->cur_idx &&
+	       ja->seq[ja->last_idx] < last_seq)
+		ja->last_idx = (ja->last_idx + 1) %
+			ca->sb.njournal_buckets;
+
+	do_journal_discard(ca);
 
 	if (c->journal.blocks_free)
 		goto out;
 
-	/*
-	 * Allocate:
-	 * XXX: Sort by free journal space
-	 */
-
-	for_each_cache(ca, c, iter) {
-		struct journal_device *ja = &ca->journal;
-		unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
-
-		/* No space available on this device */
-		if (next == ja->discard_idx)
-			continue;
-
-		ja->cur_idx = next;
-		k->ptr[n++] = MAKE_PTR(0,
-				bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
-				ca->sb.nr_this_dev);
-		atomic_long_inc(&c->reclaimed_journal_buckets);
-	}
-
-	if (n) {
-		bkey_init(k);
-		SET_KEY_PTRS(k, n);
-		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
-	}
+	next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+	/* No space available on this device */
+	if (next == ja->discard_idx)
+		goto out;
+
+	ja->cur_idx = next;
+	k->ptr[0] = MAKE_PTR(0,
+			bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+			ca->sb.nr_this_dev);
+	atomic_long_inc(&c->reclaimed_journal_buckets);
+
+	bkey_init(k);
+	SET_KEY_PTRS(k, 1);
+	c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+
 out:
 	if (!journal_full(&c->journal))
 		__closure_wake_up(&c->journal.wait);
@@ -750,7 +731,7 @@ static void journal_write_unlocked(struct closure *cl)
 	__releases(c->journal.lock)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	struct journal_write *w = c->journal.cur;
 	struct bkey *k = &c->journal.key;
 	unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
@@ -780,9 +761,7 @@ static void journal_write_unlocked(struct closure *cl)
 	bkey_copy(&w->data->btree_root, &c->root->key);
 	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);
-
-	for_each_cache(ca, c, i)
-		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
+	w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
 
 	w->data->magic = jset_magic(&c->sb);
 	w->data->version = BCACHE_JSET_VERSION;
 	w->data->last_seq = last_seq(&c->journal);

drivers/md/bcache/movinggc.c

@@ -196,50 +196,48 @@ static unsigned int bucket_heap_top(struct cache *ca)
 
 void bch_moving_gc(struct cache_set *c)
 {
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	struct bucket *b;
-	unsigned int i;
+	unsigned long sectors_to_move, reserve_sectors;
 
 	if (!c->copy_gc_enabled)
 		return;
 
 	mutex_lock(&c->bucket_lock);
 
-	for_each_cache(ca, c, i) {
-		unsigned long sectors_to_move = 0;
-		unsigned long reserve_sectors = ca->sb.bucket_size *
-			fifo_used(&ca->free[RESERVE_MOVINGGC]);
-
-		ca->heap.used = 0;
-
-		for_each_bucket(b, ca) {
-			if (GC_MARK(b) == GC_MARK_METADATA ||
-			    !GC_SECTORS_USED(b) ||
-			    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
-			    atomic_read(&b->pin))
-				continue;
-
-			if (!heap_full(&ca->heap)) {
-				sectors_to_move += GC_SECTORS_USED(b);
-				heap_add(&ca->heap, b, bucket_cmp);
-			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
-				sectors_to_move -= bucket_heap_top(ca);
-				sectors_to_move += GC_SECTORS_USED(b);
-
-				ca->heap.data[0] = b;
-				heap_sift(&ca->heap, 0, bucket_cmp);
-			}
-		}
-
-		while (sectors_to_move > reserve_sectors) {
-			heap_pop(&ca->heap, b, bucket_cmp);
-			sectors_to_move -= GC_SECTORS_USED(b);
-		}
-
-		while (heap_pop(&ca->heap, b, bucket_cmp))
-			SET_GC_MOVE(b, 1);
-	}
+	sectors_to_move = 0;
+	reserve_sectors = ca->sb.bucket_size *
+			fifo_used(&ca->free[RESERVE_MOVINGGC]);
+
+	ca->heap.used = 0;
+
+	for_each_bucket(b, ca) {
+		if (GC_MARK(b) == GC_MARK_METADATA ||
+		    !GC_SECTORS_USED(b) ||
+		    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
+		    atomic_read(&b->pin))
+			continue;
+
+		if (!heap_full(&ca->heap)) {
+			sectors_to_move += GC_SECTORS_USED(b);
+			heap_add(&ca->heap, b, bucket_cmp);
+		} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+			sectors_to_move -= bucket_heap_top(ca);
+			sectors_to_move += GC_SECTORS_USED(b);
+
+			ca->heap.data[0] = b;
+			heap_sift(&ca->heap, 0, bucket_cmp);
+		}
+	}
+
+	while (sectors_to_move > reserve_sectors) {
+		heap_pop(&ca->heap, b, bucket_cmp);
+		sectors_to_move -= GC_SECTORS_USED(b);
+	}
+
+	while (heap_pop(&ca->heap, b, bucket_cmp))
+		SET_GC_MOVE(b, 1);
 
 	mutex_unlock(&c->bucket_lock);
 
 	c->moving_gc_keys.last_scanned = ZERO_KEY;

drivers/md/bcache/super.c

@@ -343,8 +343,9 @@ static void bcache_write_super_unlock(struct closure *cl)
 void bcache_write_super(struct cache_set *c)
 {
 	struct closure *cl = &c->sb_write;
-	struct cache *ca;
-	unsigned int i, version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
+	struct cache *ca = c->cache;
+	struct bio *bio = &ca->sb_bio;
+	unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
 
 	down(&c->sb_write_mutex);
 	closure_init(cl, &c->cl);
@@ -354,23 +355,19 @@ void bcache_write_super(struct cache_set *c)
 	if (c->sb.version > version)
 		version = c->sb.version;
 
-	for_each_cache(ca, c, i) {
-		struct bio *bio = &ca->sb_bio;
-
-		ca->sb.version = version;
-		ca->sb.seq = c->sb.seq;
-		ca->sb.last_mount = c->sb.last_mount;
-
-		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
-
-		bio_init(bio, ca->sb_bv, 1);
-		bio_set_dev(bio, ca->bdev);
-		bio->bi_end_io = write_super_endio;
-		bio->bi_private = ca;
-
-		closure_get(cl);
-		__write_super(&ca->sb, ca->sb_disk, bio);
-	}
+	ca->sb.version = version;
+	ca->sb.seq = c->sb.seq;
+	ca->sb.last_mount = c->sb.last_mount;
+
+	SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
+
+	bio_init(bio, ca->sb_bv, 1);
+	bio_set_dev(bio, ca->bdev);
+	bio->bi_end_io = write_super_endio;
+	bio->bi_private = ca;
+
+	closure_get(cl);
+	__write_super(&ca->sb, ca->sb_disk, bio);
 
 	closure_return_with_destructor(cl, bcache_write_super_unlock);
 }
@@ -772,26 +769,22 @@ static void bcache_device_unlink(struct bcache_device *d)
 	lockdep_assert_held(&bch_register_lock);
 
 	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
-		unsigned int i;
-		struct cache *ca;
+		struct cache *ca = d->c->cache;
 
 		sysfs_remove_link(&d->c->kobj, d->name);
 		sysfs_remove_link(&d->kobj, "cache");
 
-		for_each_cache(ca, d->c, i)
-			bd_unlink_disk_holder(ca->bdev, d->disk);
+		bd_unlink_disk_holder(ca->bdev, d->disk);
 	}
 }
 
 static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
 			       const char *name)
 {
-	unsigned int i;
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	int ret;
 
-	for_each_cache(ca, d->c, i)
-		bd_link_disk_holder(ca->bdev, d->disk);
+	bd_link_disk_holder(ca->bdev, d->disk);
 
 	snprintf(d->name, BCACHEDEVNAME_SIZE,
 		 "%s%u", name, d->id);
@@ -1662,7 +1655,6 @@ static void cache_set_free(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, cl);
 	struct cache *ca;
-	unsigned int i;
 
 	debugfs_remove(c->debug);
 
@@ -1671,12 +1663,12 @@ static void cache_set_free(struct closure *cl)
 	bch_journal_free(c);
 
 	mutex_lock(&bch_register_lock);
-	for_each_cache(ca, c, i)
-		if (ca) {
-			ca->set = NULL;
-			c->cache = NULL;
-			kobject_put(&ca->kobj);
-		}
+	ca = c->cache;
+	if (ca) {
+		ca->set = NULL;
+		c->cache = NULL;
+		kobject_put(&ca->kobj);
+	}
 
 	bch_bset_sort_state_free(&c->sort);
 	free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->sb)));
@@ -1702,9 +1694,8 @@ static void cache_set_free(struct closure *cl)
 static void cache_set_flush(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	struct btree *b;
-	unsigned int i;
 
 	bch_cache_accounting_destroy(&c->accounting);
 
@@ -1729,9 +1720,8 @@ static void cache_set_flush(struct closure *cl)
 		mutex_unlock(&b->write_lock);
 	}
 
-	for_each_cache(ca, c, i)
-		if (ca->alloc_thread)
-			kthread_stop(ca->alloc_thread);
+	if (ca->alloc_thread)
+		kthread_stop(ca->alloc_thread);
 
 	if (c->journal.cur) {
 		cancel_delayed_work_sync(&c->journal.work);
@@ -1972,16 +1962,14 @@ static int run_cache_set(struct cache_set *c)
 {
 	const char *err = "cannot allocate memory";
 	struct cached_dev *dc, *t;
-	struct cache *ca;
+	struct cache *ca = c->cache;
 	struct closure cl;
-	unsigned int i;
 	LIST_HEAD(journal);
 	struct journal_replay *l;
 
 	closure_init_stack(&cl);
 
-	for_each_cache(ca, c, i)
-		c->nbuckets += ca->sb.nbuckets;
+	c->nbuckets = ca->sb.nbuckets;
 	set_gc_sectors(c);
 
 	if (CACHE_SYNC(&c->sb)) {
@@ -2001,10 +1989,8 @@ static int run_cache_set(struct cache_set *c)
 		j = &list_entry(journal.prev, struct journal_replay, list)->j;
 
 		err = "IO error reading priorities";
-		for_each_cache(ca, c, i) {
-			if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
-				goto err;
-		}
+		if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
+			goto err;
 
 		/*
 		 * If prio_read() fails it'll call cache_set_error and we'll
@@ -2048,9 +2034,8 @@ static int run_cache_set(struct cache_set *c)
 		bch_journal_next(&c->journal);
 
 		err = "error starting allocator thread";
-		for_each_cache(ca, c, i)
-			if (bch_cache_allocator_start(ca))
-				goto err;
+		if (bch_cache_allocator_start(ca))
+			goto err;
 
 		/*
 		 * First place it's safe to allocate: btree_check() and
@@ -2069,28 +2054,23 @@ static int run_cache_set(struct cache_set *c)
 		if (bch_journal_replay(c, &journal))
 			goto err;
 	} else {
+		unsigned int j;
+
 		pr_notice("invalidating existing data\n");
+		ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
+				      2, SB_JOURNAL_BUCKETS);
 
-		for_each_cache(ca, c, i) {
-			unsigned int j;
-
-			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
-					      2, SB_JOURNAL_BUCKETS);
-
-			for (j = 0; j < ca->sb.keys; j++)
-				ca->sb.d[j] = ca->sb.first_bucket + j;
-		}
+		for (j = 0; j < ca->sb.keys; j++)
+			ca->sb.d[j] = ca->sb.first_bucket + j;
 
 		bch_initial_gc_finish(c);
 
 		err = "error starting allocator thread";
-		for_each_cache(ca, c, i)
-			if (bch_cache_allocator_start(ca))
-				goto err;
+		if (bch_cache_allocator_start(ca))
+			goto err;
 
 		mutex_lock(&c->bucket_lock);
-		for_each_cache(ca, c, i)
-			bch_prio_write(ca, true);
+		bch_prio_write(ca, true);
 		mutex_unlock(&c->bucket_lock);
 
 		err = "cannot allocate new UUID bucket";
@@ -2465,13 +2445,14 @@ static bool bch_is_open_backing(struct block_device *bdev)
 static bool bch_is_open_cache(struct block_device *bdev)
 {
 	struct cache_set *c, *tc;
-	struct cache *ca;
-	unsigned int i;
 
-	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
-		for_each_cache(ca, c, i)
-			if (ca->bdev == bdev)
-				return true;
+	list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
+		struct cache *ca = c->cache;
+
+		if (ca->bdev == bdev)
+			return true;
+	}
 
 	return false;
 }