Merge branch 'for-linus' of git://git.kernel.dk/linux-block
* 'for-linus' of git://git.kernel.dk/linux-block:
  drbd: we should write meta data updates with FLUSH FUA
  drbd: fix limit define, we support 1 PiByte now
  drbd: when receive times out on meta socket, also check last receive time on data socket
  drbd: account bitmap IO during resync as resync-(related-)-io
  drbd: don't cond_resched_lock with IRQs disabled
  drbd: add missing spinlock to bitmap receive
  drbd: Use the correct max_bio_size when creating resync requests
  cfq-iosched: make code consistent
  cfq-iosched: fix a rcu warning
commit 2a9d6df425
@@ -2773,11 +2773,14 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 	smp_wmb();
 	cic->key = cfqd_dead_key(cfqd);
 
+	rcu_read_lock();
 	if (rcu_dereference(ioc->ioc_data) == cic) {
+		rcu_read_unlock();
 		spin_lock(&ioc->lock);
 		rcu_assign_pointer(ioc->ioc_data, NULL);
 		spin_unlock(&ioc->lock);
-	}
+	} else
+		rcu_read_unlock();
 
 	if (cic->cfqq[BLK_RW_ASYNC]) {
 		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
@@ -3084,7 +3087,8 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 
 	spin_lock_irqsave(&ioc->lock, flags);
 
-	BUG_ON(ioc->ioc_data == cic);
+	BUG_ON(rcu_dereference_check(ioc->ioc_data,
+		lockdep_is_held(&ioc->lock)) == cic);
 
 	radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
 	hlist_del_rcu(&cic->cic_list);
@@ -79,7 +79,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	md_io.error = 0;
 
 	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
-		rw |= REQ_FUA;
+		rw |= REQ_FUA | REQ_FLUSH;
 	rw |= REQ_SYNC;
 
 	bio = bio_alloc(GFP_NOIO, 1);
@@ -112,9 +112,6 @@ struct drbd_bitmap {
 	struct task_struct *bm_task;
 };
 
-static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
-	unsigned long e, int val, const enum km_type km);
-
 #define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
 static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
 {
@@ -994,6 +991,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 		bio_endio(bio, -EIO);
 	} else {
 		submit_bio(rw, bio);
+		/* this should not count as user activity and cause the
+		 * resync to throttle -- see drbd_rs_should_slow_down(). */
+		atomic_add(len >> 9, &mdev->rs_sect_ev);
 	}
 }
 
@@ -1256,7 +1256,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
  * expected to be called for only a few bits (e - s about BITS_PER_LONG).
  * Must hold bitmap lock already. */
 static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
-	unsigned long e, int val, const enum km_type km)
+	unsigned long e, int val)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long *p_addr = NULL;
@@ -1274,14 +1274,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
 		if (page_nr != last_page_nr) {
 			if (p_addr)
-				__bm_unmap(p_addr, km);
+				__bm_unmap(p_addr, KM_IRQ1);
 			if (c < 0)
 				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 			else if (c > 0)
 				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
 			changed_total += c;
 			c = 0;
-			p_addr = __bm_map_pidx(b, page_nr, km);
+			p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
 			last_page_nr = page_nr;
 		}
 		if (val)
@@ -1290,7 +1290,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
 	}
 	if (p_addr)
-		__bm_unmap(p_addr, km);
+		__bm_unmap(p_addr, KM_IRQ1);
 	if (c < 0)
 		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 	else if (c > 0)
@@ -1318,7 +1318,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
 		bm_print_lock_info(mdev);
 
-	c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
+	c = __bm_change_bits_to(mdev, s, e, val);
 
 	spin_unlock_irqrestore(&b->bm_lock, flags);
 	return c;
@@ -1343,16 +1343,17 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
 {
 	int i;
 	int bits;
-	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
+	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
 	for (i = first_word; i < last_word; i++) {
 		bits = hweight_long(paddr[i]);
 		paddr[i] = ~0UL;
 		b->bm_set += BITS_PER_LONG - bits;
 	}
-	kunmap_atomic(paddr, KM_USER0);
+	kunmap_atomic(paddr, KM_IRQ1);
 }
 
-/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
+/* Same thing as drbd_bm_set_bits,
+ * but more efficient for a large bit range.
  * You must first drbd_bm_lock().
  * Can be called to set the whole bitmap in one go.
  * Sets bits from s to e _inclusive_. */
@@ -1366,6 +1367,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 	 * Do not use memset, because we must account for changes,
 	 * so we need to loop over the words with hweight() anyways.
 	 */
+	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long sl = ALIGN(s,BITS_PER_LONG);
 	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
 	int first_page;
@@ -1376,15 +1378,19 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 
 	if (e - s <= 3*BITS_PER_LONG) {
 		/* don't bother; el and sl may even be wrong. */
-		__bm_change_bits_to(mdev, s, e, 1, KM_USER0);
+		spin_lock_irq(&b->bm_lock);
+		__bm_change_bits_to(mdev, s, e, 1);
+		spin_unlock_irq(&b->bm_lock);
 		return;
 	}
 
 	/* difference is large enough that we can trust sl and el */
 
+	spin_lock_irq(&b->bm_lock);
+
 	/* bits filling the current long */
 	if (sl)
-		__bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
+		__bm_change_bits_to(mdev, s, sl-1, 1);
 
 	first_page = sl >> (3 + PAGE_SHIFT);
 	last_page = el >> (3 + PAGE_SHIFT);
@@ -1397,8 +1403,10 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 	/* first and full pages, unless first page == last page */
 	for (page_nr = first_page; page_nr < last_page; page_nr++) {
 		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+		spin_unlock_irq(&b->bm_lock);
 		cond_resched();
 		first_word = 0;
+		spin_lock_irq(&b->bm_lock);
 	}
 
 	/* last page (respectively only page, for first page == last page) */
@@ -1411,7 +1419,8 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
 	 * it would trigger an assert in __bm_change_bits_to()
 	 */
 	if (el <= e)
-		__bm_change_bits_to(mdev, el, e, 1, KM_USER0);
+		__bm_change_bits_to(mdev, el, e, 1);
+	spin_unlock_irq(&b->bm_lock);
 }
 
 /* returns bit state
@@ -4602,6 +4602,11 @@ int drbd_asender(struct drbd_thread *thi)
 			dev_err(DEV, "meta connection shut down by peer.\n");
 			goto reconnect;
 		} else if (rv == -EAGAIN) {
+			/* If the data socket received something meanwhile,
+			 * that is good enough: peer is still alive. */
+			if (time_after(mdev->last_received,
+				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+				continue;
 			if (ping_timeout_active) {
 				dev_err(DEV, "PingAck did not arrive in time.\n");
 				goto reconnect;
@@ -4637,6 +4642,7 @@ int drbd_asender(struct drbd_thread *thi)
 			goto reconnect;
 		}
 		if (received == expect) {
+			mdev->last_received = jiffies;
 			D_ASSERT(cmd != NULL);
 			if (!cmd->process(mdev, h))
 				goto reconnect;
@@ -536,12 +536,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
 		return 1;
 	}
 
-	/* starting with drbd 8.3.8, we can handle multi-bio EEs,
-	 * if it should be necessary */
-	max_bio_size =
-		mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
-		mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
-
+	max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
 	number = drbd_rs_number_requests(mdev);
 	if (number == 0)
 		goto requeue;
@@ -117,10 +117,10 @@
 /* drbdsetup XY resize -d Z
  * you are free to reduce the device size to nothing, if you want to.
  * the upper limit with 64bit kernel, enough ram and flexible meta data
- * is 16 TB, currently. */
+ * is 1 PiB, currently. */
 /* DRBD_MAX_SECTORS */
 #define DRBD_DISK_SIZE_SECT_MIN 0
-#define DRBD_DISK_SIZE_SECT_MAX (16 * (2LLU << 30))
+#define DRBD_DISK_SIZE_SECT_MAX (1 * (2LLU << 40))
 #define DRBD_DISK_SIZE_SECT_DEF 0 /* = disabled = no user size... */
 
 #define DRBD_ON_IO_ERROR_DEF EP_PASS_ON