kill BH_Ordered flag

Instead of abusing a buffer_head flag, just add a variant of
sync_dirty_buffer that allows passing the exact type of write
flag required.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author:    Christoph Hellwig <hch@lst.de>
Date:      2010-08-11 17:05:45 +02:00
Committer: Al Viro <viro@zeniv.linux.org.uk>
Commit:    87e99511ea
Parent:    dad5eb6daa

5 changed files with 63 additions and 73 deletions
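
For illustration, a minimal sketch of the calling convention the new helper
enables: the caller passes the exact write flags it wants, here
WRITE_SYNC | WRITE_BARRIER, instead of tagging the buffer with BH_Ordered,
and falls back to a plain synchronous write when the device reports
-EOPNOTSUPP. The function my_sync_super() and its arguments are hypothetical;
only __sync_dirty_buffer(), sync_dirty_buffer() and the WRITE_* flags come
from this patch.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical caller: write out a dirty metadata buffer, optionally
 * with a barrier, retrying without one if barriers are unsupported. */
static int my_sync_super(struct buffer_head *bh, int use_barrier)
{
	int err;

	set_buffer_dirty(bh);

	if (use_barrier) {
		/* Request the exact write type instead of setting BH_Ordered. */
		err = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
		if (err == -EOPNOTSUPP) {
			/* Device has no barrier support: retry without it. */
			set_buffer_dirty(bh);
			err = sync_dirty_buffer(bh);
		}
	} else {
		/* sync_dirty_buffer() is now __sync_dirty_buffer(bh, WRITE_SYNC). */
		err = sync_dirty_buffer(bh);
	}

	return err;
}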

--- a/fs/buffer.c
+++ b/fs/buffer.c

@@ -2911,13 +2911,6 @@ int submit_bh(int rw, struct buffer_head * bh)
 	BUG_ON(buffer_delay(bh));
 	BUG_ON(buffer_unwritten(bh));
 
-	/*
-	 * Mask in barrier bit for a write (could be either a WRITE or a
-	 * WRITE_SYNC
-	 */
-	if (buffer_ordered(bh) && (rw & WRITE))
-		rw |= WRITE_BARRIER;
-
 	/*
 	 * Only clear out a write error when rewriting
 	 */
@@ -3021,7 +3014,7 @@ EXPORT_SYMBOL(ll_rw_block);
  * and then start new I/O and then wait upon it.  The caller must have a ref on
  * the buffer_head.
  */
-int sync_dirty_buffer(struct buffer_head *bh)
+int __sync_dirty_buffer(struct buffer_head *bh, int rw)
 {
 	int ret = 0;
 
@@ -3030,7 +3023,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE_SYNC, bh);
+		ret = submit_bh(rw, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
@@ -3043,6 +3036,12 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	}
 	return ret;
 }
+EXPORT_SYMBOL(__sync_dirty_buffer);
+
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+	return __sync_dirty_buffer(bh, WRITE_SYNC);
+}
 EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*

--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c

@@ -119,7 +119,6 @@ static int journal_write_commit_record(journal_t *journal,
 	struct buffer_head *bh;
 	journal_header_t *header;
 	int ret;
-	int barrier_done = 0;
 
 	if (is_journal_aborted(journal))
 		return 0;
@@ -137,34 +136,36 @@ static int journal_write_commit_record(journal_t *journal,
 	JBUFFER_TRACE(descriptor, "write commit block");
 	set_buffer_dirty(bh);
+
 	if (journal->j_flags & JFS_BARRIER) {
-		set_buffer_ordered(bh);
-		barrier_done = 1;
-	}
-	ret = sync_dirty_buffer(bh);
-	if (barrier_done)
-		clear_buffer_ordered(bh);
+		ret = __sync_dirty_buffer(bh, WRITE_SYNC | WRITE_BARRIER);
 
-	/* is it possible for another commit to fail at roughly
-	 * the same time as this one?  If so, we don't want to
-	 * trust the barrier flag in the super, but instead want
-	 * to remember if we sent a barrier request
-	 */
-	if (ret == -EOPNOTSUPP && barrier_done) {
-		char b[BDEVNAME_SIZE];
+		/*
+		 * Is it possible for another commit to fail at roughly
+		 * the same time as this one?  If so, we don't want to
+		 * trust the barrier flag in the super, but instead want
+		 * to remember if we sent a barrier request
+		 */
+		if (ret == -EOPNOTSUPP) {
+			char b[BDEVNAME_SIZE];
 
-		printk(KERN_WARNING
-		       "JBD: barrier-based sync failed on %s - "
-		       "disabling barriers\n",
-		       bdevname(journal->j_dev, b));
-		spin_lock(&journal->j_state_lock);
-		journal->j_flags &= ~JFS_BARRIER;
-		spin_unlock(&journal->j_state_lock);
+			printk(KERN_WARNING
+			       "JBD: barrier-based sync failed on %s - "
+			       "disabling barriers\n",
+			       bdevname(journal->j_dev, b));
+			spin_lock(&journal->j_state_lock);
+			journal->j_flags &= ~JFS_BARRIER;
+			spin_unlock(&journal->j_state_lock);
 
-		/* And try again, without the barrier */
-		set_buffer_uptodate(bh);
-		set_buffer_dirty(bh);
+			/* And try again, without the barrier */
+			set_buffer_uptodate(bh);
+			set_buffer_dirty(bh);
+			ret = sync_dirty_buffer(bh);
+		}
+	} else {
 		ret = sync_dirty_buffer(bh);
 	}
+
 	put_bh(bh);		/* One for getblk() */
 	journal_put_journal_head(descriptor);

--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c

@@ -101,7 +101,6 @@ static int journal_submit_commit_record(journal_t *journal,
 	struct commit_header *tmp;
 	struct buffer_head *bh;
 	int ret;
-	int barrier_done = 0;
 	struct timespec now = current_kernel_time();
 
 	if (is_journal_aborted(journal))
@@ -136,30 +135,22 @@ static int journal_submit_commit_record(journal_t *journal,
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
 				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
-		set_buffer_ordered(bh);
-		barrier_done = 1;
-	}
-	ret = submit_bh(WRITE_SYNC_PLUG, bh);
-	if (barrier_done)
-		clear_buffer_ordered(bh);
-
-	/* is it possible for another commit to fail at roughly
-	 * the same time as this one?  If so, we don't want to
-	 * trust the barrier flag in the super, but instead want
-	 * to remember if we sent a barrier request
-	 */
-	if (ret == -EOPNOTSUPP && barrier_done) {
-		printk(KERN_WARNING
-		       "JBD2: Disabling barriers on %s, "
-		       "not supported by device\n", journal->j_devname);
-		write_lock(&journal->j_state_lock);
-		journal->j_flags &= ~JBD2_BARRIER;
-		write_unlock(&journal->j_state_lock);
+		ret = submit_bh(WRITE_SYNC_PLUG | WRITE_BARRIER, bh);
+		if (ret == -EOPNOTSUPP) {
+			printk(KERN_WARNING
+			       "JBD2: Disabling barriers on %s, "
+			       "not supported by device\n", journal->j_devname);
+			write_lock(&journal->j_state_lock);
+			journal->j_flags &= ~JBD2_BARRIER;
+			write_unlock(&journal->j_state_lock);
 
-		/* And try again, without the barrier */
-		lock_buffer(bh);
-		set_buffer_uptodate(bh);
-		clear_buffer_dirty(bh);
+			/* And try again, without the barrier */
+			lock_buffer(bh);
+			set_buffer_uptodate(bh);
+			clear_buffer_dirty(bh);
+			ret = submit_bh(WRITE_SYNC_PLUG, bh);
+		}
+	} else {
 		ret = submit_bh(WRITE_SYNC_PLUG, bh);
 	}
 	*cbh = bh;

--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c

@@ -175,24 +175,24 @@ static int nilfs_sync_super(struct nilfs_sb_info *sbi, int flag)
 {
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	int err;
-	int barrier_done = 0;
 
-	if (nilfs_test_opt(sbi, BARRIER)) {
-		set_buffer_ordered(nilfs->ns_sbh[0]);
-		barrier_done = 1;
-	}
  retry:
 	set_buffer_dirty(nilfs->ns_sbh[0]);
-	err = sync_dirty_buffer(nilfs->ns_sbh[0]);
-	if (err == -EOPNOTSUPP && barrier_done) {
-		nilfs_warning(sbi->s_super, __func__,
-			      "barrier-based sync failed. "
-			      "disabling barriers\n");
-		nilfs_clear_opt(sbi, BARRIER);
-		barrier_done = 0;
-		clear_buffer_ordered(nilfs->ns_sbh[0]);
-		goto retry;
+
+	if (nilfs_test_opt(sbi, BARRIER)) {
+		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
+					  WRITE_SYNC | WRITE_BARRIER);
+		if (err == -EOPNOTSUPP) {
+			nilfs_warning(sbi->s_super, __func__,
+				      "barrier-based sync failed. "
+				      "disabling barriers\n");
+			nilfs_clear_opt(sbi, BARRIER);
+			goto retry;
+		}
+	} else {
+		err = sync_dirty_buffer(nilfs->ns_sbh[0]);
 	}
+
 	if (unlikely(err)) {
 		printk(KERN_ERR
 		       "NILFS: unable to write superblock (err=%d)\n", err);

--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h

@@ -32,7 +32,6 @@ enum bh_state_bits {
 	BH_Delay,	/* Buffer is not yet allocated on disk */
 	BH_Boundary,	/* Block is followed by a discontiguity */
 	BH_Write_EIO,	/* I/O error on write */
-	BH_Ordered,	/* ordered write */
 	BH_Eopnotsupp,	/* operation not supported (barrier) */
 	BH_Unwritten,	/* Buffer is allocated on disk but not written */
 	BH_Quiet,	/* Buffer Error Prinks to be quiet */
@@ -125,7 +124,6 @@ BUFFER_FNS(Async_Write, async_write)
 BUFFER_FNS(Delay, delay)
 BUFFER_FNS(Boundary, boundary)
 BUFFER_FNS(Write_EIO, write_io_error)
-BUFFER_FNS(Ordered, ordered)
 BUFFER_FNS(Eopnotsupp, eopnotsupp)
 BUFFER_FNS(Unwritten, unwritten)
 
@@ -183,6 +181,7 @@ void unlock_buffer(struct buffer_head *bh);
 void __lock_buffer(struct buffer_head *bh);
 void ll_rw_block(int, int, struct buffer_head * bh[]);
 int sync_dirty_buffer(struct buffer_head *bh);
+int __sync_dirty_buffer(struct buffer_head *bh, int rw);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
 			sector_t bblock, unsigned blocksize);