block: switch bios to blk_status_t
Replace bi_error with a new bi_status to allow for a clear conversion. Note that device mapper overloaded bi_error with a private value, which we'll have to keep around at least for now and thus propagate to a proper blk_status_t value. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
committed by
Jens Axboe
parent
fc17b6534e
commit
4e4cbee93d
@ -87,7 +87,7 @@ struct btrfs_end_io_wq {
|
||||
bio_end_io_t *end_io;
|
||||
void *private;
|
||||
struct btrfs_fs_info *info;
|
||||
int error;
|
||||
blk_status_t status;
|
||||
enum btrfs_wq_endio_type metadata;
|
||||
struct list_head list;
|
||||
struct btrfs_work work;
|
||||
@ -131,7 +131,7 @@ struct async_submit_bio {
|
||||
*/
|
||||
u64 bio_offset;
|
||||
struct btrfs_work work;
|
||||
int error;
|
||||
blk_status_t status;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -799,7 +799,7 @@ static void end_workqueue_bio(struct bio *bio)
|
||||
btrfs_work_func_t func;
|
||||
|
||||
fs_info = end_io_wq->info;
|
||||
end_io_wq->error = bio->bi_error;
|
||||
end_io_wq->status = bio->bi_status;
|
||||
|
||||
if (bio_op(bio) == REQ_OP_WRITE) {
|
||||
if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
|
||||
@ -836,19 +836,19 @@ static void end_workqueue_bio(struct bio *bio)
|
||||
btrfs_queue_work(wq, &end_io_wq->work);
|
||||
}
|
||||
|
||||
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
|
||||
blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
|
||||
enum btrfs_wq_endio_type metadata)
|
||||
{
|
||||
struct btrfs_end_io_wq *end_io_wq;
|
||||
|
||||
end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
|
||||
if (!end_io_wq)
|
||||
return -ENOMEM;
|
||||
return BLK_STS_RESOURCE;
|
||||
|
||||
end_io_wq->private = bio->bi_private;
|
||||
end_io_wq->end_io = bio->bi_end_io;
|
||||
end_io_wq->info = info;
|
||||
end_io_wq->error = 0;
|
||||
end_io_wq->status = 0;
|
||||
end_io_wq->bio = bio;
|
||||
end_io_wq->metadata = metadata;
|
||||
|
||||
@ -868,14 +868,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
|
||||
static void run_one_async_start(struct btrfs_work *work)
|
||||
{
|
||||
struct async_submit_bio *async;
|
||||
int ret;
|
||||
blk_status_t ret;
|
||||
|
||||
async = container_of(work, struct async_submit_bio, work);
|
||||
ret = async->submit_bio_start(async->inode, async->bio,
|
||||
async->mirror_num, async->bio_flags,
|
||||
async->bio_offset);
|
||||
if (ret)
|
||||
async->error = ret;
|
||||
async->status = ret;
|
||||
}
|
||||
|
||||
static void run_one_async_done(struct btrfs_work *work)
|
||||
@ -898,8 +898,8 @@ static void run_one_async_done(struct btrfs_work *work)
|
||||
wake_up(&fs_info->async_submit_wait);
|
||||
|
||||
/* If an error occurred we just want to clean up the bio and move on */
|
||||
if (async->error) {
|
||||
async->bio->bi_error = async->error;
|
||||
if (async->status) {
|
||||
async->bio->bi_status = async->status;
|
||||
bio_endio(async->bio);
|
||||
return;
|
||||
}
|
||||
@ -916,18 +916,17 @@ static void run_one_async_free(struct btrfs_work *work)
|
||||
kfree(async);
|
||||
}
|
||||
|
||||
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
|
||||
struct bio *bio, int mirror_num,
|
||||
unsigned long bio_flags,
|
||||
u64 bio_offset,
|
||||
extent_submit_bio_hook_t *submit_bio_start,
|
||||
extent_submit_bio_hook_t *submit_bio_done)
|
||||
blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
|
||||
struct inode *inode, struct bio *bio, int mirror_num,
|
||||
unsigned long bio_flags, u64 bio_offset,
|
||||
extent_submit_bio_hook_t *submit_bio_start,
|
||||
extent_submit_bio_hook_t *submit_bio_done)
|
||||
{
|
||||
struct async_submit_bio *async;
|
||||
|
||||
async = kmalloc(sizeof(*async), GFP_NOFS);
|
||||
if (!async)
|
||||
return -ENOMEM;
|
||||
return BLK_STS_RESOURCE;
|
||||
|
||||
async->inode = inode;
|
||||
async->bio = bio;
|
||||
@ -941,7 +940,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
|
||||
async->bio_flags = bio_flags;
|
||||
async->bio_offset = bio_offset;
|
||||
|
||||
async->error = 0;
|
||||
async->status = 0;
|
||||
|
||||
atomic_inc(&fs_info->nr_async_submits);
|
||||
|
||||
@ -959,7 +958,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int btree_csum_one_bio(struct bio *bio)
|
||||
static blk_status_t btree_csum_one_bio(struct bio *bio)
|
||||
{
|
||||
struct bio_vec *bvec;
|
||||
struct btrfs_root *root;
|
||||
@ -972,12 +971,12 @@ static int btree_csum_one_bio(struct bio *bio)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
return errno_to_blk_status(ret);
|
||||
}
|
||||
|
||||
static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
|
||||
int mirror_num, unsigned long bio_flags,
|
||||
u64 bio_offset)
|
||||
static blk_status_t __btree_submit_bio_start(struct inode *inode,
|
||||
struct bio *bio, int mirror_num, unsigned long bio_flags,
|
||||
u64 bio_offset)
|
||||
{
|
||||
/*
|
||||
* when we're called for a write, we're already in the async
|
||||
@ -986,11 +985,11 @@ static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
|
||||
return btree_csum_one_bio(bio);
|
||||
}
|
||||
|
||||
static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
|
||||
int mirror_num, unsigned long bio_flags,
|
||||
u64 bio_offset)
|
||||
static blk_status_t __btree_submit_bio_done(struct inode *inode,
|
||||
struct bio *bio, int mirror_num, unsigned long bio_flags,
|
||||
u64 bio_offset)
|
||||
{
|
||||
int ret;
|
||||
blk_status_t ret;
|
||||
|
||||
/*
|
||||
* when we're called for a write, we're already in the async
|
||||
@ -998,7 +997,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
|
||||
*/
|
||||
ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
|
||||
if (ret) {
|
||||
bio->bi_error = ret;
|
||||
bio->bi_status = ret;
|
||||
bio_endio(bio);
|
||||
}
|
||||
return ret;
|
||||
@ -1015,13 +1014,13 @@ static int check_async_write(unsigned long bio_flags)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
|
||||
static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
|
||||
int mirror_num, unsigned long bio_flags,
|
||||
u64 bio_offset)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
int async = check_async_write(bio_flags);
|
||||
int ret;
|
||||
blk_status_t ret;
|
||||
|
||||
if (bio_op(bio) != REQ_OP_WRITE) {
|
||||
/*
|
||||
@ -1054,7 +1053,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
|
||||
return 0;
|
||||
|
||||
out_w_error:
|
||||
bio->bi_error = ret;
|
||||
bio->bi_status = ret;
|
||||
bio_endio(bio);
|
||||
return ret;
|
||||
}
|
||||
@ -1820,7 +1819,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
|
||||
end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
|
||||
bio = end_io_wq->bio;
|
||||
|
||||
bio->bi_error = end_io_wq->error;
|
||||
bio->bi_status = end_io_wq->status;
|
||||
bio->bi_private = end_io_wq->private;
|
||||
bio->bi_end_io = end_io_wq->end_io;
|
||||
kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
|
||||
@ -3495,11 +3494,11 @@ static void btrfs_end_empty_barrier(struct bio *bio)
|
||||
* any device where the flush fails with eopnotsupp are flagged as not-barrier
|
||||
* capable
|
||||
*/
|
||||
static int write_dev_flush(struct btrfs_device *device, int wait)
|
||||
static blk_status_t write_dev_flush(struct btrfs_device *device, int wait)
|
||||
{
|
||||
struct request_queue *q = bdev_get_queue(device->bdev);
|
||||
struct bio *bio;
|
||||
int ret = 0;
|
||||
blk_status_t ret = 0;
|
||||
|
||||
if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
|
||||
return 0;
|
||||
@ -3511,8 +3510,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
|
||||
|
||||
wait_for_completion(&device->flush_wait);
|
||||
|
||||
if (bio->bi_error) {
|
||||
ret = bio->bi_error;
|
||||
if (bio->bi_status) {
|
||||
ret = bio->bi_status;
|
||||
btrfs_dev_stat_inc_and_print(device,
|
||||
BTRFS_DEV_STAT_FLUSH_ERRS);
|
||||
}
|
||||
@ -3531,7 +3530,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
|
||||
device->flush_bio = NULL;
|
||||
bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
|
||||
if (!bio)
|
||||
return -ENOMEM;
|
||||
return BLK_STS_RESOURCE;
|
||||
|
||||
bio->bi_end_io = btrfs_end_empty_barrier;
|
||||
bio->bi_bdev = device->bdev;
|
||||
@ -3556,7 +3555,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
|
||||
struct btrfs_device *dev;
|
||||
int errors_send = 0;
|
||||
int errors_wait = 0;
|
||||
int ret;
|
||||
blk_status_t ret;
|
||||
|
||||
/* send down all the barriers */
|
||||
head = &info->fs_devices->devices;
|
||||
|
Reference in New Issue
Block a user