bio: fix bio_kmalloc()
Impact: fix bio_kmalloc() and its destruction path

bio_kmalloc() was broken in two ways.

* bvec_alloc_bs() first allocates bvec using kmalloc() and then
  ignores it and allocates again like non-kmalloc bvecs.

* bio_kmalloc_destructor() didn't check for and free bio integrity
  data.

This patch fixes the above problems.  The kmalloc path is separated
out from bio_alloc_bioset() and allocates the requested number of
bvecs as inline bvecs.

* bio_alloc_bioset() no longer takes NULL @bs.  None other than
  bio_kmalloc() used it and outside users can't know how it was
  allocated anyway.

* Define and use BIO_POOL_NONE so that the pool index check in
  bvec_free_bs() triggers if an inline or kmalloc allocated bvec
  gets there.

* Relocate destructors on top of each allocation function so that how
  they're used is more clear.

Jens Axboe suggested allocating bvecs inline.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent cd0aca2d55
commit 451a9ebf65

fs/bio.c | 116
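As a rough usage sketch of the fixed path (not part of the patch itself): after this change a bio_kmalloc() bio carries its nr_iovecs bvecs inline in the same kmalloc() allocation, and the final bio_put() tears everything down through bio_kmalloc_destructor(), including any integrity payload. The helper and callback names below (submit_one_page(), my_end_io()) and the caller-supplied bdev, sector and page are hypothetical placeholders, assuming the 2.6.30-era block layer API.

/* Hypothetical illustration only -- not part of this commit. */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static void my_end_io(struct bio *bio, int error)
{
	/*
	 * Dropping the last reference invokes bio_kmalloc_destructor(),
	 * which frees integrity data (if any) and then the single
	 * kmalloc'd bio + inline bvec allocation.
	 */
	bio_put(bio);
}

static int submit_one_page(struct block_device *bdev, sector_t sector,
			   struct page *page)
{
	struct bio *bio;

	/* One allocation covers the bio and one inline bvec; may fail. */
	bio = bio_kmalloc(GFP_KERNEL, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = my_end_io;

	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
		bio_put(bio);
		return -EIO;
	}

	submit_bio(WRITE, bio);
	return 0;
}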
fs/bio.c

@@ -174,14 +174,6 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
 {
 	struct bio_vec *bvl;
 
-	/*
-	 * If 'bs' is given, lookup the pool and do the mempool alloc.
-	 * If not, this is a bio_kmalloc() allocation and just do a
-	 * kzalloc() for the exact number of vecs right away.
-	 */
-	if (!bs)
-		bvl = kmalloc(nr * sizeof(struct bio_vec), gfp_mask);
-
 	/*
 	 * see comment near bvec_array define!
 	 */
@@ -260,21 +252,6 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 	mempool_free(p, bs->bio_pool);
 }
 
-/*
- * default destructor for a bio allocated with bio_alloc_bioset()
- */
-static void bio_fs_destructor(struct bio *bio)
-{
-	bio_free(bio, fs_bio_set);
-}
-
-static void bio_kmalloc_destructor(struct bio *bio)
-{
-	if (bio_has_allocated_vec(bio))
-		kfree(bio->bi_io_vec);
-	kfree(bio);
-}
-
 void bio_init(struct bio *bio)
 {
 	memset(bio, 0, sizeof(*bio));
@@ -301,21 +278,15 @@ void bio_init(struct bio *bio)
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+	unsigned long idx = BIO_POOL_NONE;
 	struct bio_vec *bvl = NULL;
-	struct bio *bio = NULL;
-	unsigned long idx = 0;
-	void *p = NULL;
+	struct bio *bio;
+	void *p;
 
-	if (bs) {
-		p = mempool_alloc(bs->bio_pool, gfp_mask);
-		if (!p)
-			goto err;
-		bio = p + bs->front_pad;
-	} else {
-		bio = kmalloc(sizeof(*bio), gfp_mask);
-		if (!bio)
-			goto err;
-	}
+	p = mempool_alloc(bs->bio_pool, gfp_mask);
+	if (unlikely(!p))
+		return NULL;
+	bio = p + bs->front_pad;
 
 	bio_init(bio);
 
@@ -332,22 +303,50 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 
 		nr_iovecs = bvec_nr_vecs(idx);
 	}
+out_set:
 	bio->bi_flags |= idx << BIO_POOL_OFFSET;
 	bio->bi_max_vecs = nr_iovecs;
-out_set:
 	bio->bi_io_vec = bvl;
 
 	return bio;
 
 err_free:
-	if (bs)
-		mempool_free(p, bs->bio_pool);
-	else
-		kfree(bio);
-err:
+	mempool_free(p, bs->bio_pool);
 	return NULL;
 }
 
+static void bio_fs_destructor(struct bio *bio)
+{
+	bio_free(bio, fs_bio_set);
+}
+
+/**
+ * bio_alloc - allocate a new bio, memory pool backed
+ * @gfp_mask: allocation mask to use
+ * @nr_iovecs: number of iovecs
+ *
+ * Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask
+ * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ *
+ * RETURNS:
+ * Pointer to new bio on success, NULL on failure.
+ */
+struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
+{
+	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+
+	if (bio)
+		bio->bi_destructor = bio_fs_destructor;
+
+	return bio;
+}
+
+static void bio_kmalloc_destructor(struct bio *bio)
+{
+	if (bio_integrity(bio))
+		bio_integrity_free(bio);
+	kfree(bio);
+}
+
 /**
  * bio_alloc - allocate a bio for I/O
  * @gfp_mask: the GFP_ mask given to the slab allocator
@@ -366,29 +365,20 @@ err:
  * do so can cause livelocks under memory pressure.
  *
  **/
-struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
-{
-	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-
-	if (bio)
-		bio->bi_destructor = bio_fs_destructor;
-
-	return bio;
-}
-
-/*
- * Like bio_alloc(), but doesn't use a mempool backing. This means that
- * it CAN fail, but while bio_alloc() can only be used for allocations
- * that have a short (finite) life span, bio_kmalloc() should be used
- * for more permanent bio allocations (like allocating some bio's for
- * initalization or setup purposes).
- */
 struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
 {
-	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+	struct bio *bio;
 
-	if (bio)
-		bio->bi_destructor = bio_kmalloc_destructor;
+	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
+		      gfp_mask);
+	if (unlikely(!bio))
+		return NULL;
+
+	bio_init(bio);
+	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
+	bio->bi_max_vecs = nr_iovecs;
+	bio->bi_io_vec = bio->bi_inline_vecs;
+	bio->bi_destructor = bio_kmalloc_destructor;
 
 	return bio;
 }
include/linux/bio.h

@@ -132,6 +132,7 @@ struct bio {
  * top 4 bits of bio flags indicate the pool this bio came from
  */
 #define BIO_POOL_BITS		(4)
+#define BIO_POOL_NONE		((1UL << BIO_POOL_BITS) - 1)
 #define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
 #define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
 #define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
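The BIO_POOL_NONE value added above fills all BIO_POOL_BITS of the pool-index field, so BIO_POOL_IDX() of a bio_kmalloc() bio (or of one using inline bvecs) reads back as an out-of-range pool index, and a bvec_free_bs()-style range check trips if such a bvec ever reaches it. Below is a small stand-alone, user-space sketch of that bit arithmetic, mirroring the macros above; the BIO_POOL_IDX() variant here takes raw flags rather than a bio, and BIOVEC_NR_POOLS is an assumed illustrative value.

/* User-space illustration of the flag packing; not kernel code. */
#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BIO_POOL_BITS		(4)
#define BIO_POOL_NONE		((1UL << BIO_POOL_BITS) - 1)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_IDX(flags)	((flags) >> BIO_POOL_OFFSET)

#define BIOVEC_NR_POOLS		6	/* assumed value, for illustration only */

int main(void)
{
	unsigned long flags = 0;

	/* What bio_kmalloc() now stores in bi_flags. */
	flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;

	/* Reads back as 15, i.e. "no pool". */
	printf("pool idx = %lu\n", BIO_POOL_IDX(flags));

	/* A bvec_free_bs()-style pool index check now triggers. */
	assert(BIO_POOL_IDX(flags) >= BIOVEC_NR_POOLS);
	return 0;
}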