dm io: use slab for struct io
Allocate "struct io" from a slab. This patch changes dm-io, so that "struct io" is allocated from a slab cache. It used to be allocated with kmalloc. Allocating from a slab will be needed for the next patch, because it requires a special alignment of "struct io" and kmalloc cannot meet this alignment. Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> Signed-off-by: Alasdair G Kergon <agk@redhat.com>
commit 952b355760
parent 542da31766
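The reason for the switch is that a slab cache created with kmem_cache_create() can be given an explicit object alignment, whereas kmalloc() only guarantees the architecture's minimum alignment. As a rough illustration only, not part of this patch, a follow-up could create the cache with a caller-chosen alignment; the 32-byte value and the helper name below are made-up examples:

	/* Illustration only: a slab cache with an explicit, caller-chosen
	 * alignment.  The 32-byte boundary is hypothetical; this patch itself
	 * keeps the struct's natural alignment via KMEM_CACHE(io, 0).
	 */
	static int example_create_aligned_io_cache(void)
	{
		_dm_io_cache = kmem_cache_create("dm_io", sizeof(struct io),
						 32,	/* hypothetical alignment */
						 0,	/* flags */
						 NULL);	/* no constructor */
		if (!_dm_io_cache)
			return -ENOMEM;
		return 0;
	}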
drivers/md/dm-io.c
@@ -5,6 +5,8 @@
  * This file is released under the GPL.
  */
 
+#include "dm.h"
+
 #include <linux/device-mapper.h>
 
 #include <linux/bio.h>
@@ -30,6 +32,8 @@ struct io {
 	void *context;
 };
 
+static struct kmem_cache *_dm_io_cache;
+
 /*
  * io contexts are only dynamically allocated for asynchronous
  * io. Since async io is likely to be the majority of io we'll
@@ -53,7 +57,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
 	if (!client)
 		return ERR_PTR(-ENOMEM);
 
-	client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
+	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
 	if (!client->pool)
 		goto bad;
 
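With the pool now backed by the slab cache, reserved elements are whole "struct io" objects rather than kmalloc'd buffers. A minimal sketch of how such a pool is used (the helper names are hypothetical; dm-io's real allocation sites differ in detail):

	/* Allocate a struct io from the client's slab-backed mempool.
	 * mempool_alloc() dips into the pool's reserved elements if the
	 * underlying slab allocation fails, so the I/O path can make
	 * forward progress under memory pressure.
	 */
	static struct io *example_alloc_io(struct dm_io_client *client)
	{
		return mempool_alloc(client->pool, GFP_NOIO);
	}

	static void example_free_io(struct dm_io_client *client, struct io *io)
	{
		mempool_free(io, client->pool);
	}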
@@ -472,3 +476,18 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 		      &dp, io_req->notify.fn, io_req->notify.context);
 }
 EXPORT_SYMBOL(dm_io);
+
+int __init dm_io_init(void)
+{
+	_dm_io_cache = KMEM_CACHE(io, 0);
+	if (!_dm_io_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void dm_io_exit(void)
+{
+	kmem_cache_destroy(_dm_io_cache);
+	_dm_io_cache = NULL;
+}
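KMEM_CACHE(io, 0) creates a cache named after the struct, using its size and natural alignment. It is roughly shorthand for the following (paraphrasing include/linux/slab.h):

	/* KMEM_CACHE(io, 0) expands to approximately: */
	_dm_io_cache = kmem_cache_create("io", sizeof(struct io),
					 __alignof__(struct io), 0, NULL);

Because the cache is now created in one place, a later patch can replace the alignment argument with whatever stricter boundary it needs.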
drivers/md/dm.c
@@ -275,6 +275,7 @@ static int (*_inits[])(void) __initdata = {
 	dm_target_init,
 	dm_linear_init,
 	dm_stripe_init,
+	dm_io_init,
 	dm_kcopyd_init,
 	dm_interface_init,
 };
@@ -284,6 +285,7 @@ static void (*_exits[])(void) = {
 	dm_target_exit,
 	dm_linear_exit,
 	dm_stripe_exit,
+	dm_io_exit,
 	dm_kcopyd_exit,
 	dm_interface_exit,
 };
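dm.c runs the _inits[] table in order from its module init and, if any entry fails, unwinds the already-initialised entries via _exits[] in reverse; dm_io_init()/dm_io_exit() simply become one more pair in those tables. Roughly, the pattern is as follows (the helper name is illustrative):

	/* Sketch of the table-driven init/exit pattern used by dm.c. */
	static int example_run_inits(void)
	{
		int i, r;

		for (i = 0; i < ARRAY_SIZE(_inits); i++) {
			r = _inits[i]();
			if (r) {
				while (i--)	/* unwind in reverse */
					_exits[i]();
				return r;
			}
		}
		return 0;
	}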
drivers/md/dm.h
@@ -118,6 +118,9 @@ int dm_lock_for_deletion(struct mapped_device *md);
 void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
 		       unsigned cookie);
 
+int dm_io_init(void);
+void dm_io_exit(void);
+
 int dm_kcopyd_init(void);
 void dm_kcopyd_exit(void);
 