mm, sl[aou]b: Extract common fields from struct kmem_cache
Define a struct that describes common fields used in all slab allocators. A slab allocator either uses the common definition (like SLOB) or is required to provide members of kmem_cache with the definition given.

After that it will be possible to share code that only operates on those fields of kmem_cache.

The patch basically takes the slob definition of kmem_cache and uses the field names for the other allocators.

It also standardizes the names used for basic object lengths in allocators:

object_size	Struct size specified at kmem_cache_create. Basically the
		payload expected to be used by the subsystem.

size		The size of memory allocated for each object. This size is
		larger than object_size and includes padding, alignment and
		extra metadata for each object (f.e. for debugging and rcu).

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
commit 3b0efdfa1e
parent 350260889b
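To make the two standardized length fields concrete before reading the diff, here is a minimal userspace sketch. It is not part of the patch: round_up(), DEBUG_EXTRA and the numbers in main() are inventions for illustration; only the field names and their meaning come from the commit message above.

/*
 * Illustrative model of the two lengths this patch standardizes.
 * "object_size" is the payload the caller asked kmem_cache_create() for;
 * "size" is what the allocator reserves per object once alignment,
 * padding and any per-object debug metadata are added, so size >= object_size.
 */
#include <stdio.h>

#define DEBUG_EXTRA 16	/* hypothetical red zone + caller word */

static unsigned int round_up(unsigned int n, unsigned int align)
{
	return (n + align - 1) / align * align;
}

struct cache_lengths {
	unsigned int object_size;	/* payload requested at creation */
	unsigned int size;		/* per-object footprint in the slab */
};

static struct cache_lengths compute(unsigned int requested,
				    unsigned int align, int debug)
{
	struct cache_lengths c;

	c.object_size = requested;
	c.size = round_up(requested, align);
	if (debug)
		c.size += DEBUG_EXTRA;	/* metadata only ever grows "size" */
	return c;
}

int main(void)
{
	struct cache_lengths c = compute(52, 8, 1);

	printf("object_size=%u size=%u\n", c.object_size, c.size);
	return 0;
}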
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -92,6 +92,30 @@
 #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
 
+/*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put a
+ * anonymous struct definition in these allocators so that the
+ * separate allocations in the kmem_cache structure of SLAB and
+ * SLUB is no longer needed.
+ */
+#ifdef CONFIG_SLOB
+struct kmem_cache {
+	unsigned int object_size;/* The original size of the object */
+	unsigned int size;	/* The aligned/padded/added on size  */
+	unsigned int align;	/* Alignment as calculated */
+	unsigned long flags;	/* Active flags on the slab */
+	const char *name;	/* Slab name for sysfs */
+	int refcount;		/* Use counter */
+	void (*ctor)(void *);	/* Called on object slot creation */
+	struct list_head list;	/* List of all slab caches on the system */
+};
+#endif
+
 /*
  * struct kmem_cache related prototypes
  */
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,7 +27,7 @@ struct kmem_cache {
 	unsigned int limit;
 	unsigned int shared;
 
-	unsigned int buffer_size;
+	unsigned int size;
 	u32 reciprocal_buffer_size;
 /* 2) touched by every alloc & free from the backend */
 
@@ -52,7 +52,10 @@ struct kmem_cache {
 
 /* 4) cache creation/removal */
 	const char *name;
-	struct list_head next;
+	struct list_head list;
+	int refcount;
+	int object_size;
+	int align;
 
 /* 5) statistics */
 #ifdef CONFIG_DEBUG_SLAB
@@ -73,12 +76,11 @@ struct kmem_cache {
 
 	/*
	 * If debugging is enabled, then the allocator can add additional
-	 * fields and/or padding to every object. buffer_size contains the total
+	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
 	int obj_offset;
-	int obj_size;
 #endif /* CONFIG_DEBUG_SLAB */
 
 /* 6) per-cpu/per-node data, touched during every alloc/free */
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -82,7 +82,7 @@ struct kmem_cache {
 	unsigned long flags;
 	unsigned long min_partial;
 	int size;		/* The size of an object including meta data */
-	int objsize;		/* The size of an object without meta data */
+	int object_size;	/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
 	int cpu_partial;	/* Number of per cpu partial objects to keep around */
 	struct kmem_cache_order_objects oo;
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -424,8 +424,8 @@ static void kmem_list3_init(struct kmem_list3 *parent)
  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
  * 		redzone word.
  * cachep->obj_offset: The real object.
- * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
+ * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->size - 1* BYTES_PER_WORD: last caller address
  *					[BYTES_PER_WORD long]
  */
 static int obj_offset(struct kmem_cache *cachep)
@@ -435,7 +435,7 @@ static int obj_offset(struct kmem_cache *cachep)
 
 static int obj_size(struct kmem_cache *cachep)
 {
-	return cachep->obj_size;
+	return cachep->object_size;
 }
 
 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
@@ -449,23 +449,23 @@ static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
 	if (cachep->flags & SLAB_STORE_USER)
-		return (unsigned long long *)(objp + cachep->buffer_size -
+		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
-	return (unsigned long long *) (objp + cachep->buffer_size -
+	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
 }
 
 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 {
 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
-	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
+	return (void **)(objp + cachep->size - BYTES_PER_WORD);
 }
 
 #else
 
 #define obj_offset(x)			0
-#define obj_size(cachep)		(cachep->buffer_size)
+#define obj_size(cachep)		(cachep->size)
 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
@@ -475,7 +475,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 #ifdef CONFIG_TRACING
 size_t slab_buffer_size(struct kmem_cache *cachep)
 {
-	return cachep->buffer_size;
+	return cachep->size;
 }
 EXPORT_SYMBOL(slab_buffer_size);
 #endif
@@ -513,13 +513,13 @@ static inline struct slab *virt_to_slab(const void *obj)
 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
				 unsigned int idx)
 {
-	return slab->s_mem + cache->buffer_size * idx;
+	return slab->s_mem + cache->size * idx;
 }
 
 /*
- * We want to avoid an expensive divide : (offset / cache->buffer_size)
- *   Using the fact that buffer_size is a constant for a particular cache,
- *   we can replace (offset / cache->buffer_size) by
+ * We want to avoid an expensive divide : (offset / cache->size)
+ *   Using the fact that size is a constant for a particular cache,
+ *   we can replace (offset / cache->size) by
  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
  */
 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
@@ -565,7 +565,7 @@ static struct kmem_cache cache_cache = {
 	.batchcount = 1,
 	.limit = BOOT_CPUCACHE_ENTRIES,
 	.shared = 1,
-	.buffer_size = sizeof(struct kmem_cache),
+	.size = sizeof(struct kmem_cache),
 	.name = "kmem_cache",
 };
 
@@ -1134,7 +1134,7 @@ static int init_cache_nodelists_node(int node)
 	struct kmem_list3 *l3;
 	const int memsize = sizeof(struct kmem_list3);
 
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
		/*
		 * Set up the size64 kmemlist for cpu before we can
		 * begin anything. Make sure some other cpu on this
@@ -1172,7 +1172,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	int node = cpu_to_mem(cpu);
 	const struct cpumask *mask = cpumask_of_node(node);
 
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
		struct array_cache *nc;
		struct array_cache *shared;
		struct array_cache **alien;
@@ -1222,7 +1222,7 @@ free_array_cache:
	 * the respective cache's slabs,  now we can go ahead and
	 * shrink each nodelist to its limit.
	 */
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;
@@ -1251,7 +1251,7 @@ static int __cpuinit cpuup_prepare(long cpu)
	 * Now we can go ahead with allocating the shared arrays and
	 * array caches
	 */
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
		struct array_cache *nc;
		struct array_cache *shared = NULL;
		struct array_cache **alien = NULL;
@@ -1383,7 +1383,7 @@ static int __meminit drain_cache_nodelists_node(int node)
 	struct kmem_cache *cachep;
 	int ret = 0;
 
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
		struct kmem_list3 *l3;
 
		l3 = cachep->nodelists[node];
@@ -1526,7 +1526,7 @@ void __init kmem_cache_init(void)
 
 	/* 1) create the cache_cache */
 	INIT_LIST_HEAD(&cache_chain);
-	list_add(&cache_cache.next, &cache_chain);
+	list_add(&cache_cache.list, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
@@ -1534,18 +1534,16 @@ void __init kmem_cache_init(void)
 	/*
	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
	 */
-	cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+	cache_cache.size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
				  nr_node_ids * sizeof(struct kmem_list3 *);
-#if DEBUG
-	cache_cache.obj_size = cache_cache.buffer_size;
-#endif
-	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
+	cache_cache.object_size = cache_cache.size;
+	cache_cache.size = ALIGN(cache_cache.size,
					cache_line_size());
 	cache_cache.reciprocal_buffer_size =
-		reciprocal_value(cache_cache.buffer_size);
+		reciprocal_value(cache_cache.size);
 
 	for (order = 0; order < MAX_ORDER; order++) {
-		cache_estimate(order, cache_cache.buffer_size,
+		cache_estimate(order, cache_cache.size,
			cache_line_size(), 0, &left_over, &cache_cache.num);
		if (cache_cache.num)
			break;
@@ -1671,7 +1669,7 @@ void __init kmem_cache_init_late(void)
 
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
-	list_for_each_entry(cachep, &cache_chain, next)
+	list_for_each_entry(cachep, &cache_chain, list)
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
 	mutex_unlock(&cache_chain_mutex);
@@ -1724,7 +1722,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nodeid, gfpflags);
 	printk(KERN_WARNING "  cache: %s, object size: %d, order: %d\n",
-		cachep->name, cachep->buffer_size, cachep->gfporder);
+		cachep->name, cachep->size, cachep->gfporder);
 
 	for_each_online_node(node) {
		unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
@@ -2028,10 +2026,10 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slab
 
		if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-			if (cachep->buffer_size % PAGE_SIZE == 0 &&
+			if (cachep->size % PAGE_SIZE == 0 &&
					OFF_SLAB(cachep))
				kernel_map_pages(virt_to_page(objp),
-					cachep->buffer_size / PAGE_SIZE, 1);
+					cachep->size / PAGE_SIZE, 1);
			else
				check_poison_obj(cachep, objp);
 #else
@@ -2281,7 +2279,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
		mutex_lock(&cache_chain_mutex);
 	}
 
-	list_for_each_entry(pc, &cache_chain, next) {
+	list_for_each_entry(pc, &cache_chain, list) {
		char tmp;
		int res;
 
@@ -2294,7 +2292,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
		if (res) {
			printk(KERN_ERR
			       "SLAB: cache with size %d has lost its name\n",
-			       pc->buffer_size);
+			       pc->size);
			continue;
		}
 
@@ -2399,8 +2397,9 @@ kmem_cache_create (const char *name, size_t size, size_t align,
		goto oops;
 
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+	cachep->object_size = size;
+	cachep->align = align;
 #if DEBUG
-	cachep->obj_size = size;
 
 	/*
	 * Both debugging options require word-alignment which is calculated
@@ -2423,7 +2422,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
-	    && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
+	    && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
		cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
		size = PAGE_SIZE;
 	}
@@ -2492,7 +2491,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->gfpflags = 0;
 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
		cachep->gfpflags |= GFP_DMA;
-	cachep->buffer_size = size;
+	cachep->size = size;
 	cachep->reciprocal_buffer_size = reciprocal_value(size);
 
 	if (flags & CFLGS_OFF_SLAB) {
@@ -2526,7 +2525,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	}
 
 	/* cache setup completed, link it into the list */
-	list_add(&cachep->next, &cache_chain);
+	list_add(&cachep->list, &cache_chain);
 oops:
 	if (!cachep && (flags & SLAB_PANIC))
		panic("kmem_cache_create(): failed to create slab `%s'\n",
@@ -2721,10 +2720,10 @@ void kmem_cache_destroy(struct kmem_cache *cachep)
 	/*
	 * the chain is never empty, cache_cache is never destroyed
	 */
-	list_del(&cachep->next);
+	list_del(&cachep->list);
 	if (__cache_shrink(cachep)) {
		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->next, &cache_chain);
+		list_add(&cachep->list, &cache_chain);
		mutex_unlock(&cache_chain_mutex);
		put_online_cpus();
		return;
@@ -2821,10 +2820,10 @@ static void cache_init_objs(struct kmem_cache *cachep,
				slab_error(cachep, "constructor overwrote the"
					   " start of an object");
		}
-		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
+		if ((cachep->size % PAGE_SIZE) == 0 &&
			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
			kernel_map_pages(virt_to_page(objp),
-					 cachep->buffer_size / PAGE_SIZE, 0);
+					 cachep->size / PAGE_SIZE, 0);
 #else
		if (cachep->ctor)
			cachep->ctor(objp);
@@ -3058,10 +3057,10 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
 #endif
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
+		if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
			store_stackinfo(cachep, objp, (unsigned long)caller);
			kernel_map_pages(virt_to_page(objp),
-					 cachep->buffer_size / PAGE_SIZE, 0);
+					 cachep->size / PAGE_SIZE, 0);
		} else {
			poison_obj(cachep, objp, POISON_FREE);
		}
@@ -3211,9 +3210,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
		return objp;
 	if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+		if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
			kernel_map_pages(virt_to_page(objp),
-					 cachep->buffer_size / PAGE_SIZE, 1);
+					 cachep->size / PAGE_SIZE, 1);
		else
			check_poison_obj(cachep, objp);
 #else
@@ -3243,7 +3242,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
		unsigned objnr;
 
		slabp = virt_to_head_page(objp)->slab_page;
-		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
+		objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
 	}
 #endif
@@ -3747,7 +3746,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
 
 	trace_kmem_cache_alloc(_RET_IP_, ret,
-			       obj_size(cachep), cachep->buffer_size, flags);
+			       obj_size(cachep), cachep->size, flags);
 
 	return ret;
 }
@@ -3775,7 +3774,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
				       __builtin_return_address(0));
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
-				    obj_size(cachep), cachep->buffer_size,
+				    obj_size(cachep), cachep->size,
				    flags, nodeid);
 
 	return ret;
@@ -3857,7 +3856,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	ret = __cache_alloc(cachep, flags, caller);
 
 	trace_kmalloc((unsigned long) caller, ret,
-		      size, cachep->buffer_size, flags);
+		      size, cachep->size, flags);
 
 	return ret;
 }
@@ -4011,7 +4010,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 	return 0;
 
 fail:
-	if (!cachep->next.next) {
+	if (!cachep->list.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
@@ -4105,13 +4104,13 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
-	if (cachep->buffer_size > 131072)
+	if (cachep->size > 131072)
		limit = 1;
-	else if (cachep->buffer_size > PAGE_SIZE)
+	else if (cachep->size > PAGE_SIZE)
		limit = 8;
-	else if (cachep->buffer_size > 1024)
+	else if (cachep->size > 1024)
		limit = 24;
-	else if (cachep->buffer_size > 256)
+	else if (cachep->size > 256)
		limit = 54;
	else
		limit = 120;
@@ -4126,7 +4125,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
	 * to a larger limit. Thus disabled by default.
	 */
 	shared = 0;
-	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
+	if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;
 
 #if DEBUG
@@ -4196,7 +4195,7 @@ static void cache_reap(struct work_struct *w)
		/* Give up. Setup the next iteration. */
		goto out;
 
-	list_for_each_entry(searchp, &cache_chain, next) {
+	list_for_each_entry(searchp, &cache_chain, list) {
		check_irq_on();
 
		/*
@@ -4289,7 +4288,7 @@ static void s_stop(struct seq_file *m, void *p)
 
 static int s_show(struct seq_file *m, void *p)
 {
-	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
+	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
 	struct slab *slabp;
 	unsigned long active_objs;
 	unsigned long num_objs;
@@ -4345,7 +4344,7 @@ static int s_show(struct seq_file *m, void *p)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
 
 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
-		   name, active_objs, num_objs, cachep->buffer_size,
+		   name, active_objs, num_objs, cachep->size,
		   cachep->num, (1 << cachep->gfporder));
 	seq_printf(m, " : tunables %4u %4u %4u",
		   cachep->limit, cachep->batchcount, cachep->shared);
@@ -4437,7 +4436,7 @@ static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 	/* Find the cache in the chain of caches. */
 	mutex_lock(&cache_chain_mutex);
 	res = -EINVAL;
-	list_for_each_entry(cachep, &cache_chain, next) {
+	list_for_each_entry(cachep, &cache_chain, list) {
		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 || batchcount < 1 ||
					batchcount > limit || shared < 0) {
@@ -4513,7 +4512,7 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
 	int i;
 	if (n[0] == n[1])
		return;
-	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
+	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
			continue;
		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -506,13 +506,6 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-struct kmem_cache {
-	unsigned int size, align;
-	unsigned long flags;
-	const char *name;
-	void (*ctor)(void *);
-};
-
 struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
 {
@@ -523,7 +516,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 
 	if (c) {
		c->name = name;
-		c->size = size;
+		c->size = c->object_size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -311,7 +311,7 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
	 * and whatever may come after it.
	 */
 	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->objsize;
+		return s->object_size;
 
 #endif
 	/*
@@ -609,11 +609,11 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 	if (p > addr + 16)
		print_section("Bytes b4 ", p - 16, 16);
 
-	print_section("Object ", p, min_t(unsigned long, s->objsize,
+	print_section("Object ", p, min_t(unsigned long, s->object_size,
				PAGE_SIZE));
 	if (s->flags & SLAB_RED_ZONE)
-		print_section("Redzone ", p + s->objsize,
-			s->inuse - s->objsize);
+		print_section("Redzone ", p + s->object_size,
+			s->inuse - s->object_size);
 
 	if (s->offset)
		off = s->offset + sizeof(void *);
@@ -655,12 +655,12 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
 	u8 *p = object;
 
 	if (s->flags & __OBJECT_POISON) {
-		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize - 1] = POISON_END;
+		memset(p, POISON_FREE, s->object_size - 1);
+		p[s->object_size - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
-		memset(p + s->objsize, val, s->inuse - s->objsize);
+		memset(p + s->object_size, val, s->inuse - s->object_size);
 }
 
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
@@ -705,10 +705,10 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
  * 	0xa5 (POISON_END)
  *
- * object + s->objsize
+ * object + s->object_size
  * 	Padding to reach word boundary. This is also used for Redzoning.
  * 	Padding is extended by another word if Redzoning is enabled and
- * 	objsize == inuse.
+ * 	object_size == inuse.
  *
  * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
  * 	0xcc (RED_ACTIVE) for objects in use.
@@ -727,7 +727,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * object + s->size
  * 	Nothing is used beyond s->size.
  *
- * If slabcaches are merged then the objsize and inuse boundaries are mostly
+ * If slabcaches are merged then the object_size and inuse boundaries are mostly
  * ignored. And therefore no slab options that rely on these boundaries
  * may be used with merged slabcaches.
  */
@@ -787,25 +787,25 @@ static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
 {
 	u8 *p = object;
-	u8 *endobject = object + s->objsize;
+	u8 *endobject = object + s->object_size;
 
 	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
-			endobject, val, s->inuse - s->objsize))
+			endobject, val, s->inuse - s->object_size))
			return 0;
 	} else {
-		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->objsize);
+				endobject, POISON_INUSE, s->inuse - s->object_size);
		}
 	}
 
 	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
-					POISON_FREE, s->objsize - 1) ||
+					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
-				p + s->objsize - 1, POISON_END, 1)))
+				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
@@ -926,7 +926,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
			page->freelist);
 
		if (!alloc)
-			print_section("Object ", (void *)object, s->objsize);
+			print_section("Object ", (void *)object, s->object_size);
 
		dump_stack();
 	}
@@ -942,14 +942,14 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	lockdep_trace_alloc(flags);
 	might_sleep_if(flags & __GFP_WAIT);
 
-	return should_failslab(s->objsize, flags, s->flags);
+	return should_failslab(s->object_size, flags, s->flags);
 }
 
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -966,13 +966,13 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
		unsigned long flags;
 
		local_irq_save(flags);
-		kmemcheck_slab_free(s, x, s->objsize);
-		debug_check_no_locks_freed(x, s->objsize);
+		kmemcheck_slab_free(s, x, s->object_size);
+		debug_check_no_locks_freed(x, s->object_size);
		local_irq_restore(flags);
 	}
 #endif
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(x, s->objsize);
+		debug_check_no_obj_freed(x, s->object_size);
 }
 
 /*
@@ -1207,7 +1207,7 @@ out:
 
 __setup("slub_debug", setup_slub_debug);
 
-static unsigned long kmem_cache_flags(unsigned long objsize,
+static unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
 {
@@ -1237,7 +1237,7 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
 static inline void remove_full(struct kmem_cache *s, struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long objsize,
+static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
 {
@@ -2098,10 +2098,10 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
		"SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
		nid, gfpflags);
 	printk(KERN_WARNING "  cache: %s, object size: %d, buffer size: %d, "
-		"default order: %d, min order: %d\n", s->name, s->objsize,
+		"default order: %d, min order: %d\n", s->name, s->object_size,
		s->size, oo_order(s->oo), oo_order(s->min));
 
-	if (oo_order(s->min) > get_order(s->objsize))
+	if (oo_order(s->min) > get_order(s->object_size))
		printk(KERN_WARNING "  %s debugging increased min order, use "
		       "slub_debug=O to disable.\n", s->name);
 
@@ -2374,7 +2374,7 @@ redo:
 	}
 
 	if (unlikely(gfpflags & __GFP_ZERO) && object)
-		memset(object, 0, s->objsize);
+		memset(object, 0, s->object_size);
 
 	slab_post_alloc_hook(s, gfpflags, object);
 
@@ -2385,7 +2385,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
 
 	return ret;
 }
@@ -2415,7 +2415,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
-				    s->objsize, s->size, gfpflags, node);
+				    s->object_size, s->size, gfpflags, node);
 
 	return ret;
 }
@@ -2910,7 +2910,7 @@ static void set_min_partial(struct kmem_cache *s, unsigned long min)
 static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
 	unsigned long flags = s->flags;
-	unsigned long size = s->objsize;
+	unsigned long size = s->object_size;
 	unsigned long align = s->align;
 	int order;
 
@@ -2939,7 +2939,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
	 * end of the object and the free pointer. If not then add an
	 * additional word to have some bytes to store Redzone information.
	 */
-	if ((flags & SLAB_RED_ZONE) && size == s->objsize)
+	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
		size += sizeof(void *);
 #endif
 
@@ -2987,7 +2987,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
	 * user specified and the dynamic determination of cache line size
	 * on bootup.
	 */
-	align = calculate_alignment(flags, align, s->objsize);
+	align = calculate_alignment(flags, align, s->object_size);
 	s->align = align;
 
 	/*
@@ -3035,7 +3035,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 	memset(s, 0, kmem_size);
 	s->name = name;
 	s->ctor = ctor;
-	s->objsize = size;
+	s->object_size = size;
 	s->align = align;
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
 	s->reserved = 0;
@@ -3050,7 +3050,7 @@ static int kmem_cache_open(struct kmem_cache *s,
		 * Disable debugging flags that store metadata if the min slab
		 * order increased.
		 */
-		if (get_order(s->size) > get_order(s->objsize)) {
+		if (get_order(s->size) > get_order(s->object_size)) {
			s->flags &= ~DEBUG_METADATA_FLAGS;
			s->offset = 0;
			if (!calculate_sizes(s, -1))
@@ -3124,7 +3124,7 @@ error:
  */
 unsigned int kmem_cache_size(struct kmem_cache *s)
 {
-	return s->objsize;
+	return s->object_size;
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
@@ -3853,11 +3853,11 @@ void __init kmem_cache_init(void)
 
		if (s && s->size) {
			char *name = kasprintf(GFP_NOWAIT,
-				 "dma-kmalloc-%d", s->objsize);
+				 "dma-kmalloc-%d", s->object_size);
 
			BUG_ON(!name);
			kmalloc_dma_caches[i] = create_kmalloc_cache(name,
-				s->objsize, SLAB_CACHE_DMA);
+				s->object_size, SLAB_CACHE_DMA);
		}
 	}
 #endif
@@ -3951,7 +3951,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		 * Adjust the object sizes so that we clear
		 * the complete object on kzalloc.
		 */
-		s->objsize = max(s->objsize, (int)size);
+		s->object_size = max(s->object_size, (int)size);
		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 
		if (sysfs_slab_alias(s, name)) {
@@ -4634,7 +4634,7 @@ SLAB_ATTR_RO(align);
 
 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->objsize);
+	return sprintf(buf, "%d\n", s->object_size);
 }
 SLAB_ATTR_RO(object_size);
 
@@ -5438,7 +5438,7 @@ __initcall(slab_sysfs_init);
 static void print_slabinfo_header(struct seq_file *m)
 {
 	seq_puts(m, "slabinfo - version: 2.1\n");
-	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
+	seq_puts(m, "# name            <active_objs> <num_objs> <object_size> "
		   "<objperslab> <pagesperslab>");
 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
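As a closing illustration of why the shared field names matter, the sketch below models code that touches only the common fields and therefore does not care which allocator backs kmem_cache. This is not kernel code: the struct mirrors the SLOB definition added to include/linux/slab.h by this patch (the list member is left out since userspace has no struct list_head), and report(), kmem_cache_model and the sample numbers are hypothetical.

#include <stdio.h>

struct kmem_cache_model {
	unsigned int object_size;	/* original size of the object */
	unsigned int size;		/* aligned/padded/added-on size */
	unsigned int align;		/* alignment as calculated */
	unsigned long flags;		/* active flags on the slab */
	const char *name;		/* slab name for sysfs */
	int refcount;			/* use counter */
	void (*ctor)(void *);		/* called on object slot creation */
};

/* Generic code: reads only the common fields, so it could be shared
 * by SLAB, SLOB and SLUB once they all provide these members. */
static void report(const struct kmem_cache_model *c)
{
	printf("%-16s object_size=%u size=%u align=%u\n",
	       c->name, c->object_size, c->size, c->align);
}

int main(void)
{
	/* Illustrative numbers only. */
	struct kmem_cache_model caches[] = {
		{ .name = "task_struct", .object_size = 5984, .size = 6080, .align = 64 },
		{ .name = "dentry",      .object_size = 192,  .size = 192,  .align = 8  },
	};

	for (unsigned int i = 0; i < sizeof(caches) / sizeof(caches[0]); i++)
		report(&caches[i]);
	return 0;
}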