commit b57bdda58c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slub: Allow removal of slab caches during boot
  Revert "slub: Allow removal of slab caches during boot"
  slub numa: Fix rare allocation from unexpected node
  slab: use deferable timers for its periodic housekeeping
  slub: Use kmem_cache flags to detect if slab is in debugging mode.
  slub: Allow removal of slab caches during boot
  slub: Check kasprintf results in kmem_cache_init()
  SLUB: Constants need UL
  slub: Use a constant for a unspecified node.
  SLOB: Free objects to their own list
  slab: fix caller tracking on !CONFIG_DEBUG_SLAB && CONFIG_TRACING
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
@@ -128,7 +128,6 @@ enum pageflags {
 
 	/* SLUB */
 	PG_slub_frozen = PG_active,
-	PG_slub_debug = PG_error,
 };
 
 #ifndef __GENERATING_BOUNDS_H
@@ -215,7 +214,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 __PAGEFLAG(SlobFree, slob_free)
 
 __PAGEFLAG(SlubFrozen, slub_frozen)
-__PAGEFLAG(SlubDebug, slub_debug)
 
 /*
  * Private page markings that may be used by the filesystem that owns the page
diff --git a/include/linux/slab.h b/include/linux/slab.h
@@ -268,7 +268,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
 #define kmalloc_track_caller(size, flags) \
 	__kmalloc_track_caller(size, flags, _RET_IP_)
@@ -286,7 +287,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
  * standard allocator where we care about the real place the memory
  * allocation request comes from.
  */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
+	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
 #define kmalloc_node_track_caller(size, flags, node) \
 	__kmalloc_node_track_caller(size, flags, node, \
diff --git a/mm/slab.c b/mm/slab.c
@@ -860,7 +860,7 @@ static void __cpuinit start_cpu_timer(int cpu)
 	 */
 	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_DELAYED_WORK(reap_work, cache_reap);
+		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
 					__round_jiffies_relative(HZ, cpu));
 	}
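
The hunk above ("slab: use deferable timers for its periodic housekeeping") makes the cache_reap() timer deferrable, so an idle CPU is not woken purely to trim its per-CPU caches; the work runs when the CPU next wakes for some other reason. A minimal sketch of the deferrable-work pattern against the workqueue API of this era (the housekeeping_* names are hypothetical):

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static struct delayed_work housekeeping_work;

static void housekeeping_fn(struct work_struct *w)
{
	/* non-urgent periodic maintenance; re-arm roughly one second out */
	schedule_delayed_work(&housekeeping_work, round_jiffies_relative(HZ));
}

static int __init housekeeping_init(void)
{
	/* deferrable: on an idle CPU, fires with the next real wakeup */
	INIT_DELAYED_WORK_DEFERRABLE(&housekeeping_work, housekeeping_fn);
	schedule_delayed_work(&housekeeping_work, round_jiffies_relative(HZ));
	return 0;
}
late_initcall(housekeeping_init);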
diff --git a/mm/slob.c b/mm/slob.c
@@ -396,6 +396,7 @@ static void slob_free(void *block, int size)
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
+	struct list_head *slob_list;
 
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
@@ -424,7 +425,13 @@ static void slob_free(void *block, int size)
 		set_slob(b, units,
 			(void *)((unsigned long)(b +
 					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
-		set_slob_page_free(sp, &free_slob_small);
+		if (size < SLOB_BREAK1)
+			slob_list = &free_slob_small;
+		else if (size < SLOB_BREAK2)
+			slob_list = &free_slob_medium;
+		else
+			slob_list = &free_slob_large;
+		set_slob_page_free(sp, slob_list);
 		goto out;
 	}
 
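
The two slob hunks implement "SLOB: Free objects to their own list": a page being re-queued at free time now goes on the free list for its own size band instead of always on free_slob_small, matching the allocation side, which already draws small, medium, and large objects from separate lists. A hypothetical standalone restatement of the dispatch (SLOB_BREAK1 and SLOB_BREAK2 are 256 and 1024 bytes in mm/slob.c of this period):

#include <stddef.h>

#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024

enum slob_class { SLOB_SMALL, SLOB_MEDIUM, SLOB_LARGE };

/* pick the free-list band that governs pages holding objects of `size' */
static enum slob_class slob_class_for(size_t size)
{
	if (size < SLOB_BREAK1)
		return SLOB_SMALL;
	else if (size < SLOB_BREAK2)
		return SLOB_MEDIUM;
	return SLOB_LARGE;
}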

mm/slub.c: 86 lines changed

diff --git a/mm/slub.c b/mm/slub.c
@@ -106,11 +106,17 @@
  * the fast path and disables lockless freelists.
  */
 
+#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DEBUG_FREE)
+
+static inline int kmem_cache_debug(struct kmem_cache *s)
+{
 #ifdef CONFIG_SLUB_DEBUG
-#define SLABDEBUG 1
+	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
 #else
-#define SLABDEBUG 0
+	return 0;
 #endif
+}
 
 /*
  * Issues still to be resolved:
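
This hunk is the core of "slub: Use kmem_cache flags to detect if slab is in debugging mode": the build-time SLABDEBUG constant (and, in later hunks, the per-page SlubDebug page flag) is replaced by one predicate on the cache's own flags, so debug paths are taken per cache rather than per kernel build. A user-space miniature of the same refactor, with hypothetical names:

#include <stdbool.h>

#define DBG_RED_ZONE	0x1u
#define DBG_POISON	0x2u
#define DBG_ANY		(DBG_RED_ZONE | DBG_POISON)

struct cache { unsigned int flags; };

/* a per-cache predicate instead of a build-wide on/off constant */
static inline bool cache_debug(const struct cache *c)
{
#ifdef ENABLE_DEBUG
	return c->flags & DBG_ANY;
#else
	(void)c;
	return false;
#endif
}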
@@ -161,8 +167,8 @@
 #define MAX_OBJS_PER_PAGE	65535 /* since page.objects is u16 */
 
 /* Internal SLUB flags */
-#define __OBJECT_POISON		0x80000000 /* Poison object */
-#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __OBJECT_POISON		0x80000000UL /* Poison object */
+#define __SYSFS_ADD_DEFERRED	0x40000000UL /* Not yet visible via sysfs */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
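
Why "SLUB: Constants need UL": these flags land in unsigned long fields, and a bare 0x80000000 has type unsigned int, so a complemented mask such as ~__OBJECT_POISON is computed at 32 bits and, once widened, also clears the upper half of a 64-bit flags word. A small plain-C sketch of that failure mode (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long flags = 0xdeadbeef00000000UL | 0x80000000UL;

	unsigned long bad  = flags & ~0x80000000;   /* 32-bit mask wipes high bits */
	unsigned long good = flags & ~0x80000000UL; /* clears only bit 31 */

	printf("bad  = %#lx\ngood = %#lx\n", bad, good);
	return 0;
}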
@@ -1072,7 +1078,7 @@ static inline struct page *alloc_slab_page(gfp_t flags, int node,
 
 	flags |= __GFP_NOTRACK;
 
-	if (node == -1)
+	if (node == NUMA_NO_NODE)
 		return alloc_pages(flags, order);
 	else
 		return alloc_pages_exact_node(node, flags, order);
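
The -1 to NUMA_NO_NODE conversions here and below are mechanical renames from "slub: Use a constant for a unspecified node.": NUMA_NO_NODE is simply a named -1, the "no node preference" sentinel, and naming it makes dispatch sites such as alloc_slab_page() self-describing. Condensed into a sketch (alloc_on is a hypothetical wrapper, and the include carrying NUMA_NO_NODE in this era is an assumption):

#include <linux/gfp.h>
#include <linux/topology.h>	/* assumed to pull in NUMA_NO_NODE here */

static struct page *alloc_on(gfp_t flags, int node, unsigned int order)
{
	if (node == NUMA_NO_NODE)	/* caller expressed no preference */
		return alloc_pages(flags, order);
	return alloc_pages_exact_node(node, flags, order);
}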
@@ -1156,9 +1162,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	inc_slabs_node(s, page_to_nid(page), page->objects);
 	page->slab = s;
 	page->flags |= 1 << PG_slab;
-	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
-			SLAB_STORE_USER | SLAB_TRACE))
-		__SetPageSlubDebug(page);
 
 	start = page_address(page);
 
@@ -1185,14 +1188,13 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
+	if (kmem_cache_debug(s)) {
 		void *p;
 
 		slab_pad_check(s, page);
 		for_each_object(p, s, page_address(page),
 						page->objects)
 			check_object(s, page, p, 0);
-		__ClearPageSlubDebug(page);
 	}
 
 	kmemcheck_free_shadow(page, compound_order(page));
@@ -1386,10 +1388,10 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
-	int searchnode = (node == -1) ? numa_node_id() : node;
+	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
 	page = get_partial_node(get_node(s, searchnode));
-	if (page || (flags & __GFP_THISNODE))
+	if (page || node != -1)
 		return page;
 
 	return get_any_partial(s, flags);
@@ -1414,8 +1416,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 	} else {
 		stat(s, DEACTIVATE_FULL);
-		if (SLABDEBUG && PageSlubDebug(page) &&
-				(s->flags & SLAB_STORE_USER))
+		if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
 			add_full(n, page);
 	}
 	slab_unlock(page);
@@ -1514,7 +1515,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != -1 && c->node != node)
+	if (node != NUMA_NO_NODE && c->node != node)
 		return 0;
 #endif
 	return 1;
@@ -1623,7 +1624,7 @@ load_freelist:
 	object = c->page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
-	if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 	c->freelist = get_freepointer(s, object);
@@ -1726,7 +1727,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, -1, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->objsize, s->size, gfpflags);
 
@@ -1737,7 +1738,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1, _RET_IP_);
+	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_alloc_notrace);
 #endif
@@ -1782,7 +1783,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	stat(s, FREE_SLOWPATH);
 	slab_lock(page);
 
-	if (unlikely(SLABDEBUG && PageSlubDebug(page)))
+	if (kmem_cache_debug(s))
 		goto debug;
 
 checks_ok:
@@ -2489,7 +2490,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
-		up_write(&slub_lock);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -2498,8 +2498,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 		sysfs_slab_remove(s);
-	} else
-		up_write(&slub_lock);
+	}
+	up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -2727,7 +2727,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, -1, _RET_IP_);
+	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3117,9 +3117,12 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
-		kmalloc_caches[i]. name =
-			kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
+		char *s = kasprintf(GFP_NOWAIT, "kmalloc-%d", 1 << i);
+
+		BUG_ON(!s);
+		kmalloc_caches[i].name = s;
+	}
 
 #ifdef CONFIG_SMP
 	register_cpu_notifier(&slab_notifier);
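
The kmem_cache_init() hunk ("slub: Check kasprintf results in kmem_cache_init()") stops storing a possibly-NULL kasprintf() result as a boot-time cache name; that early in boot an allocation failure is unrecoverable, hence BUG_ON. The same check-the-formatted-allocation habit in plain C with asprintf(3), as a rough analogue:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	for (int i = 3; i < 12; i++) {
		char *name;

		/* asprintf returns -1 (name undefined) on allocation failure */
		if (asprintf(&name, "kmalloc-%d", 1 << i) < 0)
			abort();	/* as final as the kernel's BUG_ON */
		puts(name);
		free(name);
	}
	return 0;
}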
@@ -3222,14 +3225,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		 */
 		s->objsize = max(s->objsize, (int)size);
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
-		up_write(&slub_lock);
 
 		if (sysfs_slab_alias(s, name)) {
-			down_write(&slub_lock);
 			s->refcount--;
-			up_write(&slub_lock);
 			goto err;
 		}
+		up_write(&slub_lock);
 		return s;
 	}
 
@@ -3238,14 +3239,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, GFP_KERNEL, name,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
-			up_write(&slub_lock);
 			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
 				list_del(&s->list);
-				up_write(&slub_lock);
 				kfree(s);
 				goto err;
 			}
+			up_write(&slub_lock);
 			return s;
 		}
 		kfree(s);
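
The kmem_cache_create() hunks, together with the kmem_cache_destroy() and slab_sysfs_init() changes, belong to "slub: Allow removal of slab caches during boot": slub_lock is now held across both the list manipulation and the sysfs step instead of being dropped and re-taken around it, so there is no window in which a cache sits on slab_caches but is unknown to sysfs. A user-space miniature of the hold-across-publication pattern (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct cache { const char *name; struct cache *next; };

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cache *registry;

/* stand-in for sysfs_slab_add(); assume it can fail */
static bool export_to_sysfs(struct cache *c) { (void)c; return true; }

int register_cache(struct cache *c)
{
	int err = 0;

	pthread_mutex_lock(&registry_lock);
	c->next = registry;		/* publish on the list ... */
	registry = c;
	if (!export_to_sysfs(c)) {	/* ... and to sysfs, under one lock */
		registry = c->next;	/* unpublish while still locked */
		err = -1;
	}
	pthread_mutex_unlock(&registry_lock);
	return err;
}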
@@ -3311,7 +3310,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, -1, caller);
+	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
 	/* Honor the call site pointer we recieved. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -3394,16 +3393,6 @@ static void validate_slab_slab(struct kmem_cache *s, struct page *page,
 	} else
 		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
 			s->name, page);
-
-	if (s->flags & DEBUG_DEFAULT_FLAGS) {
-		if (!PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug not set "
-				"on slab 0x%p\n", s->name, page);
-	} else {
-		if (PageSlubDebug(page))
-			printk(KERN_ERR "SLUB %s: SlubDebug set on "
-				"slab 0x%p\n", s->name, page);
-	}
 }
 
 static int validate_slab_node(struct kmem_cache *s,
@@ -4503,6 +4492,13 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
 static void sysfs_slab_remove(struct kmem_cache *s)
 {
+	if (slab_state < SYSFS)
+		/*
+		 * Sysfs has not been setup yet so no need to remove the
+		 * cache from sysfs.
+		 */
+		return;
+
 	kobject_uevent(&s->kobj, KOBJ_REMOVE);
 	kobject_del(&s->kobj);
 	kobject_put(&s->kobj);
@@ -4548,8 +4544,11 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;
 
+	down_write(&slub_lock);
+
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
+		up_write(&slub_lock);
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}
@@ -4574,6 +4573,7 @@ static int __init slab_sysfs_init(void)
 		kfree(al);
 	}
 
+	up_write(&slub_lock);
 	resiliency_test();
 	return 0;
 }