mm/slub: make add_full() condition more explicit
The commit below is incomplete, as it didn't handle the add_full() part:

  commit a4d3f8916c65 ("slub: remove useless kmem_cache_debug() before remove_full()")

This patch checks for SLAB_STORE_USER instead of kmem_cache_debug(), since that should be the only context in which we need the list_lock for add_full().

Signed-off-by: Abel Wu <wuyun.wu@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Liu Xiang <liu.xiang6@zte.com.cn>
Link: https://lkml.kernel.org/r/20200811020240.1231-1-wuyun.wu@huawei.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9cf7a11183
parent 9f986d998a
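For context on why SLAB_STORE_USER is the deciding flag: add_full() itself bails out unless that flag is set, so it is the only debug flag under which the M_FULL path actually needs n->list_lock. A minimal sketch of the helper as it appears in mm/slub.c of this era (paraphrased from memory, not copied verbatim from the tree):

	/*
	 * Tracking of fully allocated slabs for debugging purposes.
	 * Nothing is done unless SLAB_STORE_USER is set, so that flag
	 * alone determines whether the caller must hold n->list_lock.
	 */
	static void add_full(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page)
	{
		if (!(s->flags & SLAB_STORE_USER))
			return;

		lockdep_assert_held(&n->list_lock);
		list_add(&page->slab_list, &n->full);
	}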
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2245,7 +2245,8 @@ redo:
 		}
 	} else {
 		m = M_FULL;
-		if (kmem_cache_debug(s) && !lock) {
+#ifdef CONFIG_SLUB_DEBUG
+		if ((s->flags & SLAB_STORE_USER) && !lock) {
 			lock = 1;
 			/*
 			 * This also ensures that the scanning of full
@@ -2254,6 +2255,7 @@ redo:
 			 */
 			spin_lock(&n->list_lock);
 		}
+#endif
 	}
 
 	if (l != m) {
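For comparison, kmem_cache_debug() covers every debug flag, not just the one add_full() acts on. A sketch of the relevant definitions as they stood around this change (reconstructed from memory; verify the exact form against mm/slab.h and mm/slub.c in the tree):

	/* mm/slab.h (sketch) */
	#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

	/* mm/slub.c (sketch) */
	static inline bool kmem_cache_debug(struct kmem_cache *s)
	{
		return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
	}

With only SLAB_RED_ZONE or SLAB_POISON enabled, the old condition took n->list_lock even though add_full() would return without touching n->full; testing SLAB_STORE_USER directly avoids that needless locking.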