slub: explicit list_lock taking
The allocator fastpath rework changes the usage of the list_lock. Remove the list_lock handling from the helper functions that currently hide it from the critical sections and take the lock explicitly in those critical sections instead. This in turn simplifies the support functions (no __ variant is needed anymore) and simplifies the lock handling on bootstrap. Inline add_partial() since it becomes pretty simple.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent b789ef518b
commit 5cc6eee8a8

 mm/slub.c | 97 lines changed (1 file changed)
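To illustrate the locking convention the patch moves to, here is a minimal userspace sketch (not kernel code; struct node, nr_partial and the pthread mutex are stand-ins for kmem_cache_node, its partial-slab count and the list_lock spinlock): the list helpers assume the lock is already held, and every caller takes it explicitly around them.

/* sketch.c - caller-held list_lock pattern (illustrative only) */
#include <pthread.h>
#include <stdio.h>

struct node {
	pthread_mutex_t list_lock;	/* stands in for kmem_cache_node.list_lock */
	int nr_partial;			/* stands in for the partial slab count */
};

/* list_lock must be held by the caller. */
static void add_partial(struct node *n)
{
	n->nr_partial++;
}

/* list_lock must be held by the caller. */
static void remove_partial(struct node *n)
{
	n->nr_partial--;
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 0 };

	/* The critical section is visible at the call site, as in the patch. */
	pthread_mutex_lock(&n.list_lock);
	add_partial(&n);
	pthread_mutex_unlock(&n.list_lock);

	pthread_mutex_lock(&n.list_lock);
	remove_partial(&n);
	pthread_mutex_unlock(&n.list_lock);

	printf("nr_partial = %d\n", n.nr_partial);
	return 0;
}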
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -916,26 +916,27 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
+ *
+ * list_lock must be held.
  */
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s,
+	struct kmem_cache_node *n, struct page *page)
 {
-	spin_lock(&n->list_lock);
-	list_add(&page->lru, &n->full);
-	spin_unlock(&n->list_lock);
-}
-
-static void remove_full(struct kmem_cache *s, struct page *page)
-{
-	struct kmem_cache_node *n;
-
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
-	n = get_node(s, page_to_nid(page));
-
-	spin_lock(&n->list_lock);
+	list_add(&page->lru, &n->full);
+}
+
+/*
+ * list_lock must be held.
+ */
+static void remove_full(struct kmem_cache *s, struct page *page)
+{
+	if (!(s->flags & SLAB_STORE_USER))
+		return;
+
 	list_del(&page->lru);
-	spin_unlock(&n->list_lock);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1060,8 +1061,13 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 	}
 
 	/* Special debug activities for freeing objects */
-	if (!page->frozen && !page->freelist)
+	if (!page->frozen && !page->freelist) {
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		spin_lock(&n->list_lock);
 		remove_full(s, page);
+		spin_unlock(&n->list_lock);
+	}
 	if (s->flags & SLAB_STORE_USER)
 		set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
@@ -1170,7 +1176,8 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 			{ return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
 			void *object, u8 val) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
+					struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
@@ -1420,38 +1427,33 @@ static __always_inline int slab_trylock(struct page *page)
 }
 
 /*
- * Management of partially allocated slabs
+ * Management of partially allocated slabs.
+ *
+ * list_lock must be held.
  */
-static void add_partial(struct kmem_cache_node *n,
+static inline void add_partial(struct kmem_cache_node *n,
 				struct page *page, int tail)
 {
-	spin_lock(&n->list_lock);
 	n->nr_partial++;
 	if (tail)
 		list_add_tail(&page->lru, &n->partial);
 	else
 		list_add(&page->lru, &n->partial);
-	spin_unlock(&n->list_lock);
 }
 
-static inline void __remove_partial(struct kmem_cache_node *n,
+/*
+ * list_lock must be held.
+ */
+static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	list_del(&page->lru);
 	n->nr_partial--;
 }
 
-static void remove_partial(struct kmem_cache *s, struct page *page)
-{
-	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-	spin_lock(&n->list_lock);
-	__remove_partial(n, page);
-	spin_unlock(&n->list_lock);
-}
-
 /*
- * Lock slab and remove from the partial list.
+ * Lock slab, remove from the partial list and put the object into the
+ * per cpu freelist.
  *
  * Must hold list_lock.
  */
@@ -1459,7 +1461,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 					struct page *page)
 {
 	if (slab_trylock(page)) {
-		__remove_partial(n, page);
+		remove_partial(n, page);
 		return 1;
 	}
 	return 0;
@@ -1576,12 +1578,17 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 	if (page->inuse) {
 
 		if (page->freelist) {
+			spin_lock(&n->list_lock);
 			add_partial(n, page, tail);
+			spin_unlock(&n->list_lock);
 			stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
 		} else {
 			stat(s, DEACTIVATE_FULL);
-			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
-				add_full(n, page);
+			if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER)) {
+				spin_lock(&n->list_lock);
+				add_full(s, n, page);
+				spin_unlock(&n->list_lock);
+			}
 		}
 		slab_unlock(page);
 	} else {
@@ -1597,7 +1604,9 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 			 * kmem_cache_shrink can reclaim any empty slabs from
 			 * the partial list.
 			 */
+			spin_lock(&n->list_lock);
 			add_partial(n, page, 1);
+			spin_unlock(&n->list_lock);
 			slab_unlock(page);
 		} else {
 			slab_unlock(page);
@@ -2099,7 +2108,11 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * then add it.
 	 */
 	if (unlikely(!prior)) {
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		spin_lock(&n->list_lock);
 		add_partial(get_node(s, page_to_nid(page)), page, 1);
+		spin_unlock(&n->list_lock);
 		stat(s, FREE_ADD_PARTIAL);
 	}
 
@@ -2113,7 +2126,11 @@ slab_empty:
 		/*
 		 * Slab still on the partial list.
 		 */
-		remove_partial(s, page);
+		struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+
+		spin_lock(&n->list_lock);
+		remove_partial(n, page);
+		spin_unlock(&n->list_lock);
 		stat(s, FREE_REMOVE_PARTIAL);
 	}
 	slab_unlock(page);
@@ -2391,7 +2408,6 @@ static void early_kmem_cache_node_alloc(int node)
 {
 	struct page *page;
 	struct kmem_cache_node *n;
-	unsigned long flags;
 
 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
 
@@ -2418,14 +2434,7 @@ static void early_kmem_cache_node_alloc(int node)
 	init_kmem_cache_node(n, kmem_cache_node);
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
-	/*
-	 * lockdep requires consistent irq usage for each lock
-	 * so even though there cannot be a race this early in
-	 * the boot sequence, we still disable irqs.
-	 */
-	local_irq_save(flags);
 	add_partial(n, page, 0);
-	local_irq_restore(flags);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2709,7 +2718,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			__remove_partial(n, page);
+			remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
@@ -3047,7 +3056,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
 				 * may have freed the last object and be
 				 * waiting to release the slab.
 				 */
-				__remove_partial(n, page);
+				remove_partial(n, page);
 				slab_unlock(page);
 				discard_slab(s, page);
 			} else {