mm, slub: return slab page from get_partial() and set c->page afterwards

The function get_partial() finds a suitable page on a partial list, acquires
and returns its freelist, and assigns the page pointer to kmem_cache_cpu.
In a later patch we will need more control over the kmem_cache_cpu.page
assignment, so instead of passing a kmem_cache_cpu pointer, pass a pointer to
a pointer to a page that get_partial() can fill; the caller then assigns the
kmem_cache_cpu.page pointer. There is no functional change, as all of this
still happens with IRQs disabled.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Author: Vlastimil Babka <vbabka@suse.cz>
Date:   2021-05-11 14:05:22 +02:00
Commit: 75c8ff281d
Parent: 53a0de06e5

--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2017,7 +2017,7 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
  * Try to allocate a partial slab from a specific node.
  */
 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-			      struct kmem_cache_cpu *c, gfp_t flags)
+			      struct page **ret_page, gfp_t flags)
 {
 	struct page *page, *page2;
 	void *object = NULL;
@@ -2046,7 +2046,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 
 		available += objects;
 		if (!object) {
-			c->page = page;
+			*ret_page = page;
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 		} else {
@@ -2066,7 +2066,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
-		struct kmem_cache_cpu *c)
+		struct page **ret_page)
 {
 #ifdef CONFIG_NUMA
 	struct zonelist *zonelist;
@@ -2108,7 +2108,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 			if (n && cpuset_zone_allowed(zone, flags) &&
 					n->nr_partial > s->min_partial) {
-				object = get_partial_node(s, n, c, flags);
+				object = get_partial_node(s, n, ret_page, flags);
 				if (object) {
 					/*
 					 * Don't check read_mems_allowed_retry()
@@ -2130,7 +2130,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
  * Get a partial page, lock it and return it.
  */
 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
-		struct kmem_cache_cpu *c)
+		struct page **ret_page)
 {
 	void *object;
 	int searchnode = node;
@@ -2138,11 +2138,11 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 	if (node == NUMA_NO_NODE)
 		searchnode = numa_mem_id();
 
-	object = get_partial_node(s, get_node(s, searchnode), c, flags);
+	object = get_partial_node(s, get_node(s, searchnode), ret_page, flags);
 	if (object || node != NUMA_NO_NODE)
 		return object;
 
-	return get_any_partial(s, flags, c);
+	return get_any_partial(s, flags, ret_page);
 }
 
 #ifdef CONFIG_PREEMPTION
@@ -2754,9 +2754,11 @@ new_slab:
 		goto redo;
 	}
 
-	freelist = get_partial(s, gfpflags, node, c);
-	if (freelist)
+	freelist = get_partial(s, gfpflags, node, &page);
+	if (freelist) {
+		c->page = page;
 		goto check_new_page;
+	}
 
 	page = new_slab(s, gfpflags, node);
@@ -2780,7 +2782,6 @@ new_slab:
 	c->page = page;
 
 check_new_page:
-	page = c->page;
 
 	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
 		goto load_freelist;