Merge branch 'slab/for-6.2/cleanups' into slab/for-next
- Removal of dead code from deactivate_slab() by Hyeonggon Yoo.
- Fix of BUILD_BUG_ON() for sufficient early percpu size by Baoquan He.
- Make kmem_cache_alloc() kernel-doc less misleading, by myself.
commit 4b28ba9eea
include/linux/percpu.h
@@ -42,7 +42,7 @@
  * larger than PERCPU_DYNAMIC_EARLY_SIZE.
  */
 #define PERCPU_DYNAMIC_EARLY_SLOTS	128
-#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE	(20 << 10)
 
 /*
  * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
include/linux/slab.h
@@ -441,7 +441,18 @@ static_assert(PAGE_SHIFT <= 20);
 #endif /* !CONFIG_SLOB */
 
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
+
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache.
+ * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
+ *
+ * Return: pointer to the new object or %NULL in case of error
+ */
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 			   gfp_t gfpflags) __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *s, void *objp);
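For illustration, a minimal usage sketch of the API documented above (not part of this commit; the object type, cache name and helper functions are invented for the example):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical object type and cache, only for the example. */
struct foo {
	int id;
	char name[16];
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(gfp_t flags)
{
	/* kmem_cache_zalloc() is the __GFP_ZERO shortcut the kernel-doc mentions. */
	return kmem_cache_zalloc(foo_cache, flags);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cache, f);
}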
@@ -483,9 +494,9 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
 			__alloc_size(1);
 
 /**
- * kmalloc - allocate memory
+ * kmalloc - allocate kernel memory
  * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
+ * @flags: describe the allocation context
  *
  * kmalloc is the normal method of allocating memory
  * for objects smaller than page size in the kernel.
@@ -512,12 +523,12 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  * %GFP_ATOMIC
  *	Allocation will not sleep.  May use emergency pools.
  *
- * %GFP_HIGHUSER
- *	Allocate memory from high memory on behalf of user.
- *
  * Also it is possible to set different flags by OR'ing
  * in one or more of the following additional @flags:
  *
+ * %__GFP_ZERO
+ *	Zero the allocated memory before returning. Also see kzalloc().
+ *
  * %__GFP_HIGH
  *	This allocation has high priority and may use emergency pools.
  *
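To make the flag documentation above concrete, a small usage sketch (buffer sizes and call sites are invented for illustration):

#include <linux/slab.h>

/* Process context: may sleep, so GFP_KERNEL is the usual choice;
 * __GFP_ZERO asks for pre-zeroed memory, equivalent to kzalloc(). */
static void *alloc_ctx_buffer(void)
{
	return kmalloc(512, GFP_KERNEL | __GFP_ZERO);
}

/* Atomic context (e.g. under a spinlock): must not sleep, so GFP_ATOMIC,
 * which may dip into the emergency pools mentioned above. */
static void *alloc_atomic_buffer(void)
{
	return kmalloc(256, GFP_ATOMIC);
}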
mm/slab.c (10 changed lines)
@@ -3446,16 +3446,6 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 	return ret;
 }
 
-/**
- * kmem_cache_alloc - Allocate an object
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- *
- * Allocate an object from this cache.  The flags are only relevant
- * if the cache has no available objects.
- *
- * Return: pointer to the new object or %NULL in case of error
- */
 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	return __kmem_cache_alloc_lru(cachep, NULL, flags);
mm/slub.c (19 changed lines)
@@ -2411,7 +2411,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 			    void *freelist)
 {
-	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
+	enum slab_modes { M_NONE, M_PARTIAL, M_FREE, M_FULL_NOLIST };
 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
 	int free_delta = 0;
 	enum slab_modes mode = M_NONE;
@@ -2487,14 +2487,6 @@ redo:
 		 * acquire_slab() will see a slab that is frozen
 		 */
 		spin_lock_irqsave(&n->list_lock, flags);
-	} else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
-		mode = M_FULL;
-		/*
-		 * This also ensures that the scanning of full
-		 * slabs from diagnostic functions will not see
-		 * any frozen slabs.
-		 */
-		spin_lock_irqsave(&n->list_lock, flags);
 	} else {
 		mode = M_FULL_NOLIST;
 	}
@@ -2504,7 +2496,7 @@ redo:
 			    old.freelist, old.counters,
 			    new.freelist, new.counters,
 			    "unfreezing slab")) {
-		if (mode == M_PARTIAL || mode == M_FULL)
+		if (mode == M_PARTIAL)
 			spin_unlock_irqrestore(&n->list_lock, flags);
 		goto redo;
 	}
@@ -2518,10 +2510,6 @@ redo:
 		stat(s, DEACTIVATE_EMPTY);
 		discard_slab(s, slab);
 		stat(s, FREE_SLAB);
-	} else if (mode == M_FULL) {
-		add_full(s, n, slab);
-		spin_unlock_irqrestore(&n->list_lock, flags);
-		stat(s, DEACTIVATE_FULL);
 	} else if (mode == M_FULL_NOLIST) {
 		stat(s, DEACTIVATE_FULL);
 	}
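Condensed from the three deactivate_slab() hunks above, the mode handling after the removal has roughly the following shape. This is a reading aid, not the literal function body: the cmpxchg loop setup and some unchanged context are reconstructed and may differ slightly from the exact tree.

	/* Mode selection: M_FULL is gone, full slabs take the no-list path. */
	if (!new.inuse && n->nr_partial >= s->min_partial) {
		mode = M_FREE;
	} else if (new.freelist) {
		mode = M_PARTIAL;
		/* take list_lock so acquire_slab() sees a frozen slab */
		spin_lock_irqsave(&n->list_lock, flags);
	} else {
		mode = M_FULL_NOLIST;
	}

	if (!cmpxchg_double_slab(s, slab, old.freelist, old.counters,
				 new.freelist, new.counters,
				 "unfreezing slab")) {
		if (mode == M_PARTIAL)
			spin_unlock_irqrestore(&n->list_lock, flags);
		goto redo;
	}

	if (mode == M_PARTIAL) {
		add_partial(n, slab, tail);
		spin_unlock_irqrestore(&n->list_lock, flags);
		stat(s, tail);
	} else if (mode == M_FREE) {
		stat(s, DEACTIVATE_EMPTY);
		discard_slab(s, slab);
		stat(s, FREE_SLAB);
	} else if (mode == M_FULL_NOLIST) {
		stat(s, DEACTIVATE_FULL);
	}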
@@ -4017,7 +4005,8 @@ init_kmem_cache_node(struct kmem_cache_node *n)
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
-			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
+			NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
+			sizeof(struct kmem_cache_cpu));
 
 	/*
 	 * Must align to double word boundary for the double cmpxchg
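The fix multiplies by NR_KMALLOC_TYPES because there is a kmalloc cache, and hence a per-cpu struct kmem_cache_cpu, for every (type, size) combination, not just for every size. A standalone illustration of the arithmetic the check enforces (not kernel code; the numeric values and the stub struct are hypothetical stand-ins):

#include <assert.h>
#include <stddef.h>

#define PERCPU_DYNAMIC_EARLY_SIZE	(20 << 10)	/* value from the percpu.h hunk */
#define NR_KMALLOC_TYPES		4		/* assumed for illustration */
#define KMALLOC_SHIFT_HIGH		13		/* assumed for illustration */

struct kmem_cache_cpu_stub {		/* rough stand-in for struct kmem_cache_cpu */
	void *freelist;
	unsigned long tid;
	void *slab;
	void *partial;
};

/* Inverse of the BUILD_BUG_ON condition: the early percpu area must be
 * large enough for per-cpu data of all kmalloc caches. */
static_assert(PERCPU_DYNAMIC_EARLY_SIZE >=
	      NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
	      sizeof(struct kmem_cache_cpu_stub),
	      "early percpu area too small for all kmalloc per-cpu data");

int main(void)
{
	return 0;
}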