slab updates for 5.20/6.0

Merge tag 'slab-for-5.20_or_6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab updates from Vlastimil Babka:

 - An addition of 'accounted' flag to slab allocation tracepoints to
   indicate memcg_kmem accounting, by Vasily

 - An optimization of memcg handling in freeing paths, by Muchun

 - Various smaller fixes and cleanups

* tag 'slab-for-5.20_or_6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab_common: move generic bulk alloc/free functions to SLOB
  mm/sl[au]b: use own bulk free function when bulk alloc failed
  mm: slab: optimize memcg_slab_free_hook()
  mm/tracing: add 'accounted' entry into output of allocation tracepoints
  tools/vm/slabinfo: Handle files in debugfs
  mm/slub: Simplify __kmem_cache_alias()
  mm, slab: fix bad alignments
Linus Torvalds 2022-08-01 11:46:58 -07:00
commit 0dd1cabe8a
7 changed files with 134 additions and 158 deletions

include/trace/events/kmem.h

@@ -13,11 +13,12 @@ DECLARE_EVENT_CLASS(kmem_alloc,
TP_PROTO(unsigned long call_site,
const void *ptr,
struct kmem_cache *s,
size_t bytes_req,
size_t bytes_alloc,
gfp_t gfp_flags),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
TP_STRUCT__entry(
__field( unsigned long, call_site )
@@ -25,6 +26,7 @@ DECLARE_EVENT_CLASS(kmem_alloc,
__field( size_t, bytes_req )
__field( size_t, bytes_alloc )
__field( unsigned long, gfp_flags )
__field( bool, accounted )
),
TP_fast_assign(
@@ -33,42 +35,47 @@ DECLARE_EVENT_CLASS(kmem_alloc,
__entry->bytes_req = bytes_req;
__entry->bytes_alloc = bytes_alloc;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
((gfp_flags & __GFP_ACCOUNT) ||
(s && s->flags & SLAB_ACCOUNT)) : false;
),
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags))
show_gfp_flags(__entry->gfp_flags),
__entry->accounted ? "true" : "false")
);
DEFINE_EVENT(kmem_alloc, kmalloc,
TP_PROTO(unsigned long call_site, const void *ptr,
TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);
DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
TP_PROTO(unsigned long call_site, const void *ptr,
TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
);
DECLARE_EVENT_CLASS(kmem_alloc_node,
TP_PROTO(unsigned long call_site,
const void *ptr,
struct kmem_cache *s,
size_t bytes_req,
size_t bytes_alloc,
gfp_t gfp_flags,
int node),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),
TP_STRUCT__entry(
__field( unsigned long, call_site )
@@ -77,6 +84,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__field( size_t, bytes_alloc )
__field( unsigned long, gfp_flags )
__field( int, node )
__field( bool, accounted )
),
TP_fast_assign(
@@ -86,33 +94,37 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->bytes_alloc = bytes_alloc;
__entry->gfp_flags = (__force unsigned long)gfp_flags;
__entry->node = node;
__entry->accounted = IS_ENABLED(CONFIG_MEMCG_KMEM) ?
((gfp_flags & __GFP_ACCOUNT) ||
(s && s->flags & SLAB_ACCOUNT)) : false;
),
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
(void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
show_gfp_flags(__entry->gfp_flags),
__entry->node)
__entry->node,
__entry->accounted ? "true" : "false")
);
DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
TP_PROTO(unsigned long call_site, const void *ptr,
size_t bytes_req, size_t bytes_alloc,
struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
gfp_t gfp_flags, int node),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);
DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
TP_PROTO(unsigned long call_site, const void *ptr,
size_t bytes_req, size_t bytes_alloc,
struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
gfp_t gfp_flags, int node),
TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
);
TRACE_EVENT(kfree,
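
The practical effect of the tracepoint changes above: with CONFIG_MEMCG_KMEM enabled, kmalloc and kmem_cache_alloc events now report accounted=true whenever the allocation is charged to a memory cgroup, either because the gfp mask carries __GFP_ACCOUNT or because the cache was created with SLAB_ACCOUNT; without CONFIG_MEMCG_KMEM the field is hard-wired to false. A minimal sketch (the cache name and the accounted_demo() wrapper are hypothetical, not part of this series):

static void accounted_demo(void)
{
	/* Allocations from a SLAB_ACCOUNT cache are always memcg-charged. */
	struct kmem_cache *demo = kmem_cache_create("demo", 64, 0,
						     SLAB_ACCOUNT, NULL);

	void *a = kmem_cache_alloc(demo, GFP_KERNEL);      /* accounted=true  */
	void *b = kmalloc(64, GFP_KERNEL | __GFP_ACCOUNT); /* accounted=true  */
	void *c = kmalloc(64, GFP_KERNEL);                 /* accounted=false */

	kfree(c);
	kfree(b);
	kmem_cache_free(demo, a);
	kmem_cache_destroy(demo);
}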

mm/slab.c

@@ -3230,7 +3230,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, size_t orig_
}
/* ___cache_alloc_node can fall back to other nodes */
ptr = ____cache_alloc_node(cachep, flags, nodeid);
out:
out:
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
init = slab_want_init_on_alloc(flags, cachep);
@@ -3259,7 +3259,7 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
if (!objp)
objp = ____cache_alloc_node(cache, flags, numa_mem_id());
out:
out:
return objp;
}
#else
@@ -3406,9 +3406,10 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
{
bool init;
memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
if (is_kfence_address(objp)) {
kmemleak_free_recursive(objp, cachep->flags);
memcg_slab_free_hook(cachep, &objp, 1);
__kfence_free(objp);
return;
}
@@ -3441,7 +3442,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
check_irq_off();
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, caller);
memcg_slab_free_hook(cachep, &objp, 1);
/*
* Skip calling cache_free_alien() when the platform is not numa.
@@ -3478,7 +3478,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
{
void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
trace_kmem_cache_alloc(_RET_IP_, ret,
trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
cachep->object_size, cachep->size, flags);
return ret;
@@ -3553,7 +3553,7 @@ error:
local_irq_enable();
cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
slab_post_alloc_hook(s, objcg, flags, i, p, false);
__kmem_cache_free_bulk(s, i, p);
kmem_cache_free_bulk(s, i, p);
return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
@@ -3567,7 +3567,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(_RET_IP_, ret,
trace_kmalloc(_RET_IP_, ret, cachep,
size, cachep->size, flags);
return ret;
}
@@ -3592,7 +3592,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, cachep->object_size, _RET_IP_);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
cachep->object_size, cachep->size,
flags, nodeid);
@@ -3611,7 +3611,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
ret = slab_alloc_node(cachep, flags, nodeid, size, _RET_IP_);
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc_node(_RET_IP_, ret,
trace_kmalloc_node(_RET_IP_, ret, cachep,
size, cachep->size,
flags, nodeid);
return ret;
@@ -3694,7 +3694,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
ret = slab_alloc(cachep, NULL, flags, size, caller);
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(caller, ret,
trace_kmalloc(caller, ret, cachep,
size, cachep->size, flags);
return ret;

mm/slab.h

@@ -380,15 +380,6 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos);
/*
* Generic implementation of bulk operations
* These are useful for situations in which the allocator cannot
* perform optimizations. In that case segments of the object listed
* may be allocated or freed using these operations.
*/
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
@@ -547,36 +538,22 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
obj_cgroup_put(objcg);
}
static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void **p, int objects)
{
struct kmem_cache *s;
struct obj_cgroup **objcgs;
struct obj_cgroup *objcg;
struct slab *slab;
unsigned int off;
int i;
if (!memcg_kmem_enabled())
return;
objcgs = slab_objcgs(slab);
if (!objcgs)
return;
for (i = 0; i < objects; i++) {
if (unlikely(!p[i]))
continue;
slab = virt_to_slab(p[i]);
/* we could be given a kmalloc_large() object, skip those */
if (!slab)
continue;
objcgs = slab_objcgs(slab);
if (!objcgs)
continue;
if (!s_orig)
s = slab->slab_cache;
else
s = s_orig;
struct obj_cgroup *objcg;
unsigned int off;
off = obj_to_index(s, slab, p[i]);
objcg = objcgs[off];
@@ -628,7 +605,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
{
}
static inline void memcg_slab_free_hook(struct kmem_cache *s,
static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
void **p, int objects)
{
}

mm/slab_common.c

@@ -26,13 +26,12 @@
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>
#include "internal.h"
#include "slab.h"
#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>
#include "internal.h"
#include "slab.h"
enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
@@ -105,33 +104,6 @@ static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
}
#endif
void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
size_t i;
for (i = 0; i < nr; i++) {
if (s)
kmem_cache_free(s, p[i]);
else
kfree(p[i]);
}
}
int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
void **p)
{
size_t i;
for (i = 0; i < nr; i++) {
void *x = p[i] = kmem_cache_alloc(s, flags);
if (!x) {
__kmem_cache_free_bulk(s, i, p);
return 0;
}
}
return i;
}
/*
* Figure out what the alignment of the objects will be given a set of
* flags, a user specified alignment and the size of the objects.
@@ -959,7 +931,7 @@ EXPORT_SYMBOL(kmalloc_order);
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
void *ret = kmalloc_order(size, flags, order);
trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);

mm/slob.c

@@ -507,7 +507,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
*m = size;
ret = (void *)m + minalign;
trace_kmalloc_node(caller, ret,
trace_kmalloc_node(caller, ret, NULL,
size, size + minalign, gfp, node);
} else {
unsigned int order = get_order(size);
@@ -516,7 +516,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
gfp |= __GFP_COMP;
ret = slob_new_pages(gfp, order, node);
trace_kmalloc_node(caller, ret,
trace_kmalloc_node(caller, ret, NULL,
size, PAGE_SIZE << order, gfp, node);
}
@@ -616,12 +616,12 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
if (c->size < PAGE_SIZE) {
b = slob_alloc(c->size, flags, c->align, node, 0);
trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
SLOB_UNITS(c->size) * SLOB_UNIT,
flags, node);
} else {
b = slob_new_pages(flags, get_order(c->size), node);
trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
PAGE_SIZE << get_order(c->size),
flags, node);
}
@@ -692,16 +692,33 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
}
EXPORT_SYMBOL(kmem_cache_free);
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
void kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
__kmem_cache_free_bulk(s, size, p);
size_t i;
for (i = 0; i < nr; i++) {
if (s)
kmem_cache_free(s, p[i]);
else
kfree(p[i]);
}
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
void **p)
{
return __kmem_cache_alloc_bulk(s, flags, size, p);
size_t i;
for (i = 0; i < nr; i++) {
void *x = p[i] = kmem_cache_alloc(s, flags);
if (!x) {
kmem_cache_free_bulk(s, i, p);
return 0;
}
}
return i;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
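
For context, the SLOB routines above and the per-allocator fast paths implement the same caller-facing contract: kmem_cache_alloc_bulk() returns the number of objects allocated, or 0 after cleaning up any partial allocation, and kmem_cache_free_bulk() releases them. A hypothetical caller sketch (demo_bulk() and the array size are made up for illustration):

static int demo_bulk(struct kmem_cache *cache)
{
	void *objs[16];

	/* On failure the allocator has already freed any partial batch. */
	if (!kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs))
		return -ENOMEM;

	/* ... use the objects ... */

	kmem_cache_free_bulk(cache, ARRAY_SIZE(objs), objs);
	return 0;
}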

mm/slub.c

@@ -3257,7 +3257,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
{
void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
s->size, gfpflags);
return ret;
@@ -3280,7 +3280,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_lru);
void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
@@ -3292,7 +3292,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
trace_kmem_cache_alloc_node(_RET_IP_, ret, s,
s->object_size, s->size, gfpflags, node);
return ret;
@@ -3306,7 +3306,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
trace_kmalloc_node(_RET_IP_, ret,
trace_kmalloc_node(_RET_IP_, ret, s,
size, s->size, gfpflags, node);
ret = kasan_kmalloc(s, ret, size, gfpflags);
@@ -3464,9 +3464,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
struct kmem_cache_cpu *c;
unsigned long tid;
/* memcg_slab_free_hook() is already called for bulk free. */
if (!tail)
memcg_slab_free_hook(s, &head, 1);
redo:
/*
* Determine the currently cpus per cpu slab.
@@ -3526,9 +3523,10 @@ redo:
}
static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
void *head, void *tail, int cnt,
void *head, void *tail, void **p, int cnt,
unsigned long addr)
{
memcg_slab_free_hook(s, slab, p, cnt);
/*
* With KASAN enabled slab_free_freelist_hook modifies the freelist
* to remove objects, whose reuse must be delayed.
@@ -3550,7 +3548,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
if (!s)
return;
trace_kmem_cache_free(_RET_IP_, x, s->name);
slab_free(s, virt_to_slab(x), x, NULL, 1, _RET_IP_);
slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_free);
@@ -3591,88 +3589,67 @@ static inline
int build_detached_freelist(struct kmem_cache *s, size_t size,
void **p, struct detached_freelist *df)
{
size_t first_skipped_index = 0;
int lookahead = 3;
void *object;
struct folio *folio;
struct slab *slab;
/* Always re-init detached_freelist */
df->slab = NULL;
do {
object = p[--size];
/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
} while (!object && size);
if (!object)
return 0;
size_t same;
object = p[--size];
folio = virt_to_folio(object);
if (!s) {
/* Handle kalloc'ed objects */
if (unlikely(!folio_test_slab(folio))) {
free_large_kmalloc(folio, object);
p[size] = NULL; /* mark object processed */
df->slab = NULL;
return size;
}
/* Derive kmem_cache from object */
slab = folio_slab(folio);
df->s = slab->slab_cache;
df->slab = folio_slab(folio);
df->s = df->slab->slab_cache;
} else {
slab = folio_slab(folio);
df->slab = folio_slab(folio);
df->s = cache_from_obj(s, object); /* Support for memcg */
}
if (is_kfence_address(object)) {
slab_free_hook(df->s, object, false);
__kfence_free(object);
p[size] = NULL; /* mark object processed */
return size;
}
/* Start new detached freelist */
df->slab = slab;
set_freepointer(df->s, object, NULL);
df->tail = object;
df->freelist = object;
p[size] = NULL; /* mark object processed */
df->cnt = 1;
if (is_kfence_address(object))
return size;
set_freepointer(df->s, object, NULL);
same = size;
while (size) {
object = p[--size];
if (!object)
continue; /* Skip processed objects */
/* df->slab is always set at this point */
if (df->slab == virt_to_slab(object)) {
/* Opportunity build freelist */
set_freepointer(df->s, object, df->freelist);
df->freelist = object;
df->cnt++;
p[size] = NULL; /* mark object processed */
same--;
if (size != same)
swap(p[size], p[same]);
continue;
}
/* Limit look ahead search */
if (!--lookahead)
break;
if (!first_skipped_index)
first_skipped_index = size + 1;
}
return first_skipped_index;
return same;
}
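
The rewritten build_detached_freelist() above drops the lookahead limit and the NULL 'processed' markers in favor of partitioning: every object that shares a slab with p[size-1] is linked into the detached freelist and swapped towards the tail of p[], so the returned value is the new working size and the just-queued objects sit contiguously at &p[size], which is exactly the slice kmem_cache_free_bulk() below passes to slab_free() for memcg_slab_free_hook(). A small worked example (hypothetical objects, assuming the behavior shown above):

/*
 * p[] = { A0, B1, A2, B3, A4 }, size = 5; A* live on slab A, B* on slab B.
 *
 *   A4 seeds the freelist, same = 4
 *   B3: other slab, untouched
 *   A2: linked, same-- -> 3, swap(p[2], p[3])  => { A0, B1, B3, A2, A4 }
 *   B1: other slab, untouched
 *   A0: linked, same-- -> 2, swap(p[0], p[2])  => { B3, B1, A0, A2, A4 }
 *
 * Returns 2 with df->cnt == 3: p[2..4] are the slab-A objects just queued,
 * and the next pass of the caller's loop handles p[0..1] from slab B.
 */
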
/* Note that interrupts must be enabled when calling this function. */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
{
if (WARN_ON(!size))
if (!size)
return;
memcg_slab_free_hook(s, p, size);
do {
struct detached_freelist df;
@@ -3680,7 +3657,8 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
if (!df.slab)
continue;
slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_);
slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
_RET_IP_);
} while (likely(size));
}
EXPORT_SYMBOL(kmem_cache_free_bulk);
@@ -3760,7 +3738,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
error:
slub_put_cpu_ptr(s->cpu_slab);
slab_post_alloc_hook(s, objcg, flags, i, p, false);
__kmem_cache_free_bulk(s, i, p);
kmem_cache_free_bulk(s, i, p);
return 0;
}
EXPORT_SYMBOL(kmem_cache_alloc_bulk);
@@ -4441,7 +4419,7 @@ void *__kmalloc(size_t size, gfp_t flags)
ret = slab_alloc(s, NULL, flags, _RET_IP_, size);
trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags);
ret = kasan_kmalloc(s, ret, size, flags);
@@ -4475,7 +4453,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
ret = kmalloc_large_node(size, flags, node);
trace_kmalloc_node(_RET_IP_, ret,
trace_kmalloc_node(_RET_IP_, ret, NULL,
size, PAGE_SIZE << get_order(size),
flags, node);
@@ -4489,7 +4467,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
ret = slab_alloc_node(s, NULL, flags, node, _RET_IP_, size);
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, flags, node);
ret = kasan_kmalloc(s, ret, size, flags);
@@ -4581,7 +4559,7 @@ void kfree(const void *x)
return;
}
slab = folio_slab(folio);
slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
}
EXPORT_SYMBOL(kfree);
@@ -4890,6 +4868,9 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
if (sysfs_slab_alias(s, name))
return NULL;
s->refcount++;
/*
@@ -4898,11 +4879,6 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
*/
s->object_size = max(s->object_size, size);
s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
if (sysfs_slab_alias(s, name)) {
s->refcount--;
s = NULL;
}
}
return s;
@@ -4948,7 +4924,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
ret = slab_alloc(s, NULL, gfpflags, caller, size);
/* Honor the call site pointer we received. */
trace_kmalloc(caller, ret, size, s->size, gfpflags);
trace_kmalloc(caller, ret, s, size, s->size, gfpflags);
return ret;
}
@@ -4964,7 +4940,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
ret = kmalloc_large_node(size, gfpflags, node);
trace_kmalloc_node(caller, ret,
trace_kmalloc_node(caller, ret, NULL,
size, PAGE_SIZE << get_order(size),
gfpflags, node);
@@ -4979,7 +4955,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ret = slab_alloc_node(s, NULL, gfpflags, node, caller, size);
/* Honor the call site pointer we received. */
trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
trace_kmalloc_node(caller, ret, s, size, s->size, gfpflags, node);
return ret;
}

tools/vm/slabinfo.c

@@ -233,6 +233,24 @@ static unsigned long read_slab_obj(struct slabinfo *s, const char *name)
return l;
}
static unsigned long read_debug_slab_obj(struct slabinfo *s, const char *name)
{
char x[128];
FILE *f;
size_t l;
snprintf(x, 128, "/sys/kernel/debug/slab/%s/%s", s->name, name);
f = fopen(x, "r");
if (!f) {
buffer[0] = 0;
l = 0;
} else {
l = fread(buffer, 1, sizeof(buffer), f);
buffer[l] = 0;
fclose(f);
}
return l;
}
/*
* Put a size string together
@@ -409,14 +427,18 @@ static void show_tracking(struct slabinfo *s)
{
printf("\n%s: Kernel object allocation\n", s->name);
printf("-----------------------------------------------------------------------\n");
if (read_slab_obj(s, "alloc_calls"))
if (read_debug_slab_obj(s, "alloc_traces"))
printf("%s", buffer);
else if (read_slab_obj(s, "alloc_calls"))
printf("%s", buffer);
else
printf("No Data\n");
printf("\n%s: Kernel object freeing\n", s->name);
printf("------------------------------------------------------------------------\n");
if (read_slab_obj(s, "free_calls"))
if (read_debug_slab_obj(s, "free_traces"))
printf("%s", buffer);
else if (read_slab_obj(s, "free_calls"))
printf("%s", buffer);
else
printf("No Data\n");