tracing, slab: Define kmem_cache_alloc_notrace ifdef CONFIG_TRACING
Define kmem_cache_alloc_{,node}_notrace() if CONFIG_TRACING is enabled; otherwise perf-kmem will show wrong stats when CONFIG_KMEMTRACE is not defined, because a kmalloc() memory allocation may be traced by both trace_kmalloc() and trace_kmem_cache_alloc(). Signed-off-by: Li Zefan <lizf@cn.fujitsu.com> Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi> Cc: Christoph Lameter <cl@linux-foundation.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: linux-mm@kvack.org <linux-mm@kvack.org> Cc: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro> LKML-Reference: <4B21F89A.7000801@cn.fujitsu.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
125580380f
commit
0f24f1287a
@ -110,7 +110,7 @@ extern struct cache_sizes malloc_sizes[];
|
|||||||
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
|
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
|
||||||
void *__kmalloc(size_t size, gfp_t flags);
|
void *__kmalloc(size_t size, gfp_t flags);
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMTRACE
|
#ifdef CONFIG_TRACING
|
||||||
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
|
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
|
||||||
extern size_t slab_buffer_size(struct kmem_cache *cachep);
|
extern size_t slab_buffer_size(struct kmem_cache *cachep);
|
||||||
#else
|
#else
|
||||||
@ -166,7 +166,7 @@ found:
|
|||||||
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
||||||
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
|
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMTRACE
|
#ifdef CONFIG_TRACING
|
||||||
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
|
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
|
||||||
gfp_t flags,
|
gfp_t flags,
|
||||||
int nodeid);
|
int nodeid);
|
||||||
|
@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
|
|||||||
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
|
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
|
||||||
void *__kmalloc(size_t size, gfp_t flags);
|
void *__kmalloc(size_t size, gfp_t flags);
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMTRACE
|
#ifdef CONFIG_TRACING
|
||||||
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
|
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
|
||||||
#else
|
#else
|
||||||
static __always_inline void *
|
static __always_inline void *
|
||||||
@ -266,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
|
|||||||
void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
void *__kmalloc_node(size_t size, gfp_t flags, int node);
|
||||||
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
|
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMTRACE
|
#ifdef CONFIG_TRACING
|
||||||
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
|
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
|
||||||
gfp_t gfpflags,
|
gfp_t gfpflags,
|
||||||
int node);
|
int node);
|
||||||
|
@ -490,7 +490,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
|
|||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMTRACE
|
#ifdef CONFIG_TRACING
|
||||||
size_t slab_buffer_size(struct kmem_cache *cachep)
|
size_t slab_buffer_size(struct kmem_cache *cachep)
|
||||||
{
|
{
|
||||||
return cachep->buffer_size;
|
return cachep->buffer_size;
|
||||||
@ -3558,7 +3558,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(kmem_cache_alloc);
|
EXPORT_SYMBOL(kmem_cache_alloc);
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMTRACE
|
#ifdef CONFIG_TRACING
|
||||||
void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
|
void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
|
||||||
{
|
{
|
||||||
return __cache_alloc(cachep, flags, __builtin_return_address(0));
|
return __cache_alloc(cachep, flags, __builtin_return_address(0));
|
||||||
@ -3621,7 +3621,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(kmem_cache_alloc_node);
|
EXPORT_SYMBOL(kmem_cache_alloc_node);
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMTRACE
|
#ifdef CONFIG_TRACING
|
||||||
void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
|
void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
|
||||||
gfp_t flags,
|
gfp_t flags,
|
||||||
int nodeid)
|
int nodeid)
|
||||||
|
@ -1754,7 +1754,7 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(kmem_cache_alloc);
|
EXPORT_SYMBOL(kmem_cache_alloc);
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMTRACE
|
#ifdef CONFIG_TRACING
|
||||||
void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
|
void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
|
||||||
{
|
{
|
||||||
return slab_alloc(s, gfpflags, -1, _RET_IP_);
|
return slab_alloc(s, gfpflags, -1, _RET_IP_);
|
||||||
@ -1775,7 +1775,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
|
|||||||
EXPORT_SYMBOL(kmem_cache_alloc_node);
|
EXPORT_SYMBOL(kmem_cache_alloc_node);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_KMEMTRACE
|
#ifdef CONFIG_TRACING
|
||||||
void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
|
void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
|
||||||
gfp_t gfpflags,
|
gfp_t gfpflags,
|
||||||
int node)
|
int node)
|
||||||
|
Loading…
Reference in New Issue
Block a user