memcg: zap __memcg_{charge,uncharge}_slab

They are simple wrappers around memcg_{charge,uncharge}_kmem, so let's
zap them and call these functions directly.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Vladimir Davydov 2015-02-10 14:11:41 -08:00 committed by Linus Torvalds
parent 4c5018ce06
commit dbf22eb6d8
3 changed files with 8 additions and 22 deletions

View File

@@ -403,8 +403,9 @@ void memcg_update_array_size(int num_groups);
 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
 void __memcg_kmem_put_cache(struct kmem_cache *cachep);
 
-int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
-void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
+int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
+		      unsigned long nr_pages);
+void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages);
 
 int __memcg_cleanup_cache_params(struct kmem_cache *s);

View File

@@ -2495,8 +2495,8 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
 	return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg));
 }
 
-static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
-			     unsigned long nr_pages)
+int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
+		      unsigned long nr_pages)
 {
 	struct page_counter *counter;
 	int ret = 0;
@@ -2533,8 +2533,7 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
 	return ret;
 }
 
-static void memcg_uncharge_kmem(struct mem_cgroup *memcg,
-				unsigned long nr_pages)
+void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)
 {
 	page_counter_uncharge(&memcg->memory, nr_pages);
 	if (do_swap_account)
@@ -2767,20 +2766,6 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg,
 	current->memcg_kmem_skip_account = 0;
 }
 
-int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order)
-{
-	unsigned int nr_pages = 1 << order;
-
-	return memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages);
-}
-
-void __memcg_uncharge_slab(struct kmem_cache *cachep, int order)
-{
-	unsigned int nr_pages = 1 << order;
-
-	memcg_uncharge_kmem(cachep->memcg_params->memcg, nr_pages);
-}
-
 /*
  * Return the kmem_cache we're supposed to use for a slab allocation.
  * We try to use the current memcg's version of the cache.

View File

@@ -235,7 +235,7 @@ static __always_inline int memcg_charge_slab(struct kmem_cache *s,
 		return 0;
 	if (is_root_cache(s))
 		return 0;
-	return __memcg_charge_slab(s, gfp, order);
+	return memcg_charge_kmem(s->memcg_params->memcg, gfp, 1 << order);
 }
 
 static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order) static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
@@ -244,7 +244,7 @@ static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
 		return;
 	if (is_root_cache(s))
 		return;
-	__memcg_uncharge_slab(s, order);
+	memcg_uncharge_kmem(s->memcg_params->memcg, 1 << order);
 }
 #else
 static inline bool is_root_cache(struct kmem_cache *s)