memcg: remove mem_cgroup_calc_reclaim()

Now that get_scan_ratio() returns the correct value during memcg reclaim as
well, mem_cgroup_calc_reclaim() can be removed.

With this, memcg reclaim gains the same anon/file reclaim balancing
capability as global reclaim.
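
As a rough worked example (editor's illustration; the numbers are made up):
with an inactive-file LRU of 1048576 (1 << 20) pages, priority 12, and
get_scan_ratio() reporting percent[file] = 60, both global and memcg reclaim
now compute scan = ((1 << 20) >> 12) * 60 / 100 = 153 pages for that list,
whereas the removed mem_cgroup_calc_reclaim() would have requested the
unweighted (1 << 20) >> 12 = 256 pages.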

Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
KOSAKI Motohiro, 2009-01-07 18:08:21 -08:00, committed by Linus Torvalds
commit 9439c1c95b, parent 3e2f41f1f6
3 changed files with 10 additions and 48 deletions

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h

@@ -97,9 +97,6 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
 						int priority);
 extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 						int priority);
-extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru);
 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
 					struct zone *zone);
 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
@@ -244,13 +241,6 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
 {
 }
 
-static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
-					struct zone *zone, int priority,
-					enum lru_list lru)
-{
-	return 0;
-}
-
 static inline bool mem_cgroup_disabled(void)
 {
 	return true;

diff --git a/mm/memcontrol.c b/mm/memcontrol.c

@@ -414,27 +414,6 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
 	mem->prev_priority = priority;
 }
 
-/*
- * Calculate # of pages to be scanned in this priority/zone.
- * See also vmscan.c
- *
- * priority starts from "DEF_PRIORITY" and decremented in each loop.
- * (see include/linux/mmzone.h)
- */
-long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-					int priority, enum lru_list lru)
-{
-	long nr_pages;
-	int nid = zone->zone_pgdat->node_id;
-	int zid = zone_idx(zone);
-	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
-
-	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
-
-	return (nr_pages >> priority);
-}
-
 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
 {
 	unsigned long active;

diff --git a/mm/vmscan.c b/mm/vmscan.c

@@ -1466,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone,
 	get_scan_ratio(zone, sc, percent);
 
 	for_each_evictable_lru(l) {
-		if (scan_global_lru(sc)) {
-			int file = is_file_lru(l);
-			int scan;
+		int file = is_file_lru(l);
+		int scan;
 
-			scan = zone_page_state(zone, NR_LRU_BASE + l);
-			if (priority) {
-				scan >>= priority;
-				scan = (scan * percent[file]) / 100;
-			}
+		scan = zone_page_state(zone, NR_LRU_BASE + l);
+		if (priority) {
+			scan >>= priority;
+			scan = (scan * percent[file]) / 100;
+		}
+		if (scan_global_lru(sc)) {
 			zone->lru[l].nr_scan += scan;
 			nr[l] = zone->lru[l].nr_scan;
 			if (nr[l] >= swap_cluster_max)
 				zone->lru[l].nr_scan = 0;
 			else
 				nr[l] = 0;
-		} else {
-			/*
-			 * This reclaim occurs not because zone memory shortage
-			 * but because memory controller hits its limit.
-			 * Don't modify zone reclaim related data.
-			 */
-			nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
-							priority, l);
-		}
+		} else
+			nr[l] = scan;
 	}
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
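
For reference, the following is an editor's sketch, not part of the patch: a
small user-space C model of the per-LRU scan calculation that shrink_zone()
now applies to both global and memcg reclaim. The LRU sizes, the percent[]
split and the priority value are invented for illustration; only the
arithmetic mirrors the kernel code above.

/*
 * Editor's sketch (not part of the patch): user-space model of the
 * per-LRU scan computation now shared by global and memcg reclaim.
 * All input values below are hypothetical.
 */
#include <stdio.h>

enum lru_list {
	LRU_INACTIVE_ANON,
	LRU_ACTIVE_ANON,
	LRU_INACTIVE_FILE,
	LRU_ACTIVE_FILE,
	NR_LRU_LISTS,
};

static int is_file_lru(int l)
{
	return l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE;
}

int main(void)
{
	/* hypothetical per-LRU sizes, in pages */
	unsigned long lru_size[NR_LRU_LISTS] = {
		[LRU_INACTIVE_ANON] = 1UL << 18,
		[LRU_ACTIVE_ANON]   = 1UL << 17,
		[LRU_INACTIVE_FILE] = 1UL << 20,
		[LRU_ACTIVE_FILE]   = 1UL << 19,
	};
	/* hypothetical get_scan_ratio() result: [0] = anon %, [1] = file % */
	unsigned long percent[2] = { 40, 60 };
	int priority = 12;	/* DEF_PRIORITY-like starting value */
	unsigned long nr[NR_LRU_LISTS];
	int l;

	for (l = 0; l < NR_LRU_LISTS; l++) {
		int file = is_file_lru(l);
		unsigned long scan = lru_size[l];

		if (priority) {
			scan >>= priority;
			scan = scan * percent[file] / 100;
		}
		/*
		 * Global reclaim accumulates this into zone->lru[l].nr_scan
		 * and only acts once it reaches swap_cluster_max; memcg
		 * reclaim uses the value directly.  The balancing itself is
		 * the same in both cases, which is the point of the patch.
		 */
		nr[l] = scan;
		printf("lru %d (%s): scan %lu pages\n",
		       l, file ? "file" : "anon", nr[l]);
	}
	return 0;
}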