author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2009-01-07 18:08:21 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-01-08 08:31:08 -0800
commit    9439c1c95b5c25b8031b2a7eb7e1590eb84be7f5 (patch)
tree      79eaae56278407e0a223e562a2d0079834ca5529 /mm
parent    3e2f41f1f64744f7942980d93cc93dd3e5924560 (diff)
memcg: remove mem_cgroup_calc_reclaim()
Now that get_scan_ratio() returns the correct value even during memcg reclaim, mem_cgroup_calc_reclaim() can be removed. As a result, memcg reclaim now has the same anon/file reclaim balancing capability as global reclaim.

Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
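For illustration only (not part of the commit): after this change, shrink_zone() derives the scan target for every LRU list from the list size, the reclaim priority, and the anon/file percentage from get_scan_ratio(), for both global and memcg reclaim. A minimal user-space sketch of that arithmetic follows; scan_target() is a hypothetical helper and all input values are made-up examples, not kernel data.

/* Sketch of the unified scan-target math used by the patched shrink_zone():
 * shift the LRU size by the reclaim priority, then scale by the anon/file
 * percentage returned by get_scan_ratio().  Example values only.
 */
#include <stdio.h>

static unsigned long scan_target(unsigned long lru_size, int priority,
				 unsigned int percent)
{
	unsigned long scan = lru_size;

	if (priority) {
		scan >>= priority;		/* scan less at lower pressure */
		scan = (scan * percent) / 100;	/* anon/file balancing */
	}
	return scan;
}

int main(void)
{
	/* e.g. 1,048,576 LRU pages at DEF_PRIORITY (12) with a 60% ratio */
	printf("scan target: %lu pages\n", scan_target(1048576UL, 12, 60));
	return 0;
}

The only remaining difference in the vmscan.c hunk below is batching: global reclaim accumulates the result in zone->lru[l].nr_scan until it reaches swap_cluster_max, while memcg reclaim uses the computed value directly.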
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  21
-rw-r--r--  mm/vmscan.c      27
2 files changed, 10 insertions(+), 38 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7b7f4dc05035..b8c1e5acc25a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -414,27 +414,6 @@ void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
 	mem->prev_priority = priority;
 }
 
-/*
- * Calculate # of pages to be scanned in this priority/zone.
- * See also vmscan.c
- *
- * priority starts from "DEF_PRIORITY" and decremented in each loop.
- * (see include/linux/mmzone.h)
- */
-
-long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-				int priority, enum lru_list lru)
-{
-	long nr_pages;
-	int nid = zone->zone_pgdat->node_id;
-	int zid = zone_idx(zone);
-	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
-
-	nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
-
-	return (nr_pages >> priority);
-}
-
 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
 {
 	unsigned long active;
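For illustration only (not part of the commit): for contrast with the unified path sketched above, the helper deleted in this hunk computed its reclaim target as a plain priority shift of the per-zone LRU size, with no get_scan_ratio() scaling; that is exactly what the common calculation in the vmscan.c hunk below replaces. A hedged sketch with a hypothetical old_memcg_scan_target() and made-up numbers:

/* Sketch of the removed behaviour: the per-zone LRU size shifted by the
 * reclaim priority, with no anon/file percentage applied.  Example values.
 */
#include <stdio.h>

static long old_memcg_scan_target(long nr_lru_pages, int priority)
{
	return nr_lru_pages >> priority;
}

int main(void)
{
	/* e.g. 262,144 pages on one LRU list at DEF_PRIORITY (12) */
	printf("old memcg target: %ld pages\n", old_memcg_scan_target(262144L, 12));
	return 0;
}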
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 56fc7abe4d23..66bb6ef44b5f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1466,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone,
 	get_scan_ratio(zone, sc, percent);
 
 	for_each_evictable_lru(l) {
-		if (scan_global_lru(sc)) {
-			int file = is_file_lru(l);
-			int scan;
+		int file = is_file_lru(l);
+		int scan;
 
-			scan = zone_page_state(zone, NR_LRU_BASE + l);
-			if (priority) {
-				scan >>= priority;
-				scan = (scan * percent[file]) / 100;
-			}
+		scan = zone_page_state(zone, NR_LRU_BASE + l);
+		if (priority) {
+			scan >>= priority;
+			scan = (scan * percent[file]) / 100;
+		}
+		if (scan_global_lru(sc)) {
 			zone->lru[l].nr_scan += scan;
 			nr[l] = zone->lru[l].nr_scan;
 			if (nr[l] >= swap_cluster_max)
 				zone->lru[l].nr_scan = 0;
 			else
 				nr[l] = 0;
-		} else {
-			/*
-			 * This reclaim occurs not because zone memory shortage
-			 * but because memory controller hits its limit.
-			 * Don't modify zone reclaim related data.
-			 */
-			nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
-							priority, l);
-		}
+		} else
+			nr[l] = scan;
 	}
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||