path: root/mm
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c        2
-rw-r--r--  mm/memcontrol.c  1846
-rw-r--r--  mm/memory.c        28
-rw-r--r--  mm/migrate.c       42
-rw-r--r--  mm/mmap.c          10
-rw-r--r--  mm/nommu.c       1027
-rw-r--r--  mm/oom_kill.c      10
-rw-r--r--  mm/page_alloc.c     8
-rw-r--r--  mm/page_cgroup.c  207
-rw-r--r--  mm/shmem.c         20
-rw-r--r--  mm/swap.c          33
-rw-r--r--  mm/swap_state.c     4
-rw-r--r--  mm/swapfile.c      24
-rw-r--r--  mm/vmscan.c       197
14 files changed, 2625 insertions, 833 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f55a1e2baf7..ceba0bd03662 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
VM_BUG_ON(!PageLocked(page));
error = mem_cgroup_cache_charge(page, current->mm,
- gfp_mask & ~__GFP_HIGHMEM);
+ gfp_mask & GFP_RECLAIM_MASK);
if (error)
goto out;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 51ee96545579..e2996b80601f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -21,11 +21,13 @@
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
+#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
@@ -34,12 +36,23 @@
#include <linux/vmalloc.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
+#include "internal.h"
#include <asm/uaccess.h>
struct cgroup_subsys mem_cgroup_subsys __read_mostly;
#define MEM_CGROUP_RECLAIM_RETRIES 5
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+/* Turned on only when memory cgroup is enabled && really_do_swap_account = 0 */
+int do_swap_account __read_mostly;
static int really_do_swap_account __initdata = 1; /* for remembering boot option */
+#else
+#define do_swap_account (0)
+#endif
+
+static DEFINE_MUTEX(memcg_tasklist);	/* can be held under cgroup_mutex */
+
/*
* Statistics for memory cgroup.
*/
@@ -60,7 +73,7 @@ struct mem_cgroup_stat_cpu {
} ____cacheline_aligned_in_smp;
struct mem_cgroup_stat {
- struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
+ struct mem_cgroup_stat_cpu cpustat[0];
};
/*
@@ -89,9 +102,10 @@ struct mem_cgroup_per_zone {
/*
* spin_lock to protect the per cgroup LRU
*/
- spinlock_t lru_lock;
struct list_head lists[NR_LRU_LISTS];
unsigned long count[NR_LRU_LISTS];
+
+ struct zone_reclaim_stat reclaim_stat;
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
@@ -122,44 +136,73 @@ struct mem_cgroup {
*/
struct res_counter res;
/*
+ * the counter to account for mem+swap usage.
+ */
+ struct res_counter memsw;
+ /*
* Per cgroup active and inactive list, similar to the
* per zone LRU lists.
*/
struct mem_cgroup_lru_info info;
+ /*
+ protect against reclaim related member.
+ */
+ spinlock_t reclaim_param_lock;
+
int prev_priority; /* for recording reclaim priority */
+
+ /*
+	 * While reclaiming in a hierarchy, we cache the last child we
+ * reclaimed from. Protected by hierarchy_mutex
+ */
+ struct mem_cgroup *last_scanned_child;
/*
- * statistics.
+ * Should the accounting and control be hierarchical, per subtree?
+ */
+ bool use_hierarchy;
+ unsigned long last_oom_jiffies;
+ atomic_t refcnt;
+
+ unsigned int swappiness;
+
+ /*
+ * statistics. This must be placed at the end of memcg.
*/
struct mem_cgroup_stat stat;
};
-static struct mem_cgroup init_mem_cgroup;
enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
MEM_CGROUP_CHARGE_TYPE_MAPPED,
MEM_CGROUP_CHARGE_TYPE_SHMEM, /* used by page migration of shmem */
MEM_CGROUP_CHARGE_TYPE_FORCE, /* used by force_empty */
+ MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
NR_CHARGE_TYPE,
};
/* only for here (for easy reading.) */
#define PCGF_CACHE (1UL << PCG_CACHE)
#define PCGF_USED (1UL << PCG_USED)
-#define PCGF_ACTIVE (1UL << PCG_ACTIVE)
#define PCGF_LOCK (1UL << PCG_LOCK)
-#define PCGF_FILE (1UL << PCG_FILE)
static const unsigned long
pcg_default_flags[NR_CHARGE_TYPE] = {
- PCGF_CACHE | PCGF_FILE | PCGF_USED | PCGF_LOCK, /* File Cache */
- PCGF_ACTIVE | PCGF_USED | PCGF_LOCK, /* Anon */
- PCGF_ACTIVE | PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
+ PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
+ PCGF_USED | PCGF_LOCK, /* Anon */
+ PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
0, /* FORCE */
};
-/*
- * Always modified under lru lock. Then, not necessary to preempt_disable()
- */
+/* for encoding cft->private value on file */
+#define _MEM (0)
+#define _MEMSWAP (1)
+#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
+#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff)
+#define MEMFILE_ATTR(val) ((val) & 0xffff)
+
+static void mem_cgroup_get(struct mem_cgroup *mem);
+static void mem_cgroup_put(struct mem_cgroup *mem);
+
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
struct page_cgroup *pc,
bool charge)
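The MEMFILE_* macros added above pack a counter type (_MEM or _MEMSWAP) into the high 16 bits of cft->private and a res_counter attribute into the low 16 bits, so a single read/write handler can serve both the memory.* and memory.memsw.* control files. A minimal stand-alone sketch of that encoding (the RES_* values below are illustrative stand-ins, not the kernel's definitions):

#include <assert.h>

#define _MEM			(0)
#define _MEMSWAP		(1)
#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

/* illustrative attribute values; the real ones come from res_counter.h */
enum { RES_USAGE, RES_MAX_USAGE, RES_LIMIT, RES_FAILCNT };

int main(void)
{
	int priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);

	/* mem_cgroup_read()/mem_cgroup_write() split the value back apart */
	assert(MEMFILE_TYPE(priv) == _MEMSWAP);
	assert(MEMFILE_ATTR(priv) == RES_LIMIT);
	return 0;
}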
@@ -167,10 +210,9 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
int val = (charge)? 1 : -1;
struct mem_cgroup_stat *stat = &mem->stat;
struct mem_cgroup_stat_cpu *cpustat;
+ int cpu = get_cpu();
- VM_BUG_ON(!irqs_disabled());
-
- cpustat = &stat->cpustat[smp_processor_id()];
+ cpustat = &stat->cpustat[cpu];
if (PageCgroupCache(pc))
__mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
else
@@ -182,6 +224,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
else
__mem_cgroup_stat_add_safe(cpustat,
MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
+ put_cpu();
}
static struct mem_cgroup_per_zone *
@@ -197,6 +240,9 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
int nid = page_cgroup_nid(pc);
int zid = page_cgroup_zid(pc);
+ if (!mem)
+ return NULL;
+
return mem_cgroup_zoneinfo(mem, nid, zid);
}
@@ -236,77 +282,152 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
struct mem_cgroup, css);
}
-static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
- struct page_cgroup *pc)
+static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
- int lru = LRU_BASE;
+ struct mem_cgroup *mem = NULL;
+ /*
+	 * Because we have no locks, mm->owner may be being moved to another
+ * cgroup. We use css_tryget() here even if this looks
+ * pessimistic (rather than adding locks here).
+ */
+ rcu_read_lock();
+ do {
+ mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (unlikely(!mem))
+ break;
+ } while (!css_tryget(&mem->css));
+ rcu_read_unlock();
+ return mem;
+}
- if (PageCgroupUnevictable(pc))
- lru = LRU_UNEVICTABLE;
- else {
- if (PageCgroupActive(pc))
- lru += LRU_ACTIVE;
- if (PageCgroupFile(pc))
- lru += LRU_FILE;
- }
+static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
+{
+ if (!mem)
+ return true;
+ return css_is_removed(&mem->css);
+}
- MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+/*
+ * Following LRU functions are allowed to be used without PCG_LOCK.
+ * Operations are called by routine of global LRU independently from memcg.
+ * What we have to take care of here is validness of pc->mem_cgroup.
+ *
+ * Changes to pc->mem_cgroup happens when
+ * 1. charge
+ * 2. moving account
+ * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
+ * It is added to LRU before charge.
+ * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
+ * When moving account, the page is not on LRU. It's isolated.
+ */
- mem_cgroup_charge_statistics(pc->mem_cgroup, pc, false);
- list_del(&pc->lru);
+void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
+{
+ struct page_cgroup *pc;
+ struct mem_cgroup *mem;
+ struct mem_cgroup_per_zone *mz;
+
+ if (mem_cgroup_disabled())
+ return;
+ pc = lookup_page_cgroup(page);
+ /* can happen while we handle swapcache. */
+ if (list_empty(&pc->lru) || !pc->mem_cgroup)
+ return;
+ /*
+ * We don't check PCG_USED bit. It's cleared when the "page" is finally
+ * removed from global LRU.
+ */
+ mz = page_cgroup_zoneinfo(pc);
+ mem = pc->mem_cgroup;
+ MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+ list_del_init(&pc->lru);
+ return;
}
-static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
- struct page_cgroup *pc)
+void mem_cgroup_del_lru(struct page *page)
{
- int lru = LRU_BASE;
+ mem_cgroup_del_lru_list(page, page_lru(page));
+}
- if (PageCgroupUnevictable(pc))
- lru = LRU_UNEVICTABLE;
- else {
- if (PageCgroupActive(pc))
- lru += LRU_ACTIVE;
- if (PageCgroupFile(pc))
- lru += LRU_FILE;
- }
+void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
+{
+ struct mem_cgroup_per_zone *mz;
+ struct page_cgroup *pc;
- MEM_CGROUP_ZSTAT(mz, lru) += 1;
- list_add(&pc->lru, &mz->lists[lru]);
+ if (mem_cgroup_disabled())
+ return;
- mem_cgroup_charge_statistics(pc->mem_cgroup, pc, true);
+ pc = lookup_page_cgroup(page);
+ smp_rmb();
+ /* unused page is not rotated. */
+ if (!PageCgroupUsed(pc))
+ return;
+ mz = page_cgroup_zoneinfo(pc);
+ list_move(&pc->lru, &mz->lists[lru]);
}
-static void __mem_cgroup_move_lists(struct page_cgroup *pc, enum lru_list lru)
+void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
- struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
- int active = PageCgroupActive(pc);
- int file = PageCgroupFile(pc);
- int unevictable = PageCgroupUnevictable(pc);
- enum lru_list from = unevictable ? LRU_UNEVICTABLE :
- (LRU_FILE * !!file + !!active);
+ struct page_cgroup *pc;
+ struct mem_cgroup_per_zone *mz;
- if (lru == from)
+ if (mem_cgroup_disabled())
+ return;
+ pc = lookup_page_cgroup(page);
+ /* barrier to sync with "charge" */
+ smp_rmb();
+ if (!PageCgroupUsed(pc))
return;
- MEM_CGROUP_ZSTAT(mz, from) -= 1;
+ mz = page_cgroup_zoneinfo(pc);
+ MEM_CGROUP_ZSTAT(mz, lru) += 1;
+ list_add(&pc->lru, &mz->lists[lru]);
+}
+
+/*
+ * At handling SwapCache, pc->mem_cgroup may be changed while it's linked to
+ * lru because the page may be reused after it's fully uncharged (because of
+ * SwapCache behavior). To handle that, unlink page_cgroup from LRU when we charge
+ * it again. This function is only used to charge SwapCache. It's done under
+ * lock_page and expected that zone->lru_lock is never held.
+ */
+static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
+{
+ unsigned long flags;
+ struct zone *zone = page_zone(page);
+ struct page_cgroup *pc = lookup_page_cgroup(page);
+
+ spin_lock_irqsave(&zone->lru_lock, flags);
/*
- * However this is done under mz->lru_lock, another flags, which
- * are not related to LRU, will be modified from out-of-lock.
- * We have to use atomic set/clear flags.
+ * Forget old LRU when this page_cgroup is *not* used. This Used bit
+ * is guarded by lock_page() because the page is SwapCache.
*/
- if (is_unevictable_lru(lru)) {
- ClearPageCgroupActive(pc);
- SetPageCgroupUnevictable(pc);
- } else {
- if (is_active_lru(lru))
- SetPageCgroupActive(pc);
- else
- ClearPageCgroupActive(pc);
- ClearPageCgroupUnevictable(pc);
- }
+ if (!PageCgroupUsed(pc))
+ mem_cgroup_del_lru_list(page, page_lru(page));
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
- MEM_CGROUP_ZSTAT(mz, lru) += 1;
- list_move(&pc->lru, &mz->lists[lru]);
+static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
+{
+ unsigned long flags;
+ struct zone *zone = page_zone(page);
+ struct page_cgroup *pc = lookup_page_cgroup(page);
+
+ spin_lock_irqsave(&zone->lru_lock, flags);
+ /* link when the page is linked to LRU but page_cgroup isn't */
+ if (PageLRU(page) && list_empty(&pc->lru))
+ mem_cgroup_add_lru_list(page, page_lru(page));
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
+
+void mem_cgroup_move_lists(struct page *page,
+ enum lru_list from, enum lru_list to)
+{
+ if (mem_cgroup_disabled())
+ return;
+ mem_cgroup_del_lru_list(page, from);
+ mem_cgroup_add_lru_list(page, to);
}
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
@@ -320,37 +441,6 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
}
/*
- * This routine assumes that the appropriate zone's lru lock is already held
- */
-void mem_cgroup_move_lists(struct page *page, enum lru_list lru)
-{
- struct page_cgroup *pc;
- struct mem_cgroup_per_zone *mz;
- unsigned long flags;
-
- if (mem_cgroup_subsys.disabled)
- return;
-
- /*
- * We cannot lock_page_cgroup while holding zone's lru_lock,
- * because other holders of lock_page_cgroup can be interrupted
- * with an attempt to rotate_reclaimable_page. But we cannot
- * safely get to page_cgroup without it, so just try_lock it:
- * mem_cgroup_isolate_pages allows for page left on wrong list.
- */
- pc = lookup_page_cgroup(page);
- if (!trylock_page_cgroup(pc))
- return;
- if (pc && PageCgroupUsed(pc)) {
- mz = page_cgroup_zoneinfo(pc);
- spin_lock_irqsave(&mz->lru_lock, flags);
- __mem_cgroup_move_lists(pc, lru);
- spin_unlock_irqrestore(&mz->lru_lock, flags);
- }
- unlock_page_cgroup(pc);
-}
-
-/*
* Calculate mapped_ratio under memory controller. This will be used in
 * vmscan.c for determining whether we have to reclaim mapped pages.
*/
@@ -372,39 +462,108 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
*/
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
- return mem->prev_priority;
+ int prev_priority;
+
+ spin_lock(&mem->reclaim_param_lock);
+ prev_priority = mem->prev_priority;
+ spin_unlock(&mem->reclaim_param_lock);
+
+ return prev_priority;
}
void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
+ spin_lock(&mem->reclaim_param_lock);
if (priority < mem->prev_priority)
mem->prev_priority = priority;
+ spin_unlock(&mem->reclaim_param_lock);
}
void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
+ spin_lock(&mem->reclaim_param_lock);
mem->prev_priority = priority;
+ spin_unlock(&mem->reclaim_param_lock);
}
-/*
- * Calculate # of pages to be scanned in this priority/zone.
- * See also vmscan.c
- *
- * priority starts from "DEF_PRIORITY" and decremented in each loop.
- * (see include/linux/mmzone.h)
- */
+static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
+{
+ unsigned long active;
+ unsigned long inactive;
+ unsigned long gb;
+ unsigned long inactive_ratio;
+
+ inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
+ active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
+
+ gb = (inactive + active) >> (30 - PAGE_SHIFT);
+ if (gb)
+ inactive_ratio = int_sqrt(10 * gb);
+ else
+ inactive_ratio = 1;
+
+ if (present_pages) {
+ present_pages[0] = inactive;
+ present_pages[1] = active;
+ }
+
+ return inactive_ratio;
+}
+
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
+{
+ unsigned long active;
+ unsigned long inactive;
+ unsigned long present_pages[2];
+ unsigned long inactive_ratio;
-long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
- int priority, enum lru_list lru)
+ inactive_ratio = calc_inactive_ratio(memcg, present_pages);
+
+ inactive = present_pages[0];
+ active = present_pages[1];
+
+ if (inactive * inactive_ratio < active)
+ return 1;
+
+ return 0;
+}
+
+unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
+ struct zone *zone,
+ enum lru_list lru)
{
- long nr_pages;
int nid = zone->zone_pgdat->node_id;
int zid = zone_idx(zone);
- struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+ struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
- nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
+ return MEM_CGROUP_ZSTAT(mz, lru);
+}
- return (nr_pages >> priority);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+ struct zone *zone)
+{
+ int nid = zone->zone_pgdat->node_id;
+ int zid = zone_idx(zone);
+ struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+
+ return &mz->reclaim_stat;
+}
+
+struct zone_reclaim_stat *
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+ struct page_cgroup *pc;
+ struct mem_cgroup_per_zone *mz;
+
+ if (mem_cgroup_disabled())
+ return NULL;
+
+ pc = lookup_page_cgroup(page);
+ mz = page_cgroup_zoneinfo(pc);
+ if (!mz)
+ return NULL;
+
+ return &mz->reclaim_stat;
}
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
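calc_inactive_ratio() above sizes the anon inactive list against the cgroup's total anon pages: the target ratio is int_sqrt(10 * size-in-GB), falling back to 1 for small cgroups, and mem_cgroup_inactive_anon_is_low() reports the list as low once inactive * ratio < active. A small user-space sketch of the same arithmetic, assuming 4KB pages and a toy int_sqrt():

#include <stdio.h>

static unsigned long int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	const int PAGE_SHIFT = 12;		/* assume 4KB pages */
	unsigned long inactive = 1UL << 18;	/* 1GB worth of anon pages */
	unsigned long active   = 3UL << 18;	/* 3GB worth of anon pages */
	unsigned long gb = (inactive + active) >> (30 - PAGE_SHIFT);
	unsigned long ratio = gb ? int_sqrt(10 * gb) : 1;

	/* mem_cgroup_inactive_anon_is_low() returns 1 when this comparison holds */
	printf("gb=%lu ratio=%lu low=%d\n", gb, ratio,
	       inactive * ratio < active);
	return 0;
}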
@@ -429,95 +588,281 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
src = &mz->lists[lru];
- spin_lock(&mz->lru_lock);
scan = 0;
list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
if (scan >= nr_to_scan)
break;
+
+ page = pc->page;
if (unlikely(!PageCgroupUsed(pc)))
continue;
- page = pc->page;
-
if (unlikely(!PageLRU(page)))
continue;
- /*
- * TODO: play better with lumpy reclaim, grabbing anything.
- */
- if (PageUnevictable(page) ||
- (PageActive(page) && !active) ||
- (!PageActive(page) && active)) {
- __mem_cgroup_move_lists(pc, page_lru(page));
- continue;
- }
-
scan++;
- list_move(&pc->lru, &pc_list);
-
if (__isolate_lru_page(page, mode, file) == 0) {
list_move(&page->lru, dst);
nr_taken++;
}
}
- list_splice(&pc_list, src);
- spin_unlock(&mz->lru_lock);
-
*scanned = scan;
return nr_taken;
}
+#define mem_cgroup_from_res_counter(counter, member) \
+ container_of(counter, struct mem_cgroup, member)
+
/*
- * Charge the memory controller for page usage.
- * Return
- * 0 if the charge was successful
- * < 0 if the cgroup is over its limit
+ * This routine finds the DFS walk successor. This routine should be
+ * called with hierarchy_mutex held
*/
-static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
- gfp_t gfp_mask, enum charge_type ctype,
- struct mem_cgroup *memcg)
+static struct mem_cgroup *
+mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
{
+ struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
+
+ curr_cgroup = curr->css.cgroup;
+ root_cgroup = root_mem->css.cgroup;
+
+ if (!list_empty(&curr_cgroup->children)) {
+ /*
+ * Walk down to children
+ */
+ mem_cgroup_put(curr);
+ cgroup = list_entry(curr_cgroup->children.next,
+ struct cgroup, sibling);
+ curr = mem_cgroup_from_cont(cgroup);
+ mem_cgroup_get(curr);
+ goto done;
+ }
+
+visit_parent:
+ if (curr_cgroup == root_cgroup) {
+ mem_cgroup_put(curr);
+ curr = root_mem;
+ mem_cgroup_get(curr);
+ goto done;
+ }
+
+ /*
+ * Goto next sibling
+ */
+ if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
+ mem_cgroup_put(curr);
+ cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
+ sibling);
+ curr = mem_cgroup_from_cont(cgroup);
+ mem_cgroup_get(curr);
+ goto done;
+ }
+
+ /*
+ * Go up to next parent and next parent's sibling if need be
+ */
+ curr_cgroup = curr_cgroup->parent;
+ goto visit_parent;
+
+done:
+ root_mem->last_scanned_child = curr;
+ return curr;
+}
+
+/*
+ * Visit the first child (need not be the first child as per the ordering
+ * of the cgroup list, since we track last_scanned_child) of @mem and use
+ * that to reclaim free pages from.
+ */
+static struct mem_cgroup *
+mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
+{
+ struct cgroup *cgroup;
+ struct mem_cgroup *ret;
+ bool obsolete;
+
+ obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child);
+
+ /*
+ * Scan all children under the mem_cgroup mem
+ */
+ mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+ if (list_empty(&root_mem->css.cgroup->children)) {
+ ret = root_mem;
+ goto done;
+ }
+
+ if (!root_mem->last_scanned_child || obsolete) {
+
+ if (obsolete && root_mem->last_scanned_child)
+ mem_cgroup_put(root_mem->last_scanned_child);
+
+ cgroup = list_first_entry(&root_mem->css.cgroup->children,
+ struct cgroup, sibling);
+ ret = mem_cgroup_from_cont(cgroup);
+ mem_cgroup_get(ret);
+ } else
+ ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
+ root_mem);
+
+done:
+ root_mem->last_scanned_child = ret;
+ mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
+ return ret;
+}
+
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+ if (do_swap_account) {
+ if (res_counter_check_under_limit(&mem->res) &&
+ res_counter_check_under_limit(&mem->memsw))
+ return true;
+ } else
+ if (res_counter_check_under_limit(&mem->res))
+ return true;
+ return false;
+}
+
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+ struct cgroup *cgrp = memcg->css.cgroup;
+ unsigned int swappiness;
+
+ /* root ? */
+ if (cgrp->parent == NULL)
+ return vm_swappiness;
+
+ spin_lock(&memcg->reclaim_param_lock);
+ swappiness = memcg->swappiness;
+ spin_unlock(&memcg->reclaim_param_lock);
+
+ return swappiness;
+}
+
+/*
+ * Dance down the hierarchy if needed to reclaim memory. We remember the
+ * last child we reclaimed from, so that we don't end up penalizing
+ * one child extensively based on its position in the children list.
+ *
+ * root_mem is the original ancestor that we've been reclaiming from.
+ */
+static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
+ gfp_t gfp_mask, bool noswap)
+{
+ struct mem_cgroup *next_mem;
+ int ret = 0;
+
+ /*
+ * Reclaim unconditionally and don't check for return value.
+ * We need to reclaim in the current group and down the tree.
+ * One might think about checking for children before reclaiming,
+ * but there might be left over accounting, even after children
+ * have left.
+ */
+ ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
+ get_swappiness(root_mem));
+ if (mem_cgroup_check_under_limit(root_mem))
+ return 0;
+ if (!root_mem->use_hierarchy)
+ return ret;
+
+ next_mem = mem_cgroup_get_first_node(root_mem);
+
+ while (next_mem != root_mem) {
+ if (mem_cgroup_is_obsolete(next_mem)) {
+ mem_cgroup_put(next_mem);
+ next_mem = mem_cgroup_get_first_node(root_mem);
+ continue;
+ }
+ ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
+ get_swappiness(next_mem));
+ if (mem_cgroup_check_under_limit(root_mem))
+ return 0;
+ mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+ next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
+ mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
+ }
+ return ret;
+}
+
+bool mem_cgroup_oom_called(struct task_struct *task)
+{
+ bool ret = false;
struct mem_cgroup *mem;
- struct page_cgroup *pc;
- unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
- struct mem_cgroup_per_zone *mz;
- unsigned long flags;
+ struct mm_struct *mm;
- pc = lookup_page_cgroup(page);
- /* can happen at boot */
- if (unlikely(!pc))
+ rcu_read_lock();
+ mm = task->mm;
+ if (!mm)
+ mm = &init_mm;
+ mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+ if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
+ ret = true;
+ rcu_read_unlock();
+ return ret;
+}
+/*
+ * Unlike exported interface, "oom" parameter is added. if oom==true,
+ * oom-killer can be invoked.
+ */
+static int __mem_cgroup_try_charge(struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcg,
+ bool oom)
+{
+ struct mem_cgroup *mem, *mem_over_limit;
+ int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+ struct res_counter *fail_res;
+
+ if (unlikely(test_thread_flag(TIF_MEMDIE))) {
+ /* Don't account this! */
+ *memcg = NULL;
return 0;
- prefetchw(pc);
+ }
+
/*
* We always charge the cgroup the mm_struct belongs to.
* The mm_struct's mem_cgroup changes on task migration if the
* thread group leader migrates. It's possible that mm is not
* set, if so charge the init_mm (happens for pagecache usage).
*/
-
- if (likely(!memcg)) {
- rcu_read_lock();
- mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!mem)) {
- rcu_read_unlock();
- return 0;
- }
- /*
- * For every charge from the cgroup, increment reference count
- */
- css_get(&mem->css);
- rcu_read_unlock();
+ mem = *memcg;
+ if (likely(!mem)) {
+ mem = try_get_mem_cgroup_from_mm(mm);
+ *memcg = mem;
} else {
- mem = memcg;
- css_get(&memcg->css);
+ css_get(&mem->css);
}
+ if (unlikely(!mem))
+ return 0;
+
+ VM_BUG_ON(mem_cgroup_is_obsolete(mem));
+
+ while (1) {
+ int ret;
+ bool noswap = false;
+
+ ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
+ if (likely(!ret)) {
+ if (!do_swap_account)
+ break;
+ ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
+ &fail_res);
+ if (likely(!ret))
+ break;
+ /* mem+swap counter fails */
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ noswap = true;
+ mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+ memsw);
+ } else
+ /* mem counter fails */
+ mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+ res);
- while (unlikely(res_counter_charge(&mem->res, PAGE_SIZE))) {
if (!(gfp_mask & __GFP_WAIT))
- goto out;
+ goto nomem;
- if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
- continue;
+ ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
+ noswap);
/*
* try_to_free_mem_cgroup_pages() might not give us a full
@@ -525,49 +870,214 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
* moved to swap cache or just unmapped from the cgroup.
* Check the limit again to see if the reclaim reduced the
* current usage of the cgroup before giving up
+ *
*/
- if (res_counter_check_under_limit(&mem->res))
+ if (mem_cgroup_check_under_limit(mem_over_limit))
continue;
if (!nr_retries--) {
- mem_cgroup_out_of_memory(mem, gfp_mask);
- goto out;
+ if (oom) {
+ mutex_lock(&memcg_tasklist);
+ mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
+ mutex_unlock(&memcg_tasklist);
+ mem_over_limit->last_oom_jiffies = jiffies;
+ }
+ goto nomem;
}
}
+ return 0;
+nomem:
+ css_put(&mem->css);
+ return -ENOMEM;
+}
+static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
+{
+ struct mem_cgroup *mem;
+ swp_entry_t ent;
+
+ if (!PageSwapCache(page))
+ return NULL;
+
+ ent.val = page_private(page);
+ mem = lookup_swap_cgroup(ent);
+ if (!mem)
+ return NULL;
+ if (!css_tryget(&mem->css))
+ return NULL;
+ return mem;
+}
+
+/*
+ * commit a charge obtained by __mem_cgroup_try_charge() and mark the page_cgroup
+ * as USED. If it is already USED, uncharge and return.
+ */
+
+static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+ struct page_cgroup *pc,
+ enum charge_type ctype)
+{
+ /* try_charge() can return NULL to *memcg, taking care of it. */
+ if (!mem)
+ return;
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
res_counter_uncharge(&mem->res, PAGE_SIZE);
+ if (do_swap_account)
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
css_put(&mem->css);
-
- goto done;
+ return;
}
pc->mem_cgroup = mem;
- /*
- * If a page is accounted as a page cache, insert to inactive list.
- * If anon, insert to active list.
- */
+ smp_wmb();
pc->flags = pcg_default_flags[ctype];
- mz = page_cgroup_zoneinfo(pc);
+ mem_cgroup_charge_statistics(mem, pc, true);
- spin_lock_irqsave(&mz->lru_lock, flags);
- __mem_cgroup_add_list(mz, pc);
- spin_unlock_irqrestore(&mz->lru_lock, flags);
unlock_page_cgroup(pc);
+}
-done:
- return 0;
+/**
+ * mem_cgroup_move_account - move account of the page
+ * @pc: page_cgroup of the page.
+ * @from: mem_cgroup which the page is moved from.
+ * @to: mem_cgroup which the page is moved to. @from != @to.
+ *
+ * The caller must confirm following.
+ * - page is not on LRU (isolate_page() is useful.)
+ *
+ * returns 0 at success,
+ * returns -EBUSY when lock is busy or "pc" is unstable.
+ *
+ * This function does "uncharge" from old cgroup but doesn't do "charge" to
+ * new cgroup. It should be done by a caller.
+ */
+
+static int mem_cgroup_move_account(struct page_cgroup *pc,
+ struct mem_cgroup *from, struct mem_cgroup *to)
+{
+ struct mem_cgroup_per_zone *from_mz, *to_mz;
+ int nid, zid;
+ int ret = -EBUSY;
+
+ VM_BUG_ON(from == to);
+ VM_BUG_ON(PageLRU(pc->page));
+
+ nid = page_cgroup_nid(pc);
+ zid = page_cgroup_zid(pc);
+ from_mz = mem_cgroup_zoneinfo(from, nid, zid);
+ to_mz = mem_cgroup_zoneinfo(to, nid, zid);
+
+ if (!trylock_page_cgroup(pc))
+ return ret;
+
+ if (!PageCgroupUsed(pc))
+ goto out;
+
+ if (pc->mem_cgroup != from)
+ goto out;
+
+ css_put(&from->css);
+ res_counter_uncharge(&from->res, PAGE_SIZE);
+ mem_cgroup_charge_statistics(from, pc, false);
+ if (do_swap_account)
+ res_counter_uncharge(&from->memsw, PAGE_SIZE);
+ pc->mem_cgroup = to;
+ mem_cgroup_charge_statistics(to, pc, true);
+ css_get(&to->css);
+ ret = 0;
out:
- css_put(&mem->css);
- return -ENOMEM;
+ unlock_page_cgroup(pc);
+ return ret;
+}
+
+/*
+ * move charges to its parent.
+ */
+
+static int mem_cgroup_move_parent(struct page_cgroup *pc,
+ struct mem_cgroup *child,
+ gfp_t gfp_mask)
+{
+ struct page *page = pc->page;
+ struct cgroup *cg = child->css.cgroup;
+ struct cgroup *pcg = cg->parent;
+ struct mem_cgroup *parent;
+ int ret;
+
+ /* Is ROOT ? */
+ if (!pcg)
+ return -EINVAL;
+
+
+ parent = mem_cgroup_from_cont(pcg);
+
+
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
+ if (ret || !parent)
+ return ret;
+
+ if (!get_page_unless_zero(page))
+ return -EBUSY;
+
+ ret = isolate_lru_page(page);
+
+ if (ret)
+ goto cancel;
+
+ ret = mem_cgroup_move_account(pc, child, parent);
+
+ /* drop extra refcnt by try_charge() (move_account increment one) */
+ css_put(&parent->css);
+ putback_lru_page(page);
+ if (!ret) {
+ put_page(page);
+ return 0;
+ }
+ /* uncharge if move fails */
+cancel:
+ res_counter_uncharge(&parent->res, PAGE_SIZE);
+ if (do_swap_account)
+ res_counter_uncharge(&parent->memsw, PAGE_SIZE);
+ put_page(page);
+ return ret;
+}
+
+/*
+ * Charge the memory controller for page usage.
+ * Return
+ * 0 if the charge was successful
+ * < 0 if the cgroup is over its limit
+ */
+static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, enum charge_type ctype,
+ struct mem_cgroup *memcg)
+{
+ struct mem_cgroup *mem;
+ struct page_cgroup *pc;
+ int ret;
+
+ pc = lookup_page_cgroup(page);
+ /* can happen at boot */
+ if (unlikely(!pc))
+ return 0;
+ prefetchw(pc);
+
+ mem = memcg;
+ ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
+ if (ret || !mem)
+ return ret;
+
+ __mem_cgroup_commit_charge(mem, pc, ctype);
+ return 0;
}
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_newpage_charge(struct page *page,
+ struct mm_struct *mm, gfp_t gfp_mask)
{
- if (mem_cgroup_subsys.disabled)
+ if (mem_cgroup_disabled())
return 0;
if (PageCompound(page))
return 0;
@@ -589,7 +1099,10 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
- if (mem_cgroup_subsys.disabled)
+ struct mem_cgroup *mem = NULL;
+ int ret;
+
+ if (mem_cgroup_disabled())
return 0;
if (PageCompound(page))
return 0;
@@ -601,6 +1114,8 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
* For GFP_NOWAIT case, the page may be pre-charged before calling
* add_to_page_cache(). (See shmem.c) check it here and avoid to call
* charge twice. (It works but has to pay a bit larger cost.)
+ * And when the page is SwapCache, it should take swap information
+ * into account. This is under lock_page() now.
*/
if (!(gfp_mask & __GFP_WAIT)) {
struct page_cgroup *pc;
@@ -617,58 +1132,198 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
unlock_page_cgroup(pc);
}
- if (unlikely(!mm))
+ if (do_swap_account && PageSwapCache(page)) {
+ mem = try_get_mem_cgroup_from_swapcache(page);
+ if (mem)
+ mm = NULL;
+ else
+ mem = NULL;
+ /* SwapCache may be still linked to LRU now. */
+ mem_cgroup_lru_del_before_commit_swapcache(page);
+ }
+
+ if (unlikely(!mm && !mem))
mm = &init_mm;
if (page_is_file_cache(page))
return mem_cgroup_charge_common(page, mm, gfp_mask,
MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
- else
- return mem_cgroup_charge_common(page, mm, gfp_mask,
- MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
+
+ ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+ MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
+ if (mem)
+ css_put(&mem->css);
+ if (PageSwapCache(page))
+ mem_cgroup_lru_add_after_commit_swapcache(page);
+
+ if (do_swap_account && !ret && PageSwapCache(page)) {
+ swp_entry_t ent = {.val = page_private(page)};
+ /* avoid double counting */
+ mem = swap_cgroup_record(ent, NULL);
+ if (mem) {
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ mem_cgroup_put(mem);
+ }
+ }
+ return ret;
+}
+
+/*
+ * While swap-in, try_charge -> commit or cancel, the page is locked.
+ * And when try_charge() successfully returns, one refcnt to memcg without
+ * struct page_cgroup is acquired. This refcnt will be consumed by
+ * "commit()" or removed by "cancel()"
+ */
+int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
+ struct page *page,
+ gfp_t mask, struct mem_cgroup **ptr)
+{
+ struct mem_cgroup *mem;
+ int ret;
+
+ if (mem_cgroup_disabled())
+ return 0;
+
+ if (!do_swap_account)
+ goto charge_cur_mm;
+ /*
+ * A racing thread's fault, or swapoff, may have already updated
+ * the pte, and even removed page from swap cache: return success
+ * to go on to do_swap_page()'s pte_same() test, which should fail.
+ */
+ if (!PageSwapCache(page))
+ return 0;
+ mem = try_get_mem_cgroup_from_swapcache(page);
+ if (!mem)
+ goto charge_cur_mm;
+ *ptr = mem;
+ ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
+ /* drop extra refcnt from tryget */
+ css_put(&mem->css);
+ return ret;
+charge_cur_mm:
+ if (unlikely(!mm))
+ mm = &init_mm;
+ return __mem_cgroup_try_charge(mm, mask, ptr, true);
+}
+
+void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
+{
+ struct page_cgroup *pc;
+
+ if (mem_cgroup_disabled())
+ return;
+ if (!ptr)
+ return;
+ pc = lookup_page_cgroup(page);
+ mem_cgroup_lru_del_before_commit_swapcache(page);
+ __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+ mem_cgroup_lru_add_after_commit_swapcache(page);
+ /*
+ * Now swap is on-memory. This means this page may be
+ * counted both as mem and swap....double count.
+ * Fix it by uncharging from memsw. Basically, this SwapCache is stable
+ * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
+ * may call delete_from_swap_cache() before we reach here.
+ */
+ if (do_swap_account && PageSwapCache(page)) {
+ swp_entry_t ent = {.val = page_private(page)};
+ struct mem_cgroup *memcg;
+ memcg = swap_cgroup_record(ent, NULL);
+ if (memcg) {
+ res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+ mem_cgroup_put(memcg);
+ }
+
+ }
+ /* add this page(page_cgroup) to the LRU we want. */
+
}
+void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
+{
+ if (mem_cgroup_disabled())
+ return;
+ if (!mem)
+ return;
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ if (do_swap_account)
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+ css_put(&mem->css);
+}
+
+
/*
* uncharge if !page_mapped(page)
*/
-static void
+static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
struct page_cgroup *pc;
- struct mem_cgroup *mem;
+ struct mem_cgroup *mem = NULL;
struct mem_cgroup_per_zone *mz;
- unsigned long flags;
- if (mem_cgroup_subsys.disabled)
- return;
+ if (mem_cgroup_disabled())
+ return NULL;
+
+ if (PageSwapCache(page))
+ return NULL;
/*
* Check if our page_cgroup is valid
*/
pc = lookup_page_cgroup(page);
if (unlikely(!pc || !PageCgroupUsed(pc)))
- return;
+ return NULL;
lock_page_cgroup(pc);
- if ((ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED && page_mapped(page))
- || !PageCgroupUsed(pc)) {
- /* This happens at race in zap_pte_range() and do_swap_page()*/
- unlock_page_cgroup(pc);
- return;
+
+ mem = pc->mem_cgroup;
+
+ if (!PageCgroupUsed(pc))
+ goto unlock_out;
+
+ switch (ctype) {
+ case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+ if (page_mapped(page))
+ goto unlock_out;
+ break;
+ case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
+ if (!PageAnon(page)) { /* Shared memory */
+ if (page->mapping && !page_is_file_cache(page))
+ goto unlock_out;
+ } else if (page_mapped(page)) /* Anon */
+ goto unlock_out;
+ break;
+ default:
+ break;
}
+
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
+
+ mem_cgroup_charge_statistics(mem, pc, false);
ClearPageCgroupUsed(pc);
- mem = pc->mem_cgroup;
+ /*
+ * pc->mem_cgroup is not cleared here. It will be accessed when it's
+ * freed from LRU. This is safe because uncharged page is expected not
+ * to be reused (freed soon). Exception is SwapCache, it's handled by
+ * special functions.
+ */
mz = page_cgroup_zoneinfo(pc);
- spin_lock_irqsave(&mz->lru_lock, flags);
- __mem_cgroup_remove_list(mz, pc);
- spin_unlock_irqrestore(&mz->lru_lock, flags);
unlock_page_cgroup(pc);
- res_counter_uncharge(&mem->res, PAGE_SIZE);
- css_put(&mem->css);
+ /* at swapout, this memcg will be accessed to record to swap */
+ if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+ css_put(&mem->css);
- return;
+ return mem;
+
+unlock_out:
+ unlock_page_cgroup(pc);
+ return NULL;
}
void mem_cgroup_uncharge_page(struct page *page)
@@ -689,16 +1344,55 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
}
/*
- * Before starting migration, account against new page.
+ * called from __delete_from_swap_cache() and drop "page" account.
+ * memcg information is recorded to swap_cgroup of "ent"
+ */
+void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+{
+ struct mem_cgroup *memcg;
+
+ memcg = __mem_cgroup_uncharge_common(page,
+ MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
+ /* record memcg information */
+ if (do_swap_account && memcg) {
+ swap_cgroup_record(ent, memcg);
+ mem_cgroup_get(memcg);
+ }
+ if (memcg)
+ css_put(&memcg->css);
+}
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+/*
+ * called from swap_entry_free(). remove record in swap_cgroup and
+ * uncharge "memsw" account.
*/
-int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
+void mem_cgroup_uncharge_swap(swp_entry_t ent)
+{
+ struct mem_cgroup *memcg;
+
+ if (!do_swap_account)
+ return;
+
+ memcg = swap_cgroup_record(ent, NULL);
+ if (memcg) {
+ res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
+ mem_cgroup_put(memcg);
+ }
+}
+#endif
+
+/*
+ * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
+ * page belongs to.
+ */
+int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
struct page_cgroup *pc;
struct mem_cgroup *mem = NULL;
- enum charge_type ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
int ret = 0;
- if (mem_cgroup_subsys.disabled)
+ if (mem_cgroup_disabled())
return 0;
pc = lookup_page_cgroup(page);
@@ -706,41 +1400,67 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
if (PageCgroupUsed(pc)) {
mem = pc->mem_cgroup;
css_get(&mem->css);
- if (PageCgroupCache(pc)) {
- if (page_is_file_cache(page))
- ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
- else
- ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
- }
}
unlock_page_cgroup(pc);
+
if (mem) {
- ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
- ctype, mem);
+ ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
css_put(&mem->css);
}
+ *ptr = mem;
return ret;
}
/* remove redundant charge if migration failed*/
-void mem_cgroup_end_migration(struct page *newpage)
+void mem_cgroup_end_migration(struct mem_cgroup *mem,
+ struct page *oldpage, struct page *newpage)
{
+ struct page *target, *unused;
+ struct page_cgroup *pc;
+ enum charge_type ctype;
+
+ if (!mem)
+ return;
+
+ /* at migration success, oldpage->mapping is NULL. */
+ if (oldpage->mapping) {
+ target = oldpage;
+ unused = NULL;
+ } else {
+ target = newpage;
+ unused = oldpage;
+ }
+
+ if (PageAnon(target))
+ ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
+ else if (page_is_file_cache(target))
+ ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
+ else
+ ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+
+ /* unused page is not on radix-tree now. */
+ if (unused)
+ __mem_cgroup_uncharge_common(unused, ctype);
+
+ pc = lookup_page_cgroup(target);
/*
- * At success, page->mapping is not NULL.
- * special rollback care is necessary when
- * 1. at migration failure. (newpage->mapping is cleared in this case)
- * 2. the newpage was moved but not remapped again because the task
- * exits and the newpage is obsolete. In this case, the new page
- * may be a swapcache. So, we just call mem_cgroup_uncharge_page()
- * always for avoiding mess. The page_cgroup will be removed if
- * unnecessary. File cache pages is still on radix-tree. Don't
- * care it.
+	 * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup.
+ * So, double-counting is effectively avoided.
*/
- if (!newpage->mapping)
- __mem_cgroup_uncharge_common(newpage,
- MEM_CGROUP_CHARGE_TYPE_FORCE);
- else if (PageAnon(newpage))
- mem_cgroup_uncharge_page(newpage);
+ __mem_cgroup_commit_charge(mem, pc, ctype);
+
+ /*
+ * Both of oldpage and newpage are still under lock_page().
+ * Then, we don't have to care about race in radix-tree.
+ * But we have to be careful that this page is unmapped or not.
+ *
+ * There is a case for !page_mapped(). At the start of
+ * migration, oldpage was mapped. But now, it's zapped.
+ * But we know *target* page is not freed/reused under us.
+ * mem_cgroup_uncharge_page() does all necessary checks.
+ */
+ if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
+ mem_cgroup_uncharge_page(target);
}
/*
@@ -748,29 +1468,26 @@ void mem_cgroup_end_migration(struct page *newpage)
* This is typically used for page reclaiming for shmem for reducing side
* effect of page allocation from shmem, which is used by some mem_cgroup.
*/
-int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
+int mem_cgroup_shrink_usage(struct page *page,
+ struct mm_struct *mm,
+ gfp_t gfp_mask)
{
- struct mem_cgroup *mem;
+ struct mem_cgroup *mem = NULL;
int progress = 0;
int retry = MEM_CGROUP_RECLAIM_RETRIES;
- if (mem_cgroup_subsys.disabled)
+ if (mem_cgroup_disabled())
return 0;
- if (!mm)
+ if (page)
+ mem = try_get_mem_cgroup_from_swapcache(page);
+ if (!mem && mm)
+ mem = try_get_mem_cgroup_from_mm(mm);
+ if (unlikely(!mem))
return 0;
- rcu_read_lock();
- mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!mem)) {
- rcu_read_unlock();
- return 0;
- }
- css_get(&mem->css);
- rcu_read_unlock();
-
do {
- progress = try_to_free_mem_cgroup_pages(mem, gfp_mask);
- progress += res_counter_check_under_limit(&mem->res);
+ progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
+ progress += mem_cgroup_check_under_limit(mem);
} while (!progress && --retry);
css_put(&mem->css);
@@ -779,117 +1496,295 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
return 0;
}
+static DEFINE_MUTEX(set_limit_mutex);
+
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
- unsigned long long val)
+ unsigned long long val)
{
int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
int progress;
+ u64 memswlimit;
int ret = 0;
- while (res_counter_set_limit(&memcg->res, val)) {
+ while (retry_count) {
if (signal_pending(current)) {
ret = -EINTR;
break;
}
- if (!retry_count) {
- ret = -EBUSY;
+ /*
+ * Rather than hide all in some function, I do this in
+ * open coded manner. You see what this really does.
+ * We have to guarantee mem->res.limit < mem->memsw.limit.
+ */
+ mutex_lock(&set_limit_mutex);
+ memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+ if (memswlimit < val) {
+ ret = -EINVAL;
+ mutex_unlock(&set_limit_mutex);
break;
}
- progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
- if (!progress)
- retry_count--;
+ ret = res_counter_set_limit(&memcg->res, val);
+ mutex_unlock(&set_limit_mutex);
+
+ if (!ret)
+ break;
+
+ progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
+ false);
+ if (!progress) retry_count--;
}
+
return ret;
}
+int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
+ unsigned long long val)
+{
+ int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
+ u64 memlimit, oldusage, curusage;
+ int ret;
+
+ if (!do_swap_account)
+ return -EINVAL;
+
+ while (retry_count) {
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+ /*
+ * Rather than hide all in some function, I do this in
+ * open coded manner. You see what this really does.
+ * We have to guarantee mem->res.limit < mem->memsw.limit.
+ */
+ mutex_lock(&set_limit_mutex);
+ memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+ if (memlimit > val) {
+ ret = -EINVAL;
+ mutex_unlock(&set_limit_mutex);
+ break;
+ }
+ ret = res_counter_set_limit(&memcg->memsw, val);
+ mutex_unlock(&set_limit_mutex);
+
+ if (!ret)
+ break;
+
+ oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
+ curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
+ if (curusage >= oldusage)
+ retry_count--;
+ }
+ return ret;
+}
/*
* This routine traverse page_cgroup in given list and drop them all.
* *And* this routine doesn't reclaim page itself, just removes page_cgroup.
*/
-#define FORCE_UNCHARGE_BATCH (128)
-static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
- struct mem_cgroup_per_zone *mz,
- enum lru_list lru)
+static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
+ int node, int zid, enum lru_list lru)
{
- struct page_cgroup *pc;
- struct page *page;
- int count = FORCE_UNCHARGE_BATCH;
- unsigned long flags;
+ struct zone *zone;
+ struct mem_cgroup_per_zone *mz;
+ struct page_cgroup *pc, *busy;
+ unsigned long flags, loop;
struct list_head *list;
+ int ret = 0;
+ zone = &NODE_DATA(node)->node_zones[zid];
+ mz = mem_cgroup_zoneinfo(mem, node, zid);
list = &mz->lists[lru];
- spin_lock_irqsave(&mz->lru_lock, flags);
- while (!list_empty(list)) {
- pc = list_entry(list->prev, struct page_cgroup, lru);
- page = pc->page;
- if (!PageCgroupUsed(pc))
- break;
- get_page(page);
- spin_unlock_irqrestore(&mz->lru_lock, flags);
- /*
- * Check if this page is on LRU. !LRU page can be found
- * if it's under page migration.
- */
- if (PageLRU(page)) {
- __mem_cgroup_uncharge_common(page,
- MEM_CGROUP_CHARGE_TYPE_FORCE);
- put_page(page);
- if (--count <= 0) {
- count = FORCE_UNCHARGE_BATCH;
- cond_resched();
- }
- } else {
- spin_lock_irqsave(&mz->lru_lock, flags);
+ loop = MEM_CGROUP_ZSTAT(mz, lru);
+ /* give some margin against EBUSY etc...*/
+ loop += 256;
+ busy = NULL;
+ while (loop--) {
+ ret = 0;
+ spin_lock_irqsave(&zone->lru_lock, flags);
+ if (list_empty(list)) {
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
break;
}
- spin_lock_irqsave(&mz->lru_lock, flags);
+ pc = list_entry(list->prev, struct page_cgroup, lru);
+ if (busy == pc) {
+ list_move(&pc->lru, list);
+ busy = 0;
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+
+ ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
+ if (ret == -ENOMEM)
+ break;
+
+ if (ret == -EBUSY || ret == -EINVAL) {
+ /* found lock contention or "pc" is obsolete. */
+ busy = pc;
+ cond_resched();
+ } else
+ busy = NULL;
}
- spin_unlock_irqrestore(&mz->lru_lock, flags);
+
+ if (!ret && !list_empty(list))
+ return -EBUSY;
+ return ret;
}
/*
* make mem_cgroup's charge to be 0 if there is no task.
* This enables deleting this mem_cgroup.
*/
-static int mem_cgroup_force_empty(struct mem_cgroup *mem)
+static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
{
- int ret = -EBUSY;
- int node, zid;
+ int ret;
+ int node, zid, shrink;
+ int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+ struct cgroup *cgrp = mem->css.cgroup;
css_get(&mem->css);
- /*
- * page reclaim code (kswapd etc..) will move pages between
- * active_list <-> inactive_list while we don't take a lock.
- * So, we have to do loop here until all lists are empty.
- */
+
+ shrink = 0;
+ /* should free all ? */
+ if (free_all)
+ goto try_to_free;
+move_account:
while (mem->res.usage > 0) {
- if (atomic_read(&mem->css.cgroup->count) > 0)
+ ret = -EBUSY;
+ if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
+ goto out;
+ ret = -EINTR;
+ if (signal_pending(current))
goto out;
/* This is for making all *used* pages to be on LRU. */
lru_add_drain_all();
- for_each_node_state(node, N_POSSIBLE)
- for (zid = 0; zid < MAX_NR_ZONES; zid++) {
- struct mem_cgroup_per_zone *mz;
+ ret = 0;
+ for_each_node_state(node, N_POSSIBLE) {
+ for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
enum lru_list l;
- mz = mem_cgroup_zoneinfo(mem, node, zid);
- for_each_lru(l)
- mem_cgroup_force_empty_list(mem, mz, l);
+ for_each_lru(l) {
+ ret = mem_cgroup_force_empty_list(mem,
+ node, zid, l);
+ if (ret)
+ break;
+ }
}
+ if (ret)
+ break;
+ }
+ /* it seems parent cgroup doesn't have enough mem */
+ if (ret == -ENOMEM)
+ goto try_to_free;
cond_resched();
}
ret = 0;
out:
css_put(&mem->css);
return ret;
+
+try_to_free:
+ /* returns EBUSY if there is a task or if we come here twice. */
+ if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
+ ret = -EBUSY;
+ goto out;
+ }
+	/* we call try-to-free pages to make this cgroup empty */
+ lru_add_drain_all();
+ /* try to free all pages in this cgroup */
+ shrink = 1;
+ while (nr_retries && mem->res.usage > 0) {
+ int progress;
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ goto out;
+ }
+ progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
+ false, get_swappiness(mem));
+ if (!progress) {
+ nr_retries--;
+ /* maybe some writeback is necessary */
+ congestion_wait(WRITE, HZ/10);
+ }
+
+ }
+ lru_add_drain();
+ /* try move_account...there may be some *locked* pages. */
+ if (mem->res.usage)
+ goto move_account;
+ ret = 0;
+ goto out;
+}
+
+int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+{
+ return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
+}
+
+
+static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
+{
+ return mem_cgroup_from_cont(cont)->use_hierarchy;
+}
+
+static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
+ u64 val)
+{
+ int retval = 0;
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+ struct cgroup *parent = cont->parent;
+ struct mem_cgroup *parent_mem = NULL;
+
+ if (parent)
+ parent_mem = mem_cgroup_from_cont(parent);
+
+ cgroup_lock();
+ /*
+	 * If parent's use_hierarchy is set, we can't make any modifications
+ * in the child subtrees. If it is unset, then the change can
+ * occur, provided the current cgroup has no children.
+ *
+ * For the root cgroup, parent_mem is NULL, we allow value to be
+ * set if there are no children.
+ */
+ if ((!parent_mem || !parent_mem->use_hierarchy) &&
+ (val == 1 || val == 0)) {
+ if (list_empty(&cont->children))
+ mem->use_hierarchy = val;
+ else
+ retval = -EBUSY;
+ } else
+ retval = -EINVAL;
+ cgroup_unlock();
+
+ return retval;
}
static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
- return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
- cft->private);
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+ u64 val = 0;
+ int type, name;
+
+ type = MEMFILE_TYPE(cft->private);
+ name = MEMFILE_ATTR(cft->private);
+ switch (type) {
+ case _MEM:
+ val = res_counter_read_u64(&mem->res, name);
+ break;
+ case _MEMSWAP:
+ if (do_swap_account)
+ val = res_counter_read_u64(&mem->memsw, name);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ return val;
}
/*
* The user of this function is...
@@ -899,15 +1794,22 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
const char *buffer)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ int type, name;
unsigned long long val;
int ret;
- switch (cft->private) {
+ type = MEMFILE_TYPE(cft->private);
+ name = MEMFILE_ATTR(cft->private);
+ switch (name) {
case RES_LIMIT:
/* This function does all necessary parse...reuse it */
ret = res_counter_memparse_write_strategy(buffer, &val);
- if (!ret)
+ if (ret)
+ break;
+ if (type == _MEM)
ret = mem_cgroup_resize_limit(memcg, val);
+ else
+ ret = mem_cgroup_resize_memsw_limit(memcg, val);
break;
default:
ret = -EINVAL; /* should be BUG() ? */
@@ -916,27 +1818,59 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
return ret;
}
+static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
+ unsigned long long *mem_limit, unsigned long long *memsw_limit)
+{
+ struct cgroup *cgroup;
+ unsigned long long min_limit, min_memsw_limit, tmp;
+
+ min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+ min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+ cgroup = memcg->css.cgroup;
+ if (!memcg->use_hierarchy)
+ goto out;
+
+ while (cgroup->parent) {
+ cgroup = cgroup->parent;
+ memcg = mem_cgroup_from_cont(cgroup);
+ if (!memcg->use_hierarchy)
+ break;
+ tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
+ min_limit = min(min_limit, tmp);
+ tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+ min_memsw_limit = min(min_memsw_limit, tmp);
+ }
+out:
+ *mem_limit = min_limit;
+ *memsw_limit = min_memsw_limit;
+ return;
+}
+
static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
struct mem_cgroup *mem;
+ int type, name;
mem = mem_cgroup_from_cont(cont);
- switch (event) {
+ type = MEMFILE_TYPE(event);
+ name = MEMFILE_ATTR(event);
+ switch (name) {
case RES_MAX_USAGE:
- res_counter_reset_max(&mem->res);
+ if (type == _MEM)
+ res_counter_reset_max(&mem->res);
+ else
+ res_counter_reset_max(&mem->memsw);
break;
case RES_FAILCNT:
- res_counter_reset_failcnt(&mem->res);
+ if (type == _MEM)
+ res_counter_reset_failcnt(&mem->res);
+ else
+ res_counter_reset_failcnt(&mem->memsw);
break;
}
return 0;
}
-static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
-{
- return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
-}
-
static const struct mem_cgroup_stat_desc {
const char *msg;
u64 unit;
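memcg_get_hierarchical_limit(), added just above, feeds the hierarchical_memory_limit / hierarchical_memsw_limit lines that memory.stat reports in the following hunk: it walks up the parent chain while use_hierarchy remains set and keeps the smallest limit seen. A simplified sketch with stand-in types (not the kernel's structures):

#include <stdio.h>

struct memcg_stub {
	struct memcg_stub *parent;
	int use_hierarchy;
	unsigned long long limit;	/* bytes */
};

static unsigned long long hierarchical_limit(const struct memcg_stub *m)
{
	unsigned long long min_limit = m->limit;

	if (!m->use_hierarchy)
		return min_limit;
	/* take the minimum limit over every hierarchical ancestor */
	for (m = m->parent; m && m->use_hierarchy; m = m->parent)
		if (m->limit < min_limit)
			min_limit = m->limit;
	return min_limit;
}

int main(void)
{
	struct memcg_stub parent = { NULL,    1,  8ULL << 30 };	/* 8G  */
	struct memcg_stub child  = { &parent, 1, 16ULL << 30 };	/* 16G */

	/* the child's own limit is 16G, but the hierarchy caps it at 8G */
	printf("%lluG\n", hierarchical_limit(&child) >> 30);
	return 0;
}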
@@ -985,43 +1919,163 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
}
+ {
+ unsigned long long limit, memsw_limit;
+ memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
+ cb->fill(cb, "hierarchical_memory_limit", limit);
+ if (do_swap_account)
+ cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
+ }
+
+#ifdef CONFIG_DEBUG_VM
+ cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
+
+ {
+ int nid, zid;
+ struct mem_cgroup_per_zone *mz;
+ unsigned long recent_rotated[2] = {0, 0};
+ unsigned long recent_scanned[2] = {0, 0};
+
+ for_each_online_node(nid)
+ for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+ mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
+
+ recent_rotated[0] +=
+ mz->reclaim_stat.recent_rotated[0];
+ recent_rotated[1] +=
+ mz->reclaim_stat.recent_rotated[1];
+ recent_scanned[0] +=
+ mz->reclaim_stat.recent_scanned[0];
+ recent_scanned[1] +=
+ mz->reclaim_stat.recent_scanned[1];
+ }
+ cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
+ cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
+ cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
+ cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
+ }
+#endif
+
+ return 0;
+}
+
+static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+
+ return get_swappiness(memcg);
+}
+
+static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
+ u64 val)
+{
+ struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *parent;
+ if (val > 100)
+ return -EINVAL;
+
+ if (cgrp->parent == NULL)
+ return -EINVAL;
+
+ parent = mem_cgroup_from_cont(cgrp->parent);
+ /* If under hierarchy, only empty-root can set this value */
+ if ((parent->use_hierarchy) ||
+ (memcg->use_hierarchy && !list_empty(&cgrp->children)))
+ return -EINVAL;
+
+ spin_lock(&memcg->reclaim_param_lock);
+ memcg->swappiness = val;
+ spin_unlock(&memcg->reclaim_param_lock);
+
return 0;
}
+
static struct cftype mem_cgroup_files[] = {
{
.name = "usage_in_bytes",
- .private = RES_USAGE,
+ .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
.read_u64 = mem_cgroup_read,
},
{
.name = "max_usage_in_bytes",
- .private = RES_MAX_USAGE,
+ .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
.trigger = mem_cgroup_reset,
.read_u64 = mem_cgroup_read,
},
{
.name = "limit_in_bytes",
- .private = RES_LIMIT,
+ .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
.write_string = mem_cgroup_write,
.read_u64 = mem_cgroup_read,
},
{
.name = "failcnt",
- .private = RES_FAILCNT,
+ .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
.trigger = mem_cgroup_reset,
.read_u64 = mem_cgroup_read,
},
{
+ .name = "stat",
+ .read_map = mem_control_stat_show,
+ },
+ {
.name = "force_empty",
- .trigger = mem_force_empty_write,
+ .trigger = mem_cgroup_force_empty_write,
},
{
- .name = "stat",
- .read_map = mem_control_stat_show,
+ .name = "use_hierarchy",
+ .write_u64 = mem_cgroup_hierarchy_write,
+ .read_u64 = mem_cgroup_hierarchy_read,
+ },
+ {
+ .name = "swappiness",
+ .read_u64 = mem_cgroup_swappiness_read,
+ .write_u64 = mem_cgroup_swappiness_write,
},
};
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+static struct cftype memsw_cgroup_files[] = {
+ {
+ .name = "memsw.usage_in_bytes",
+ .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
+ .read_u64 = mem_cgroup_read,
+ },
+ {
+ .name = "memsw.max_usage_in_bytes",
+ .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
+ .trigger = mem_cgroup_reset,
+ .read_u64 = mem_cgroup_read,
+ },
+ {
+ .name = "memsw.limit_in_bytes",
+ .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
+ .write_string = mem_cgroup_write,
+ .read_u64 = mem_cgroup_read,
+ },
+ {
+ .name = "memsw.failcnt",
+ .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
+ .trigger = mem_cgroup_reset,
+ .read_u64 = mem_cgroup_read,
+ },
+};
+
+static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+ if (!do_swap_account)
+ return 0;
+ return cgroup_add_files(cont, ss, memsw_cgroup_files,
+ ARRAY_SIZE(memsw_cgroup_files));
+};
+#else
+static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
+{
+ return 0;
+}
+#endif
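
The memory and memsw control files above share mem_cgroup_read() and mem_cgroup_reset(), telling the two res_counters apart through the packed .private value. A minimal sketch of the packing this relies on, assuming the MEMFILE_* helpers are defined earlier in this patch roughly as follows (the exact bodies may differ):

#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)

/* a reader can then pick the right counter and member, e.g.: */
static u64 example_read(struct mem_cgroup *mem, struct cftype *cft)
{
	int type = MEMFILE_TYPE(cft->private);	/* _MEM or _MEMSWAP */
	int name = MEMFILE_ATTR(cft->private);	/* RES_USAGE, RES_LIMIT, ... */

	return res_counter_read_u64(type == _MEM ? &mem->res : &mem->memsw,
				    name);
}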
+
static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
struct mem_cgroup_per_node *pn;
@@ -1047,7 +2101,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mz = &pn->zoneinfo[zone];
- spin_lock_init(&mz->lru_lock);
for_each_lru(l)
INIT_LIST_HEAD(&mz->lists[l]);
}
@@ -1059,55 +2112,113 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
kfree(mem->info.nodeinfo[node]);
}
+static int mem_cgroup_size(void)
+{
+ int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
+ return sizeof(struct mem_cgroup) + cpustat_size;
+}
+
static struct mem_cgroup *mem_cgroup_alloc(void)
{
struct mem_cgroup *mem;
+ int size = mem_cgroup_size();
- if (sizeof(*mem) < PAGE_SIZE)
- mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+ if (size < PAGE_SIZE)
+ mem = kmalloc(size, GFP_KERNEL);
else
- mem = vmalloc(sizeof(*mem));
+ mem = vmalloc(size);
if (mem)
- memset(mem, 0, sizeof(*mem));
+ memset(mem, 0, size);
return mem;
}
-static void mem_cgroup_free(struct mem_cgroup *mem)
+/*
+ * When a mem_cgroup is destroyed, references from swap_cgroup can remain
+ * (scanning them all at force_empty would be too costly...)
+ *
+ * Instead of clearing all references at force_empty, we remember the
+ * number of references held by swap_cgroup and free the mem_cgroup only
+ * when that count goes down to 0.
+ *
+ * Removal of the cgroup itself succeeds regardless of refs from swap.
+ */
+
+static void __mem_cgroup_free(struct mem_cgroup *mem)
{
- if (sizeof(*mem) < PAGE_SIZE)
+ int node;
+
+ for_each_node_state(node, N_POSSIBLE)
+ free_mem_cgroup_per_zone_info(mem, node);
+
+ if (mem_cgroup_size() < PAGE_SIZE)
kfree(mem);
else
vfree(mem);
}
+static void mem_cgroup_get(struct mem_cgroup *mem)
+{
+ atomic_inc(&mem->refcnt);
+}
+
+static void mem_cgroup_put(struct mem_cgroup *mem)
+{
+ if (atomic_dec_and_test(&mem->refcnt))
+ __mem_cgroup_free(mem);
+}
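
To make the rule above concrete, here is a rough sketch of how a swap record would pin the memcg. The example_* helpers are hypothetical and only illustrate the get/put pairing; they are not part of the patch.

/* hypothetical illustration of the refcount rule described above */
static void example_record_owner_on_swapout(struct mem_cgroup *mem,
					    swp_entry_t ent)
{
	swap_cgroup_record(ent, mem);	/* remember the owner on swap */
	mem_cgroup_get(mem);		/* pin the memcg for that record */
}

static void example_swap_entry_freed(swp_entry_t ent)
{
	struct mem_cgroup *mem = swap_cgroup_record(ent, NULL);

	if (mem)
		mem_cgroup_put(mem);	/* last put ends in __mem_cgroup_free() */
}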
+
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+static void __init enable_swap_cgroup(void)
+{
+ if (!mem_cgroup_disabled() && really_do_swap_account)
+ do_swap_account = 1;
+}
+#else
+static void __init enable_swap_cgroup(void)
+{
+}
+#endif
static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
- struct mem_cgroup *mem;
+ struct mem_cgroup *mem, *parent;
int node;
- if (unlikely((cont->parent) == NULL)) {
- mem = &init_mem_cgroup;
- } else {
- mem = mem_cgroup_alloc();
- if (!mem)
- return ERR_PTR(-ENOMEM);
- }
-
- res_counter_init(&mem->res);
+ mem = mem_cgroup_alloc();
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
for_each_node_state(node, N_POSSIBLE)
if (alloc_mem_cgroup_per_zone_info(mem, node))
goto free_out;
+ /* root ? */
+ if (cont->parent == NULL) {
+ enable_swap_cgroup();
+ parent = NULL;
+ } else {
+ parent = mem_cgroup_from_cont(cont->parent);
+ mem->use_hierarchy = parent->use_hierarchy;
+ }
+ if (parent && parent->use_hierarchy) {
+ res_counter_init(&mem->res, &parent->res);
+ res_counter_init(&mem->memsw, &parent->memsw);
+ } else {
+ res_counter_init(&mem->res, NULL);
+ res_counter_init(&mem->memsw, NULL);
+ }
+ mem->last_scanned_child = NULL;
+ spin_lock_init(&mem->reclaim_param_lock);
+
+ if (parent)
+ mem->swappiness = get_swappiness(parent);
+ atomic_set(&mem->refcnt, 1);
return &mem->css;
free_out:
- for_each_node_state(node, N_POSSIBLE)
- free_mem_cgroup_per_zone_info(mem, node);
- if (cont->parent != NULL)
- mem_cgroup_free(mem);
+ __mem_cgroup_free(mem);
return ERR_PTR(-ENOMEM);
}
@@ -1115,26 +2226,26 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
- mem_cgroup_force_empty(mem);
+ mem_cgroup_force_empty(mem, false);
}
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
- int node;
- struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-
- for_each_node_state(node, N_POSSIBLE)
- free_mem_cgroup_per_zone_info(mem, node);
-
- mem_cgroup_free(mem_cgroup_from_cont(cont));
+ mem_cgroup_put(mem_cgroup_from_cont(cont));
}
static int mem_cgroup_populate(struct cgroup_subsys *ss,
struct cgroup *cont)
{
- return cgroup_add_files(cont, ss, mem_cgroup_files,
- ARRAY_SIZE(mem_cgroup_files));
+ int ret;
+
+ ret = cgroup_add_files(cont, ss, mem_cgroup_files,
+ ARRAY_SIZE(mem_cgroup_files));
+
+ if (!ret)
+ ret = register_memsw_files(cont, ss);
+ return ret;
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
@@ -1142,25 +2253,12 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
struct cgroup *old_cont,
struct task_struct *p)
{
- struct mm_struct *mm;
- struct mem_cgroup *mem, *old_mem;
-
- mm = get_task_mm(p);
- if (mm == NULL)
- return;
-
- mem = mem_cgroup_from_cont(cont);
- old_mem = mem_cgroup_from_cont(old_cont);
-
+ mutex_lock(&memcg_tasklist);
/*
- * Only thread group leaders are allowed to migrate, the mm_struct is
- * in effect owned by the leader
+	 * FIXME: It would be better to move this process's charges from the
+	 * old memcg to the new one, but that is still on the TODO list.
*/
- if (!thread_group_leader(p))
- goto out;
-
-out:
- mmput(mm);
+ mutex_unlock(&memcg_tasklist);
}
struct cgroup_subsys mem_cgroup_subsys = {
@@ -1173,3 +2271,13 @@ struct cgroup_subsys mem_cgroup_subsys = {
.attach = mem_cgroup_move_task,
.early_init = 0,
};
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+
+static int __init disable_swap_account(char *s)
+{
+ really_do_swap_account = 0;
+ return 1;
+}
+__setup("noswapaccount", disable_swap_account);
+#endif
diff --git a/mm/memory.c b/mm/memory.c
index 3f8fa06b963b..e009ce870859 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2000,7 +2000,7 @@ gotten:
cow_user_page(new_page, old_page, address, vma);
__SetPageUptodate(new_page);
- if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+ if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
goto oom_free_new;
/*
@@ -2392,6 +2392,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page;
swp_entry_t entry;
pte_t pte;
+ struct mem_cgroup *ptr = NULL;
int ret = 0;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2430,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
- if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+ if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
ret = VM_FAULT_OOM;
unlock_page(page);
goto out;
@@ -2448,7 +2449,19 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_nomap;
}
- /* The page isn't present yet, go ahead with the fault. */
+ /*
+ * The page isn't present yet, go ahead with the fault.
+ *
+ * Be careful about the sequence of operations here.
+ * To get its accounting right, reuse_swap_page() must be called
+ * while the page is counted on swap but not yet in mapcount i.e.
+ * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
+ * must be called after the swap_free(), or it will never succeed.
+	 * Because delete_from_swap_cache() may be called by reuse_swap_page(),
+ * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
+ * in page->private. In this case, a record in swap_cgroup is silently
+ * discarded at swap_free().
+ */
inc_mm_counter(mm, anon_rss);
pte = mk_pte(page, vma->vm_page_prot);
@@ -2456,10 +2469,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
write_access = 0;
}
-
flush_icache_page(vma, page);
set_pte_at(mm, address, page_table, pte);
page_add_anon_rmap(page, vma, address);
+ /* It's better to call commit-charge after rmap is established */
+ mem_cgroup_commit_charge_swapin(page, ptr);
swap_free(entry);
if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
@@ -2480,7 +2494,7 @@ unlock:
out:
return ret;
out_nomap:
- mem_cgroup_uncharge_page(page);
+ mem_cgroup_cancel_charge_swapin(ptr);
pte_unmap_unlock(page_table, ptl);
unlock_page(page);
page_cache_release(page);
@@ -2510,7 +2524,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto oom;
__SetPageUptodate(page);
- if (mem_cgroup_charge(page, mm, GFP_KERNEL))
+ if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
goto oom_free_page;
entry = mk_pte(page, vma->vm_page_prot);
@@ -2601,7 +2615,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_OOM;
goto out;
}
- if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+ if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
ret = VM_FAULT_OOM;
page_cache_release(page);
goto out;
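
The memory.c hunks above replace the single mem_cgroup_charge() call on the swap-in path with a try/commit/cancel protocol. A condensed sketch of the calling pattern follows, with the fault path heavily simplified; pte_ok stands in for the "pte still matches" recheck under the page table lock and is not a real helper.

static int example_swapin_charge(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 struct page *page, unsigned long address,
				 swp_entry_t entry, int pte_ok)
{
	struct mem_cgroup *ptr = NULL;

	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
		return VM_FAULT_OOM;			/* pre-charge failed */

	if (!pte_ok) {
		mem_cgroup_cancel_charge_swapin(ptr);	/* undo the pre-charge */
		return 0;
	}

	page_add_anon_rmap(page, vma, address);
	mem_cgroup_commit_charge_swapin(page, ptr);	/* after rmap exists */
	swap_free(entry);
	return 0;
}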
diff --git a/mm/migrate.c b/mm/migrate.c
index 55373983c9c6..a30ea5fcf9f1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -121,20 +121,6 @@ static void remove_migration_pte(struct vm_area_struct *vma,
if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
goto out;
- /*
- * Yes, ignore the return value from a GFP_ATOMIC mem_cgroup_charge.
- * Failure is not an option here: we're now expected to remove every
- * migration pte, and will cause crashes otherwise. Normally this
- * is not an issue: mem_cgroup_prepare_migration bumped up the old
- * page_cgroup count for safety, that's now attached to the new page,
- * so this charge should just be another incrementation of the count,
- * to keep in balance with rmap.c's mem_cgroup_uncharging. But if
- * there's been a force_empty, those reference counts may no longer
- * be reliable, and this charge can actually fail: oh well, we don't
- * make the situation any worse by proceeding as if it had succeeded.
- */
- mem_cgroup_charge(new, mm, GFP_ATOMIC);
-
get_page(new);
pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
if (is_write_migration_entry(entry))
@@ -378,9 +364,6 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
anon = PageAnon(page);
page->mapping = NULL;
- if (!anon) /* This page was removed from radix-tree. */
- mem_cgroup_uncharge_cache_page(page);
-
/*
* If any waiters have accumulated on the new page then
* wake them up.
@@ -614,6 +597,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
struct page *newpage = get_new_page(page, private, &result);
int rcu_locked = 0;
int charge = 0;
+ struct mem_cgroup *mem;
if (!newpage)
return -ENOMEM;
@@ -623,24 +607,26 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
goto move_newpage;
}
- charge = mem_cgroup_prepare_migration(page, newpage);
- if (charge == -ENOMEM) {
- rc = -ENOMEM;
- goto move_newpage;
- }
/* prepare cgroup just returns 0 or -ENOMEM */
- BUG_ON(charge);
-
rc = -EAGAIN;
+
if (!trylock_page(page)) {
if (!force)
goto move_newpage;
lock_page(page);
}
+ /* charge against new page */
+ charge = mem_cgroup_prepare_migration(page, &mem);
+ if (charge == -ENOMEM) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+ BUG_ON(charge);
+
if (PageWriteback(page)) {
if (!force)
- goto unlock;
+ goto uncharge;
wait_on_page_writeback(page);
}
/*
@@ -693,7 +679,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
rcu_unlock:
if (rcu_locked)
rcu_read_unlock();
-
+uncharge:
+ if (!charge)
+ mem_cgroup_end_migration(mem, page, newpage);
unlock:
unlock_page(page);
@@ -709,8 +697,6 @@ unlock:
}
move_newpage:
- if (!charge)
- mem_cgroup_end_migration(newpage);
/*
* Move the new page to the LRU. If migration was not successful
diff --git a/mm/mmap.c b/mm/mmap.c
index a910c045cfd4..749623196cb9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2472,3 +2472,13 @@ void mm_drop_all_locks(struct mm_struct *mm)
mutex_unlock(&mm_all_locks_mutex);
}
+
+/*
+ * initialise the VMA slab
+ */
+void __init mmap_init(void)
+{
+ vm_area_cachep = kmem_cache_create("vm_area_struct",
+ sizeof(struct vm_area_struct), 0,
+ SLAB_PANIC, NULL);
+}
diff --git a/mm/nommu.c b/mm/nommu.c
index 1c28ea3a4e9c..60ed8375c986 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -6,11 +6,11 @@
*
* See Documentation/nommu-mmap.txt
*
- * Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
+ * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
* Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
* Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
* Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
- * Copyright (c) 2007 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (c) 2007-2008 Paul Mundt <lethal@linux-sh.org>
*/
#include <linux/module.h>
@@ -33,6 +33,28 @@
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include "internal.h"
+
+static inline __attribute__((format(printf, 1, 2)))
+void no_printk(const char *fmt, ...)
+{
+}
+
+#if 0
+#define kenter(FMT, ...) \
+ printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+ printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+ printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
+#else
+#define kenter(FMT, ...) \
+ no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) \
+ no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) \
+ no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
+#endif
#include "internal.h"
@@ -40,19 +62,22 @@ void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
-unsigned long askedalloc, realalloc;
atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int sysctl_nr_trim_pages = 1; /* page trimming behaviour */
int heap_stack_gap = 0;
+atomic_t mmap_pages_allocated;
+
EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);
-/* list of shareable VMAs */
-struct rb_root nommu_vma_tree = RB_ROOT;
-DECLARE_RWSEM(nommu_vma_sem);
+/* list of mapped, potentially shareable regions */
+static struct kmem_cache *vm_region_jar;
+struct rb_root nommu_region_tree = RB_ROOT;
+DECLARE_RWSEM(nommu_region_sem);
struct vm_operations_struct generic_file_vm_ops = {
};
@@ -124,6 +149,20 @@ unsigned int kobjsize(const void *objp)
return ksize(objp);
/*
+ * If it's not a compound page, see if we have a matching VMA
+ * region. This test is intentionally done in reverse order,
+ * so if there's no VMA, we still fall through and hand back
+ * PAGE_SIZE for 0-order pages.
+ */
+ if (!PageCompound(page)) {
+ struct vm_area_struct *vma;
+
+ vma = find_vma(current->mm, (unsigned long)objp);
+ if (vma)
+ return vma->vm_end - vma->vm_start;
+ }
+
+ /*
* The ksize() function is only guaranteed to work for pointers
* returned by kmalloc(). So handle arbitrary pointers here.
*/
@@ -401,129 +440,178 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
return mm->brk = brk;
}
-#ifdef DEBUG
-static void show_process_blocks(void)
+/*
+ * initialise the VMA and region record slabs
+ */
+void __init mmap_init(void)
{
- struct vm_list_struct *vml;
-
- printk("Process blocks %d:", current->pid);
-
- for (vml = &current->mm->context.vmlist; vml; vml = vml->next) {
- printk(" %p: %p", vml, vml->vma);
- if (vml->vma)
- printk(" (%d @%lx #%d)",
- kobjsize((void *) vml->vma->vm_start),
- vml->vma->vm_start,
- atomic_read(&vml->vma->vm_usage));
- printk(vml->next ? " ->" : ".\n");
- }
+ vm_region_jar = kmem_cache_create("vm_region_jar",
+ sizeof(struct vm_region), 0,
+ SLAB_PANIC, NULL);
+ vm_area_cachep = kmem_cache_create("vm_area_struct",
+ sizeof(struct vm_area_struct), 0,
+ SLAB_PANIC, NULL);
}
-#endif /* DEBUG */
/*
- * add a VMA into a process's mm_struct in the appropriate place in the list
- * - should be called with mm->mmap_sem held writelocked
+ * validate the region tree
+ * - the caller must hold the region lock
*/
-static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml)
+#ifdef CONFIG_DEBUG_NOMMU_REGIONS
+static noinline void validate_nommu_regions(void)
{
- struct vm_list_struct **ppv;
-
- for (ppv = &current->mm->context.vmlist; *ppv; ppv = &(*ppv)->next)
- if ((*ppv)->vma->vm_start > vml->vma->vm_start)
- break;
-
- vml->next = *ppv;
- *ppv = vml;
+ struct vm_region *region, *last;
+ struct rb_node *p, *lastp;
+
+ lastp = rb_first(&nommu_region_tree);
+ if (!lastp)
+ return;
+
+ last = rb_entry(lastp, struct vm_region, vm_rb);
+ if (unlikely(last->vm_end <= last->vm_start))
+ BUG();
+ if (unlikely(last->vm_top < last->vm_end))
+ BUG();
+
+ while ((p = rb_next(lastp))) {
+ region = rb_entry(p, struct vm_region, vm_rb);
+ last = rb_entry(lastp, struct vm_region, vm_rb);
+
+ if (unlikely(region->vm_end <= region->vm_start))
+ BUG();
+ if (unlikely(region->vm_top < region->vm_end))
+ BUG();
+ if (unlikely(region->vm_start < last->vm_top))
+ BUG();
+
+ lastp = p;
+ }
}
+#else
+#define validate_nommu_regions() do {} while(0)
+#endif
/*
- * look up the first VMA in which addr resides, NULL if none
- * - should be called with mm->mmap_sem at least held readlocked
+ * add a region into the global tree
*/
-struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+static void add_nommu_region(struct vm_region *region)
{
- struct vm_list_struct *loop, *vml;
+ struct vm_region *pregion;
+ struct rb_node **p, *parent;
- /* search the vm_start ordered list */
- vml = NULL;
- for (loop = mm->context.vmlist; loop; loop = loop->next) {
- if (loop->vma->vm_start > addr)
- break;
- vml = loop;
+ validate_nommu_regions();
+
+ BUG_ON(region->vm_start & ~PAGE_MASK);
+
+ parent = NULL;
+ p = &nommu_region_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ pregion = rb_entry(parent, struct vm_region, vm_rb);
+ if (region->vm_start < pregion->vm_start)
+ p = &(*p)->rb_left;
+ else if (region->vm_start > pregion->vm_start)
+ p = &(*p)->rb_right;
+ else if (pregion == region)
+ return;
+ else
+ BUG();
}
- if (vml && vml->vma->vm_end > addr)
- return vml->vma;
+ rb_link_node(&region->vm_rb, parent, p);
+ rb_insert_color(&region->vm_rb, &nommu_region_tree);
- return NULL;
+ validate_nommu_regions();
}
-EXPORT_SYMBOL(find_vma);
/*
- * find a VMA
- * - we don't extend stack VMAs under NOMMU conditions
+ * delete a region from the global tree
*/
-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+static void delete_nommu_region(struct vm_region *region)
{
- return find_vma(mm, addr);
-}
+ BUG_ON(!nommu_region_tree.rb_node);
-int expand_stack(struct vm_area_struct *vma, unsigned long address)
-{
- return -ENOMEM;
+ validate_nommu_regions();
+ rb_erase(&region->vm_rb, &nommu_region_tree);
+ validate_nommu_regions();
}
/*
- * look up the first VMA exactly that exactly matches addr
- * - should be called with mm->mmap_sem at least held readlocked
+ * free a contiguous series of pages
*/
-static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
- unsigned long addr)
+static void free_page_series(unsigned long from, unsigned long to)
{
- struct vm_list_struct *vml;
-
- /* search the vm_start ordered list */
- for (vml = mm->context.vmlist; vml; vml = vml->next) {
- if (vml->vma->vm_start == addr)
- return vml->vma;
- if (vml->vma->vm_start > addr)
- break;
+ for (; from < to; from += PAGE_SIZE) {
+ struct page *page = virt_to_page(from);
+
+ kdebug("- free %lx", from);
+ atomic_dec(&mmap_pages_allocated);
+ if (page_count(page) != 1)
+ kdebug("free page %p [%d]", page, page_count(page));
+ put_page(page);
}
-
- return NULL;
}
/*
- * find a VMA in the global tree
+ * release a reference to a region
+ * - the caller must hold the region semaphore, which this releases
+ * - the region may not have been added to the tree yet, in which case vm_top
+ * will equal vm_start
*/
-static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
+static void __put_nommu_region(struct vm_region *region)
+ __releases(nommu_region_sem)
{
- struct vm_area_struct *vma;
- struct rb_node *n = nommu_vma_tree.rb_node;
+ kenter("%p{%d}", region, atomic_read(&region->vm_usage));
- while (n) {
- vma = rb_entry(n, struct vm_area_struct, vm_rb);
+ BUG_ON(!nommu_region_tree.rb_node);
- if (start < vma->vm_start)
- n = n->rb_left;
- else if (start > vma->vm_start)
- n = n->rb_right;
- else
- return vma;
+ if (atomic_dec_and_test(&region->vm_usage)) {
+ if (region->vm_top > region->vm_start)
+ delete_nommu_region(region);
+ up_write(&nommu_region_sem);
+
+ if (region->vm_file)
+ fput(region->vm_file);
+
+ /* IO memory and memory shared directly out of the pagecache
+ * from ramfs/tmpfs mustn't be released here */
+ if (region->vm_flags & VM_MAPPED_COPY) {
+ kdebug("free series");
+ free_page_series(region->vm_start, region->vm_top);
+ }
+ kmem_cache_free(vm_region_jar, region);
+ } else {
+ up_write(&nommu_region_sem);
}
+}
- return NULL;
+/*
+ * release a reference to a region
+ */
+static void put_nommu_region(struct vm_region *region)
+{
+ down_write(&nommu_region_sem);
+ __put_nommu_region(region);
}
/*
- * add a VMA in the global tree
+ * add a VMA into a process's mm_struct in the appropriate place in the list
+ * and tree, and also add it to the address space's page tree if it is not an
+ * anonymous mapping
+ * - should be called with mm->mmap_sem held writelocked
*/
-static void add_nommu_vma(struct vm_area_struct *vma)
+static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
- struct vm_area_struct *pvma;
+ struct vm_area_struct *pvma, **pp;
struct address_space *mapping;
- struct rb_node **p = &nommu_vma_tree.rb_node;
- struct rb_node *parent = NULL;
+ struct rb_node **p, *parent;
+
+ kenter(",%p", vma);
+
+ BUG_ON(!vma->vm_region);
+
+ mm->map_count++;
+ vma->vm_mm = mm;
/* add the VMA to the mapping */
if (vma->vm_file) {
@@ -534,42 +622,62 @@ static void add_nommu_vma(struct vm_area_struct *vma)
flush_dcache_mmap_unlock(mapping);
}
- /* add the VMA to the master list */
+ /* add the VMA to the tree */
+ parent = NULL;
+ p = &mm->mm_rb.rb_node;
while (*p) {
parent = *p;
pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
- if (vma->vm_start < pvma->vm_start) {
+ /* sort by: start addr, end addr, VMA struct addr in that order
+ * (the latter is necessary as we may get identical VMAs) */
+ if (vma->vm_start < pvma->vm_start)
p = &(*p)->rb_left;
- }
- else if (vma->vm_start > pvma->vm_start) {
+ else if (vma->vm_start > pvma->vm_start)
p = &(*p)->rb_right;
- }
- else {
- /* mappings are at the same address - this can only
- * happen for shared-mem chardevs and shared file
- * mappings backed by ramfs/tmpfs */
- BUG_ON(!(pvma->vm_flags & VM_SHARED));
-
- if (vma < pvma)
- p = &(*p)->rb_left;
- else if (vma > pvma)
- p = &(*p)->rb_right;
- else
- BUG();
- }
+ else if (vma->vm_end < pvma->vm_end)
+ p = &(*p)->rb_left;
+ else if (vma->vm_end > pvma->vm_end)
+ p = &(*p)->rb_right;
+ else if (vma < pvma)
+ p = &(*p)->rb_left;
+ else if (vma > pvma)
+ p = &(*p)->rb_right;
+ else
+ BUG();
}
rb_link_node(&vma->vm_rb, parent, p);
- rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
+ rb_insert_color(&vma->vm_rb, &mm->mm_rb);
+
+ /* add VMA to the VMA list also */
+ for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
+ if (pvma->vm_start > vma->vm_start)
+ break;
+ if (pvma->vm_start < vma->vm_start)
+ continue;
+ if (pvma->vm_end < vma->vm_end)
+ break;
+ }
+
+ vma->vm_next = *pp;
+ *pp = vma;
}
/*
- * delete a VMA from the global list
+ * delete a VMA from its owning mm_struct and address space
*/
-static void delete_nommu_vma(struct vm_area_struct *vma)
+static void delete_vma_from_mm(struct vm_area_struct *vma)
{
+ struct vm_area_struct **pp;
struct address_space *mapping;
+ struct mm_struct *mm = vma->vm_mm;
+
+ kenter("%p", vma);
+
+ mm->map_count--;
+ if (mm->mmap_cache == vma)
+ mm->mmap_cache = NULL;
/* remove the VMA from the mapping */
if (vma->vm_file) {
@@ -580,8 +688,115 @@ static void delete_nommu_vma(struct vm_area_struct *vma)
flush_dcache_mmap_unlock(mapping);
}
- /* remove from the master list */
- rb_erase(&vma->vm_rb, &nommu_vma_tree);
+ /* remove from the MM's tree and list */
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
+ if (*pp == vma) {
+ *pp = vma->vm_next;
+ break;
+ }
+ }
+
+ vma->vm_mm = NULL;
+}
+
+/*
+ * destroy a VMA record
+ */
+static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ kenter("%p", vma);
+ if (vma->vm_ops && vma->vm_ops->close)
+ vma->vm_ops->close(vma);
+ if (vma->vm_file) {
+ fput(vma->vm_file);
+ if (vma->vm_flags & VM_EXECUTABLE)
+ removed_exe_file_vma(mm);
+ }
+ put_nommu_region(vma->vm_region);
+ kmem_cache_free(vm_area_cachep, vma);
+}
+
+/*
+ * look up the first VMA in which addr resides, NULL if none
+ * - should be called with mm->mmap_sem at least held readlocked
+ */
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+{
+ struct vm_area_struct *vma;
+ struct rb_node *n = mm->mm_rb.rb_node;
+
+ /* check the cache first */
+ vma = mm->mmap_cache;
+ if (vma && vma->vm_start <= addr && vma->vm_end > addr)
+ return vma;
+
+ /* trawl the tree (there may be multiple mappings in which addr
+ * resides) */
+ for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+ vma = rb_entry(n, struct vm_area_struct, vm_rb);
+ if (vma->vm_start > addr)
+ return NULL;
+ if (vma->vm_end > addr) {
+ mm->mmap_cache = vma;
+ return vma;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(find_vma);
+
+/*
+ * find a VMA
+ * - we don't extend stack VMAs under NOMMU conditions
+ */
+struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
+{
+ return find_vma(mm, addr);
+}
+
+/*
+ * expand a stack to a given address
+ * - not supported under NOMMU conditions
+ */
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+ return -ENOMEM;
+}
+
+/*
+ * look up the first VMA that exactly matches addr
+ * - should be called with mm->mmap_sem at least held readlocked
+ */
+static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len)
+{
+ struct vm_area_struct *vma;
+ struct rb_node *n = mm->mm_rb.rb_node;
+ unsigned long end = addr + len;
+
+ /* check the cache first */
+ vma = mm->mmap_cache;
+ if (vma && vma->vm_start == addr && vma->vm_end == end)
+ return vma;
+
+ /* trawl the tree (there may be multiple mappings in which addr
+ * resides) */
+ for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
+ vma = rb_entry(n, struct vm_area_struct, vm_rb);
+ if (vma->vm_start < addr)
+ continue;
+ if (vma->vm_start > addr)
+ return NULL;
+ if (vma->vm_end == end) {
+ mm->mmap_cache = vma;
+ return vma;
+ }
+ }
+
+ return NULL;
}
/*
@@ -596,7 +811,7 @@ static int validate_mmap_request(struct file *file,
unsigned long pgoff,
unsigned long *_capabilities)
{
- unsigned long capabilities;
+ unsigned long capabilities, rlen;
unsigned long reqprot = prot;
int ret;
@@ -616,12 +831,12 @@ static int validate_mmap_request(struct file *file,
return -EINVAL;
/* Careful about overflows.. */
- len = PAGE_ALIGN(len);
- if (!len || len > TASK_SIZE)
+ rlen = PAGE_ALIGN(len);
+ if (!rlen || rlen > TASK_SIZE)
return -ENOMEM;
/* offset overflow? */
- if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
+ if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
return -EOVERFLOW;
if (file) {
@@ -795,13 +1010,18 @@ static unsigned long determine_vm_flags(struct file *file,
}
/*
- * set up a shared mapping on a file
+ * set up a shared mapping on a file (the driver or filesystem provides and
+ * pins the storage)
*/
-static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
+static int do_mmap_shared_file(struct vm_area_struct *vma)
{
int ret;
ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
+ if (ret == 0) {
+ vma->vm_region->vm_top = vma->vm_region->vm_end;
+ return ret;
+ }
if (ret != -ENOSYS)
return ret;
@@ -815,10 +1035,14 @@ static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
/*
* set up a private mapping or an anonymous shared mapping
*/
-static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
+static int do_mmap_private(struct vm_area_struct *vma,
+ struct vm_region *region,
+ unsigned long len)
{
+ struct page *pages;
+ unsigned long total, point, n, rlen;
void *base;
- int ret;
+ int ret, order;
/* invoke the file's mapping function so that it can keep track of
* shared mappings on devices or memory
@@ -826,34 +1050,63 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
*/
if (vma->vm_file) {
ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
- if (ret != -ENOSYS) {
+ if (ret == 0) {
/* shouldn't return success if we're not sharing */
- BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
- return ret; /* success or a real error */
+ BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
+ vma->vm_region->vm_top = vma->vm_region->vm_end;
+ return ret;
}
+ if (ret != -ENOSYS)
+ return ret;
/* getting an ENOSYS error indicates that direct mmap isn't
* possible (as opposed to tried but failed) so we'll try to
* make a private copy of the data and map that instead */
}
+ rlen = PAGE_ALIGN(len);
+
/* allocate some memory to hold the mapping
* - note that this may not return a page-aligned address if the object
* we're allocating is smaller than a page
*/
- base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
- if (!base)
+ order = get_order(rlen);
+ kdebug("alloc order %d for %lx", order, len);
+
+ pages = alloc_pages(GFP_KERNEL, order);
+ if (!pages)
goto enomem;
- vma->vm_start = (unsigned long) base;
- vma->vm_end = vma->vm_start + len;
- vma->vm_flags |= VM_MAPPED_COPY;
+ total = 1 << order;
+ atomic_add(total, &mmap_pages_allocated);
+
+ point = rlen >> PAGE_SHIFT;
+
+ /* we allocated a power-of-2 sized page set, so we may want to trim off
+ * the excess */
+ if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
+ while (total > point) {
+ order = ilog2(total - point);
+ n = 1 << order;
+ kdebug("shave %lu/%lu @%lu", n, total - point, total);
+ atomic_sub(n, &mmap_pages_allocated);
+ total -= n;
+ set_page_refcounted(pages + total);
+ __free_pages(pages + total, order);
+ }
+ }
+
+ for (point = 1; point < total; point++)
+ set_page_refcounted(&pages[point]);
-#ifdef WARN_ON_SLACK
- if (len + WARN_ON_SLACK <= kobjsize(result))
- printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
- len, current->pid, kobjsize(result) - len);
-#endif
+ base = page_address(pages);
+ region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
+ region->vm_start = (unsigned long) base;
+ region->vm_end = region->vm_start + rlen;
+ region->vm_top = region->vm_start + (total << PAGE_SHIFT);
+
+ vma->vm_start = region->vm_start;
+ vma->vm_end = region->vm_start + len;
if (vma->vm_file) {
/* read the contents of a file into the copy */
@@ -865,26 +1118,28 @@ static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
+ ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
set_fs(old_fs);
if (ret < 0)
goto error_free;
/* clear the last little bit */
- if (ret < len)
- memset(base + ret, 0, len - ret);
+ if (ret < rlen)
+ memset(base + ret, 0, rlen - ret);
} else {
/* if it's an anonymous mapping, then just clear it */
- memset(base, 0, len);
+ memset(base, 0, rlen);
}
return 0;
error_free:
- kfree(base);
- vma->vm_start = 0;
+ free_page_series(region->vm_start, region->vm_end);
+ region->vm_start = vma->vm_start = 0;
+ region->vm_end = vma->vm_end = 0;
+ region->vm_top = 0;
return ret;
enomem:
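
As a worked example of the trimming arithmetic above (assuming 4 KiB pages and the default sysctl_nr_trim_pages of 1): a three-page request draws a four-page power-of-two allocation and then hands the excess page straight back.

static void example_trim_arithmetic(void)
{
	unsigned long len   = 3 * PAGE_SIZE;		/* caller wants 3 pages */
	unsigned long rlen  = PAGE_ALIGN(len);		/* 12288 */
	int order           = get_order(rlen);		/* 2: alloc_pages() returns 4 pages */
	unsigned long total = 1UL << order;		/* 4 */
	unsigned long point = rlen >> PAGE_SHIFT;	/* 3 */

	/*
	 * total - point == 1 >= sysctl_nr_trim_pages, so the trim loop runs
	 * once: ilog2(1) == 0, n == 1, and pages[3] goes back to the
	 * allocator.  vm_end and vm_top then both sit at vm_start + 12288;
	 * with trimming disabled, vm_top would stay at vm_start + 16384.
	 */
	(void)len; (void)order; (void)total; (void)point;
}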
@@ -904,13 +1159,14 @@ unsigned long do_mmap_pgoff(struct file *file,
unsigned long flags,
unsigned long pgoff)
{
- struct vm_list_struct *vml = NULL;
- struct vm_area_struct *vma = NULL;
+ struct vm_area_struct *vma;
+ struct vm_region *region;
struct rb_node *rb;
- unsigned long capabilities, vm_flags;
- void *result;
+ unsigned long capabilities, vm_flags, result;
int ret;
+ kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
+
if (!(flags & MAP_FIXED))
addr = round_hint_to_min(addr);
@@ -918,73 +1174,120 @@ unsigned long do_mmap_pgoff(struct file *file,
* mapping */
ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
&capabilities);
- if (ret < 0)
+ if (ret < 0) {
+ kleave(" = %d [val]", ret);
return ret;
+ }
/* we've determined that we can make the mapping, now translate what we
* now know into VMA flags */
vm_flags = determine_vm_flags(file, prot, flags, capabilities);
- /* we're going to need to record the mapping if it works */
- vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
- if (!vml)
- goto error_getting_vml;
+ /* we're going to need to record the mapping */
+ region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
+ if (!region)
+ goto error_getting_region;
+
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ if (!vma)
+ goto error_getting_vma;
+
+ atomic_set(&region->vm_usage, 1);
+ region->vm_flags = vm_flags;
+ region->vm_pgoff = pgoff;
+
+ INIT_LIST_HEAD(&vma->anon_vma_node);
+ vma->vm_flags = vm_flags;
+ vma->vm_pgoff = pgoff;
- down_write(&nommu_vma_sem);
+ if (file) {
+ region->vm_file = file;
+ get_file(file);
+ vma->vm_file = file;
+ get_file(file);
+ if (vm_flags & VM_EXECUTABLE) {
+ added_exe_file_vma(current->mm);
+ vma->vm_mm = current->mm;
+ }
+ }
- /* if we want to share, we need to check for VMAs created by other
+ down_write(&nommu_region_sem);
+
+ /* if we want to share, we need to check for regions created by other
* mmap() calls that overlap with our proposed mapping
- * - we can only share with an exact match on most regular files
+ * - we can only share with a superset match on most regular files
* - shared mappings on character devices and memory backed files are
* permitted to overlap inexactly as far as we are concerned for in
* these cases, sharing is handled in the driver or filesystem rather
* than here
*/
if (vm_flags & VM_MAYSHARE) {
- unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
- unsigned long vmpglen;
+ struct vm_region *pregion;
+ unsigned long pglen, rpglen, pgend, rpgend, start;
- /* suppress VMA sharing for shared regions */
- if (vm_flags & VM_SHARED &&
- capabilities & BDI_CAP_MAP_DIRECT)
- goto dont_share_VMAs;
+ pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ pgend = pgoff + pglen;
- for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
- vma = rb_entry(rb, struct vm_area_struct, vm_rb);
+ for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
+ pregion = rb_entry(rb, struct vm_region, vm_rb);
- if (!(vma->vm_flags & VM_MAYSHARE))
+ if (!(pregion->vm_flags & VM_MAYSHARE))
continue;
/* search for overlapping mappings on the same file */
- if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
+ if (pregion->vm_file->f_path.dentry->d_inode !=
+ file->f_path.dentry->d_inode)
continue;
- if (vma->vm_pgoff >= pgoff + pglen)
+ if (pregion->vm_pgoff >= pgend)
continue;
- vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
- vmpglen >>= PAGE_SHIFT;
- if (pgoff >= vma->vm_pgoff + vmpglen)
+ rpglen = pregion->vm_end - pregion->vm_start;
+ rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ rpgend = pregion->vm_pgoff + rpglen;
+ if (pgoff >= rpgend)
continue;
- /* handle inexactly overlapping matches between mappings */
- if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
+ /* handle inexactly overlapping matches between
+ * mappings */
+ if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
+ !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
+ /* new mapping is not a subset of the region */
if (!(capabilities & BDI_CAP_MAP_DIRECT))
goto sharing_violation;
continue;
}
- /* we've found a VMA we can share */
- atomic_inc(&vma->vm_usage);
-
- vml->vma = vma;
- result = (void *) vma->vm_start;
- goto shared;
+ /* we've found a region we can share */
+ atomic_inc(&pregion->vm_usage);
+ vma->vm_region = pregion;
+ start = pregion->vm_start;
+ start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
+ vma->vm_start = start;
+ vma->vm_end = start + len;
+
+ if (pregion->vm_flags & VM_MAPPED_COPY) {
+ kdebug("share copy");
+ vma->vm_flags |= VM_MAPPED_COPY;
+ } else {
+ kdebug("share mmap");
+ ret = do_mmap_shared_file(vma);
+ if (ret < 0) {
+ vma->vm_region = NULL;
+ vma->vm_start = 0;
+ vma->vm_end = 0;
+ atomic_dec(&pregion->vm_usage);
+ pregion = NULL;
+ goto error_just_free;
+ }
+ }
+ fput(region->vm_file);
+ kmem_cache_free(vm_region_jar, region);
+ region = pregion;
+ result = start;
+ goto share;
}
- dont_share_VMAs:
- vma = NULL;
-
/* obtain the address at which to make a shared mapping
* - this is the hook for quasi-memory character devices to
* tell us the location of a shared mapping
@@ -995,113 +1298,93 @@ unsigned long do_mmap_pgoff(struct file *file,
if (IS_ERR((void *) addr)) {
ret = addr;
if (ret != (unsigned long) -ENOSYS)
- goto error;
+ goto error_just_free;
/* the driver refused to tell us where to site
* the mapping so we'll have to attempt to copy
* it */
ret = (unsigned long) -ENODEV;
if (!(capabilities & BDI_CAP_MAP_COPY))
- goto error;
+ goto error_just_free;
capabilities &= ~BDI_CAP_MAP_DIRECT;
+ } else {
+ vma->vm_start = region->vm_start = addr;
+ vma->vm_end = region->vm_end = addr + len;
}
}
}
- /* we're going to need a VMA struct as well */
- vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
- if (!vma)
- goto error_getting_vma;
-
- INIT_LIST_HEAD(&vma->anon_vma_node);
- atomic_set(&vma->vm_usage, 1);
- if (file) {
- get_file(file);
- if (vm_flags & VM_EXECUTABLE) {
- added_exe_file_vma(current->mm);
- vma->vm_mm = current->mm;
- }
- }
- vma->vm_file = file;
- vma->vm_flags = vm_flags;
- vma->vm_start = addr;
- vma->vm_end = addr + len;
- vma->vm_pgoff = pgoff;
-
- vml->vma = vma;
+ vma->vm_region = region;
/* set up the mapping */
if (file && vma->vm_flags & VM_SHARED)
- ret = do_mmap_shared_file(vma, len);
+ ret = do_mmap_shared_file(vma);
else
- ret = do_mmap_private(vma, len);
+ ret = do_mmap_private(vma, region, len);
if (ret < 0)
- goto error;
-
- /* okay... we have a mapping; now we have to register it */
- result = (void *) vma->vm_start;
+ goto error_put_region;
- if (vma->vm_flags & VM_MAPPED_COPY) {
- realalloc += kobjsize(result);
- askedalloc += len;
- }
+ add_nommu_region(region);
- realalloc += kobjsize(vma);
- askedalloc += sizeof(*vma);
+ /* okay... we have a mapping; now we have to register it */
+ result = vma->vm_start;
current->mm->total_vm += len >> PAGE_SHIFT;
- add_nommu_vma(vma);
-
- shared:
- realalloc += kobjsize(vml);
- askedalloc += sizeof(*vml);
-
- add_vma_to_mm(current->mm, vml);
+share:
+ add_vma_to_mm(current->mm, vma);
- up_write(&nommu_vma_sem);
+ up_write(&nommu_region_sem);
if (prot & PROT_EXEC)
- flush_icache_range((unsigned long) result,
- (unsigned long) result + len);
+ flush_icache_range(result, result + len);
-#ifdef DEBUG
- printk("do_mmap:\n");
- show_process_blocks();
-#endif
-
- return (unsigned long) result;
+ kleave(" = %lx", result);
+ return result;
- error:
- up_write(&nommu_vma_sem);
- kfree(vml);
+error_put_region:
+ __put_nommu_region(region);
if (vma) {
if (vma->vm_file) {
fput(vma->vm_file);
if (vma->vm_flags & VM_EXECUTABLE)
removed_exe_file_vma(vma->vm_mm);
}
- kfree(vma);
+ kmem_cache_free(vm_area_cachep, vma);
}
+ kleave(" = %d [pr]", ret);
return ret;
- sharing_violation:
- up_write(&nommu_vma_sem);
- printk("Attempt to share mismatched mappings\n");
- kfree(vml);
- return -EINVAL;
+error_just_free:
+ up_write(&nommu_region_sem);
+error:
+ fput(region->vm_file);
+ kmem_cache_free(vm_region_jar, region);
+ fput(vma->vm_file);
+ if (vma->vm_flags & VM_EXECUTABLE)
+ removed_exe_file_vma(vma->vm_mm);
+ kmem_cache_free(vm_area_cachep, vma);
+ kleave(" = %d", ret);
+ return ret;
- error_getting_vma:
- up_write(&nommu_vma_sem);
- kfree(vml);
- printk("Allocation of vma for %lu byte allocation from process %d failed\n",
+sharing_violation:
+ up_write(&nommu_region_sem);
+ printk(KERN_WARNING "Attempt to share mismatched mappings\n");
+ ret = -EINVAL;
+ goto error;
+
+error_getting_vma:
+ kmem_cache_free(vm_region_jar, region);
+ printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
+ " from process %d failed\n",
len, current->pid);
show_free_areas();
return -ENOMEM;
- error_getting_vml:
- printk("Allocation of vml for %lu byte allocation from process %d failed\n",
+error_getting_region:
+ printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
+ " from process %d failed\n",
len, current->pid);
show_free_areas();
return -ENOMEM;
@@ -1109,85 +1392,183 @@ unsigned long do_mmap_pgoff(struct file *file,
EXPORT_SYMBOL(do_mmap_pgoff);
/*
- * handle mapping disposal for uClinux
+ * split a vma into two pieces at address 'addr', a new vma is allocated either
+ * for the first part or the tail.
*/
-static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, int new_below)
{
- if (vma) {
- down_write(&nommu_vma_sem);
+ struct vm_area_struct *new;
+ struct vm_region *region;
+ unsigned long npages;
- if (atomic_dec_and_test(&vma->vm_usage)) {
- delete_nommu_vma(vma);
+ kenter("");
- if (vma->vm_ops && vma->vm_ops->close)
- vma->vm_ops->close(vma);
+ /* we're only permitted to split anonymous regions that have a single
+ * owner */
+ if (vma->vm_file ||
+ atomic_read(&vma->vm_region->vm_usage) != 1)
+ return -ENOMEM;
- /* IO memory and memory shared directly out of the pagecache from
- * ramfs/tmpfs mustn't be released here */
- if (vma->vm_flags & VM_MAPPED_COPY) {
- realalloc -= kobjsize((void *) vma->vm_start);
- askedalloc -= vma->vm_end - vma->vm_start;
- kfree((void *) vma->vm_start);
- }
+ if (mm->map_count >= sysctl_max_map_count)
+ return -ENOMEM;
- realalloc -= kobjsize(vma);
- askedalloc -= sizeof(*vma);
+ region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
- if (vma->vm_file) {
- fput(vma->vm_file);
- if (vma->vm_flags & VM_EXECUTABLE)
- removed_exe_file_vma(mm);
- }
- kfree(vma);
- }
+ new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ if (!new) {
+ kmem_cache_free(vm_region_jar, region);
+ return -ENOMEM;
+ }
+
+ /* most fields are the same, copy all, and then fixup */
+ *new = *vma;
+ *region = *vma->vm_region;
+ new->vm_region = region;
+
+ npages = (addr - vma->vm_start) >> PAGE_SHIFT;
- up_write(&nommu_vma_sem);
+ if (new_below) {
+ region->vm_top = region->vm_end = new->vm_end = addr;
+ } else {
+ region->vm_start = new->vm_start = addr;
+ region->vm_pgoff = new->vm_pgoff += npages;
+ }
+
+ if (new->vm_ops && new->vm_ops->open)
+ new->vm_ops->open(new);
+
+ delete_vma_from_mm(vma);
+ down_write(&nommu_region_sem);
+ delete_nommu_region(vma->vm_region);
+ if (new_below) {
+ vma->vm_region->vm_start = vma->vm_start = addr;
+ vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
+ } else {
+ vma->vm_region->vm_end = vma->vm_end = addr;
+ vma->vm_region->vm_top = addr;
}
+ add_nommu_region(vma->vm_region);
+ add_nommu_region(new->vm_region);
+ up_write(&nommu_region_sem);
+ add_vma_to_mm(mm, vma);
+ add_vma_to_mm(mm, new);
+ return 0;
}
/*
- * release a mapping
- * - under NOMMU conditions the parameters must match exactly to the mapping to
- * be removed
+ * shrink a VMA by removing the specified chunk from either the beginning or
+ * the end
*/
-int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
+static int shrink_vma(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long from, unsigned long to)
{
- struct vm_list_struct *vml, **parent;
- unsigned long end = addr + len;
+ struct vm_region *region;
-#ifdef DEBUG
- printk("do_munmap:\n");
-#endif
+ kenter("");
- for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) {
- if ((*parent)->vma->vm_start > addr)
- break;
- if ((*parent)->vma->vm_start == addr &&
- ((len == 0) || ((*parent)->vma->vm_end == end)))
- goto found;
+ /* adjust the VMA's pointers, which may reposition it in the MM's tree
+ * and list */
+ delete_vma_from_mm(vma);
+ if (from > vma->vm_start)
+ vma->vm_end = from;
+ else
+ vma->vm_start = to;
+ add_vma_to_mm(mm, vma);
+
+ /* cut the backing region down to size */
+ region = vma->vm_region;
+ BUG_ON(atomic_read(&region->vm_usage) != 1);
+
+ down_write(&nommu_region_sem);
+ delete_nommu_region(region);
+ if (from > region->vm_start) {
+ to = region->vm_top;
+ region->vm_top = region->vm_end = from;
+ } else {
+ region->vm_start = to;
}
+ add_nommu_region(region);
+ up_write(&nommu_region_sem);
- printk("munmap of non-mmaped memory by process %d (%s): %p\n",
- current->pid, current->comm, (void *) addr);
- return -EINVAL;
+ free_page_series(from, to);
+ return 0;
+}
- found:
- vml = *parent;
+/*
+ * release a mapping
+ * - under NOMMU conditions the chunk to be unmapped must be backed by a single
+ * VMA, though it need not cover the whole VMA
+ */
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
+{
+ struct vm_area_struct *vma;
+ struct rb_node *rb;
+ unsigned long end = start + len;
+ int ret;
- put_vma(mm, vml->vma);
+ kenter(",%lx,%zx", start, len);
- *parent = vml->next;
- realalloc -= kobjsize(vml);
- askedalloc -= sizeof(*vml);
- kfree(vml);
+ if (len == 0)
+ return -EINVAL;
- update_hiwater_vm(mm);
- mm->total_vm -= len >> PAGE_SHIFT;
+ /* find the first potentially overlapping VMA */
+ vma = find_vma(mm, start);
+ if (!vma) {
+ printk(KERN_WARNING
+ "munmap of memory not mmapped by process %d (%s):"
+ " 0x%lx-0x%lx\n",
+ current->pid, current->comm, start, start + len - 1);
+ return -EINVAL;
+ }
-#ifdef DEBUG
- show_process_blocks();
-#endif
+ /* we're allowed to split an anonymous VMA but not a file-backed one */
+ if (vma->vm_file) {
+ do {
+ if (start > vma->vm_start) {
+ kleave(" = -EINVAL [miss]");
+ return -EINVAL;
+ }
+ if (end == vma->vm_end)
+ goto erase_whole_vma;
+ rb = rb_next(&vma->vm_rb);
+ vma = rb_entry(rb, struct vm_area_struct, vm_rb);
+ } while (rb);
+ kleave(" = -EINVAL [split file]");
+ return -EINVAL;
+ } else {
+ /* the chunk must be a subset of the VMA found */
+ if (start == vma->vm_start && end == vma->vm_end)
+ goto erase_whole_vma;
+ if (start < vma->vm_start || end > vma->vm_end) {
+ kleave(" = -EINVAL [superset]");
+ return -EINVAL;
+ }
+ if (start & ~PAGE_MASK) {
+ kleave(" = -EINVAL [unaligned start]");
+ return -EINVAL;
+ }
+ if (end != vma->vm_end && end & ~PAGE_MASK) {
+ kleave(" = -EINVAL [unaligned split]");
+ return -EINVAL;
+ }
+ if (start != vma->vm_start && end != vma->vm_end) {
+ ret = split_vma(mm, vma, start, 1);
+ if (ret < 0) {
+ kleave(" = %d [split]", ret);
+ return ret;
+ }
+ }
+ return shrink_vma(mm, vma, start, end);
+ }
+erase_whole_vma:
+ delete_vma_from_mm(vma);
+ delete_vma(mm, vma);
+ kleave(" = 0");
return 0;
}
EXPORT_SYMBOL(do_munmap);
@@ -1204,32 +1585,26 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
}
/*
- * Release all mappings
+ * release all the mappings made in a process's VM space
*/
-void exit_mmap(struct mm_struct * mm)
+void exit_mmap(struct mm_struct *mm)
{
- struct vm_list_struct *tmp;
-
- if (mm) {
-#ifdef DEBUG
- printk("Exit_mmap:\n");
-#endif
+ struct vm_area_struct *vma;
- mm->total_vm = 0;
+ if (!mm)
+ return;
- while ((tmp = mm->context.vmlist)) {
- mm->context.vmlist = tmp->next;
- put_vma(mm, tmp->vma);
+ kenter("");
- realalloc -= kobjsize(tmp);
- askedalloc -= sizeof(*tmp);
- kfree(tmp);
- }
+ mm->total_vm = 0;
-#ifdef DEBUG
- show_process_blocks();
-#endif
+ while ((vma = mm->mmap)) {
+ mm->mmap = vma->vm_next;
+ delete_vma_from_mm(vma);
+ delete_vma(mm, vma);
}
+
+ kleave("");
}
unsigned long do_brk(unsigned long addr, unsigned long len)
@@ -1242,8 +1617,8 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
* time (controlled by the MREMAP_MAYMOVE flag and available VM space)
*
* under NOMMU conditions, we only permit changing a mapping's size, and only
- * as long as it stays within the hole allocated by the kmalloc() call in
- * do_mmap_pgoff() and the block is not shareable
+ * as long as it stays within the region allocated by do_mmap_private() and the
+ * block is not shareable
*
* MREMAP_FIXED is not supported under NOMMU conditions
*/
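
One consequence of these rules, as a hypothetical illustration (4 KiB pages assumed): a 5000-byte private anonymous mapping is backed by an 8192-byte region, so it can be grown in place to two pages but no further.

static void example_mremap_limit(unsigned long addr)
{
	/* addr maps 5000 bytes; do_mmap_private() sized its region to 8192 */
	unsigned long grown = do_mremap(addr, 5000, 8192, 0, addr);	/* fits the region */
	unsigned long fails = do_mremap(addr, 8192, 12288, 0, addr);	/* -ENOMEM */

	(void)grown; (void)fails;
}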
@@ -1254,13 +1629,16 @@ unsigned long do_mremap(unsigned long addr,
struct vm_area_struct *vma;
/* insanity checks first */
- if (new_len == 0)
+ if (old_len == 0 || new_len == 0)
return (unsigned long) -EINVAL;
+ if (addr & ~PAGE_MASK)
+ return -EINVAL;
+
if (flags & MREMAP_FIXED && new_addr != addr)
return (unsigned long) -EINVAL;
- vma = find_vma_exact(current->mm, addr);
+ vma = find_vma_exact(current->mm, addr, old_len);
if (!vma)
return (unsigned long) -EINVAL;
@@ -1270,22 +1648,19 @@ unsigned long do_mremap(unsigned long addr,
if (vma->vm_flags & VM_MAYSHARE)
return (unsigned long) -EPERM;
- if (new_len > kobjsize((void *) addr))
+ if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
return (unsigned long) -ENOMEM;
/* all checks complete - do it */
vma->vm_end = vma->vm_start + new_len;
-
- askedalloc -= old_len;
- askedalloc += new_len;
-
return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);
-asmlinkage unsigned long sys_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr)
+asmlinkage
+unsigned long sys_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr)
{
unsigned long ret;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6b9e758c98a5..40ba05061a4f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -429,7 +429,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
unsigned long points = 0;
struct task_struct *p;
- cgroup_lock();
read_lock(&tasklist_lock);
retry:
p = select_bad_process(&points, mem);
@@ -444,7 +443,6 @@ retry:
goto retry;
out:
read_unlock(&tasklist_lock);
- cgroup_unlock();
}
#endif
@@ -560,6 +558,13 @@ void pagefault_out_of_memory(void)
/* Got some memory back in the last second. */
return;
+ /*
+	 * If this came from a memcg, the OOM killer has already been
+	 * invoked, so it is not worth going to a system-wide OOM.
+ */
+ if (mem_cgroup_oom_called(current))
+ goto rest_and_return;
+
if (sysctl_panic_on_oom)
panic("out of memory from page fault. panic_on_oom is selected.\n");
@@ -571,6 +576,7 @@ void pagefault_out_of_memory(void)
* Give "p" a good chance of killing itself before we
* retry to allocate memory.
*/
+rest_and_return:
if (!test_thread_flag(TIF_MEMDIE))
schedule_timeout_uninterruptible(1);
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7bf22e045318..5675b3073854 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3523,10 +3523,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
INIT_LIST_HEAD(&zone->lru[l].list);
zone->lru[l].nr_scan = 0;
}
- zone->recent_rotated[0] = 0;
- zone->recent_rotated[1] = 0;
- zone->recent_scanned[0] = 0;
- zone->recent_scanned[1] = 0;
+ zone->reclaim_stat.recent_rotated[0] = 0;
+ zone->reclaim_stat.recent_rotated[1] = 0;
+ zone->reclaim_stat.recent_scanned[0] = 0;
+ zone->reclaim_stat.recent_scanned[1] = 0;
zap_zone_vm_stats(zone);
zone->flags = 0;
if (!size)
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index d6507a660ed6..7006a11350c8 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -8,6 +8,7 @@
#include <linux/memory.h>
#include <linux/vmalloc.h>
#include <linux/cgroup.h>
+#include <linux/swapops.h>
static void __meminit
__init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
@@ -15,6 +16,7 @@ __init_page_cgroup(struct page_cgroup *pc, unsigned long pfn)
pc->flags = 0;
pc->mem_cgroup = NULL;
pc->page = pfn_to_page(pfn);
+ INIT_LIST_HEAD(&pc->lru);
}
static unsigned long total_usage;
@@ -72,7 +74,7 @@ void __init page_cgroup_init(void)
int nid, fail;
- if (mem_cgroup_subsys.disabled)
+ if (mem_cgroup_disabled())
return;
for_each_online_node(nid) {
@@ -103,13 +105,11 @@ struct page_cgroup *lookup_page_cgroup(struct page *page)
/* __alloc_bootmem...() is protected by !slab_available() */
static int __init_refok init_section_page_cgroup(unsigned long pfn)
{
- struct mem_section *section;
+ struct mem_section *section = __pfn_to_section(pfn);
struct page_cgroup *base, *pc;
unsigned long table_size;
int nid, index;
- section = __pfn_to_section(pfn);
-
if (!section->page_cgroup) {
nid = page_to_nid(pfn_to_page(pfn));
table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
@@ -145,7 +145,6 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
__init_page_cgroup(pc, pfn + index);
}
- section = __pfn_to_section(pfn);
section->page_cgroup = base - pfn;
total_usage += table_size;
return 0;
@@ -248,7 +247,7 @@ void __init page_cgroup_init(void)
unsigned long pfn;
int fail = 0;
- if (mem_cgroup_subsys.disabled)
+ if (mem_cgroup_disabled())
return;
for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) {
@@ -273,3 +272,199 @@ void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
}
#endif
+
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
+
+static DEFINE_MUTEX(swap_cgroup_mutex);
+struct swap_cgroup_ctrl {
+ struct page **map;
+ unsigned long length;
+};
+
+struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];
+
+/*
+ * These 8 bytes per entry seem big; maybe we can reduce this once we can use
+ * an "id" for the cgroup rather than a pointer.
+ */
+struct swap_cgroup {
+ struct mem_cgroup *val;
+};
+#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup))
+#define SC_POS_MASK (SC_PER_PAGE - 1)
+
+/*
+ * SwapCgroup implements "lookup" and "exchange" operations.
+ * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge
+ * against SwapCache. At swap_free(), this is accessed directly from swap.
+ *
+ * This means:
+ *  - we have no race in "exchange" when we're accessed via SwapCache, because
+ *    the SwapCache (and its swp_entry) is under lock.
+ *  - when called via swap_free(), there is no user of this entry and no race.
+ * Therefore, we don't need a lock around "exchange".
+ *
+ * TODO: we can push these buffers out to HIGHMEM.
+ */
+
+/*
+ * allocate buffer for swap_cgroup.
+ */
+static int swap_cgroup_prepare(int type)
+{
+ struct page *page;
+ struct swap_cgroup_ctrl *ctrl;
+ unsigned long idx, max;
+
+ if (!do_swap_account)
+ return 0;
+ ctrl = &swap_cgroup_ctrl[type];
+
+ for (idx = 0; idx < ctrl->length; idx++) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto not_enough_page;
+ ctrl->map[idx] = page;
+ }
+ return 0;
+not_enough_page:
+ max = idx;
+ for (idx = 0; idx < max; idx++)
+ __free_page(ctrl->map[idx]);
+
+ return -ENOMEM;
+}
+
+/**
+ * swap_cgroup_record - record mem_cgroup for this swp_entry.
+ * @ent: swap entry to be recorded into
+ * @mem: mem_cgroup to be recorded
+ *
+ * Returns the old value on success and NULL on failure.
+ * (Of course, the old value can itself be NULL.)
+ */
+struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem)
+{
+ int type = swp_type(ent);
+ unsigned long offset = swp_offset(ent);
+ unsigned long idx = offset / SC_PER_PAGE;
+ unsigned long pos = offset & SC_POS_MASK;
+ struct swap_cgroup_ctrl *ctrl;
+ struct page *mappage;
+ struct swap_cgroup *sc;
+ struct mem_cgroup *old;
+
+ if (!do_swap_account)
+ return NULL;
+
+ ctrl = &swap_cgroup_ctrl[type];
+
+ mappage = ctrl->map[idx];
+ sc = page_address(mappage);
+ sc += pos;
+ old = sc->val;
+ sc->val = mem;
+
+ return old;
+}
+
+/**
+ * lookup_swap_cgroup - lookup mem_cgroup tied to swap entry
+ * @ent: swap entry to be looked up.
+ *
+ * Returns a pointer to the mem_cgroup on success, NULL on failure.
+ */
+struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent)
+{
+ int type = swp_type(ent);
+ unsigned long offset = swp_offset(ent);
+ unsigned long idx = offset / SC_PER_PAGE;
+ unsigned long pos = offset & SC_POS_MASK;
+ struct swap_cgroup_ctrl *ctrl;
+ struct page *mappage;
+ struct swap_cgroup *sc;
+ struct mem_cgroup *ret;
+
+ if (!do_swap_account)
+ return NULL;
+
+ ctrl = &swap_cgroup_ctrl[type];
+ mappage = ctrl->map[idx];
+ sc = page_address(mappage);
+ sc += pos;
+ ret = sc->val;
+ return ret;
+}
+
+int swap_cgroup_swapon(int type, unsigned long max_pages)
+{
+ void *array;
+ unsigned long array_size;
+ unsigned long length;
+ struct swap_cgroup_ctrl *ctrl;
+
+ if (!do_swap_account)
+ return 0;
+
+ length = ((max_pages/SC_PER_PAGE) + 1);
+ array_size = length * sizeof(void *);
+
+ array = vmalloc(array_size);
+ if (!array)
+ goto nomem;
+
+ memset(array, 0, array_size);
+ ctrl = &swap_cgroup_ctrl[type];
+ mutex_lock(&swap_cgroup_mutex);
+ ctrl->length = length;
+ ctrl->map = array;
+ if (swap_cgroup_prepare(type)) {
+ /* memory shortage */
+ ctrl->map = NULL;
+ ctrl->length = 0;
+ vfree(array);
+ mutex_unlock(&swap_cgroup_mutex);
+ goto nomem;
+ }
+ mutex_unlock(&swap_cgroup_mutex);
+
+ printk(KERN_INFO
+ "swap_cgroup: uses %ld bytes of vmalloc for pointer array space"
+ " and %ld bytes to hold mem_cgroup pointers on swap\n",
+ array_size, length * PAGE_SIZE);
+ printk(KERN_INFO
+ "swap_cgroup can be disabled by noswapaccount boot option.\n");
+
+ return 0;
+nomem:
+ printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n");
+ printk(KERN_INFO
+ "swap_cgroup can be disabled by noswapaccount boot option\n");
+ return -ENOMEM;
+}
+
+void swap_cgroup_swapoff(int type)
+{
+ int i;
+ struct swap_cgroup_ctrl *ctrl;
+
+ if (!do_swap_account)
+ return;
+
+ mutex_lock(&swap_cgroup_mutex);
+ ctrl = &swap_cgroup_ctrl[type];
+ if (ctrl->map) {
+ for (i = 0; i < ctrl->length; i++) {
+ struct page *page = ctrl->map[i];
+ if (page)
+ __free_page(page);
+ }
+ vfree(ctrl->map);
+ ctrl->map = NULL;
+ ctrl->length = 0;
+ }
+ mutex_unlock(&swap_cgroup_mutex);
+}
+
+#endif
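
The block above amounts to one mem_cgroup pointer per swap slot, stored in page-sized chunks reached through ctrl->map[idx] and indexed within a chunk by the offset's low bits. The stand-alone userspace model below reproduces the index arithmetic and the swapon sizing; it is an illustration only, and the 4 KiB page size, 8-byte entry and 1 GiB swap device are assumed values, not taken from this patch.

/*
 * Userspace model of the swap_cgroup slot addressing (illustration only;
 * page size, entry size and swap size are assumptions, see above).
 */
#include <stdio.h>

#define MODEL_PAGE_SIZE		4096UL	/* assumed PAGE_SIZE */
#define MODEL_SC_SIZE		8UL	/* sizeof(struct swap_cgroup): one pointer on 64-bit */
#define MODEL_SC_PER_PAGE	(MODEL_PAGE_SIZE / MODEL_SC_SIZE)
#define MODEL_SC_POS_MASK	(MODEL_SC_PER_PAGE - 1)	/* valid because SC_PER_PAGE is a power of two */

int main(void)
{
	unsigned long max_pages = 262144;	/* swp_offset() range of a 1 GiB swap device */
	unsigned long length = max_pages / MODEL_SC_PER_PAGE + 1;	/* as in swap_cgroup_swapon() */
	unsigned long offset = 123456;		/* some swp_offset(ent) */

	printf("map pages: %lu (pointer array: %lu bytes, entries: %lu bytes)\n",
	       length,
	       (unsigned long)(length * sizeof(void *)),
	       length * MODEL_PAGE_SIZE);
	printf("offset %lu -> ctrl->map[%lu], slot %lu\n",
	       offset, offset / MODEL_SC_PER_PAGE, offset & MODEL_SC_POS_MASK);
	return 0;
}
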
diff --git a/mm/shmem.c b/mm/shmem.c
index 5941f9801363..5d0de96c9789 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -928,7 +928,11 @@ found:
error = 1;
if (!inode)
goto out;
- /* Precharge page using GFP_KERNEL while we can wait */
+	/*
+	 * Charge the page using GFP_KERNEL while we can wait.
+	 * It is charged back to the user (not to the caller) when swap
+	 * accounting is in use. add_to_page_cache() will be called with
+	 * GFP_NOWAIT.
+	 */
error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
if (error)
goto out;
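
The comment above describes a two-step ordering: charge while sleeping is still permitted, then insert into the page cache with GFP_NOWAIT once locks are held. A condensed sketch of that ordering follows; it is not a compilable unit on its own, and mapping and index are placeholders rather than names taken from shmem.c.

	/* Step 1: charge early, while we are still allowed to sleep and reclaim. */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;

	/*
	 * Step 2: later, with locks held where sleeping is forbidden, the page
	 * cache insertion itself must not trigger reclaim, hence GFP_NOWAIT.
	 */
	error = add_to_page_cache(page, mapping, index, GFP_NOWAIT);
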
@@ -1320,15 +1324,19 @@ repeat:
} else {
shmem_swp_unmap(entry);
spin_unlock(&info->lock);
- unlock_page(swappage);
- page_cache_release(swappage);
if (error == -ENOMEM) {
/* allow reclaim from this memory cgroup */
- error = mem_cgroup_shrink_usage(current->mm,
+ error = mem_cgroup_shrink_usage(swappage,
+ current->mm,
gfp);
- if (error)
+ if (error) {
+ unlock_page(swappage);
+ page_cache_release(swappage);
goto failed;
+ }
}
+ unlock_page(swappage);
+ page_cache_release(swappage);
goto repeat;
}
} else if (sgp == SGP_READ && !filepage) {
@@ -1379,7 +1387,7 @@ repeat:
/* Precharge page while we can wait, compensate after */
error = mem_cgroup_cache_charge(filepage, current->mm,
- gfp & ~__GFP_HIGHMEM);
+ GFP_KERNEL);
if (error) {
page_cache_release(filepage);
shmem_unacct_blocks(info->flags, 1);
diff --git a/mm/swap.c b/mm/swap.c
index ba2c0e8b8b54..8adb9feb61e1 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -151,6 +151,26 @@ void rotate_reclaimable_page(struct page *page)
}
}
+static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+ int file, int rotated)
+{
+ struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
+ struct zone_reclaim_stat *memcg_reclaim_stat;
+
+ memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+
+ reclaim_stat->recent_scanned[file]++;
+ if (rotated)
+ reclaim_stat->recent_rotated[file]++;
+
+ if (!memcg_reclaim_stat)
+ return;
+
+ memcg_reclaim_stat->recent_scanned[file]++;
+ if (rotated)
+ memcg_reclaim_stat->recent_rotated[file]++;
+}
+
/*
* FIXME: speed this up?
*/
@@ -168,10 +188,8 @@ void activate_page(struct page *page)
lru += LRU_ACTIVE;
add_page_to_lru_list(zone, page, lru);
__count_vm_event(PGACTIVATE);
- mem_cgroup_move_lists(page, lru);
- zone->recent_rotated[!!file]++;
- zone->recent_scanned[!!file]++;
+ update_page_reclaim_stat(zone, page, !!file, 1);
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -386,12 +404,14 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
int i;
struct zone *zone = NULL;
+
VM_BUG_ON(is_unevictable_lru(lru));
for (i = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
struct zone *pagezone = page_zone(page);
int file;
+ int active;
if (pagezone != zone) {
if (zone)
@@ -403,12 +423,11 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
VM_BUG_ON(PageUnevictable(page));
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
+ active = is_active_lru(lru);
file = is_file_lru(lru);
- zone->recent_scanned[file]++;
- if (is_active_lru(lru)) {
+ if (active)
SetPageActive(page);
- zone->recent_rotated[file]++;
- }
+ update_page_reclaim_stat(zone, page, file, active);
add_page_to_lru_list(zone, page, lru);
}
if (zone)
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 81c825f67a7f..3ecea98ecb45 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -17,6 +17,7 @@
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
+#include <linux/page_cgroup.h>
#include <asm/pgtable.h>
@@ -108,6 +109,8 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
*/
void __delete_from_swap_cache(struct page *page)
{
+ swp_entry_t ent = {.val = page_private(page)};
+
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageSwapCache(page));
VM_BUG_ON(PageWriteback(page));
@@ -118,6 +121,7 @@ void __delete_from_swap_cache(struct page *page)
total_swapcache_pages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
INC_CACHE_INFO(del_total);
+ mem_cgroup_uncharge_swapcache(page, ent);
}
/**
diff --git a/mm/swapfile.c b/mm/swapfile.c
index eec5ca758a23..da422c47e2ee 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -33,6 +33,7 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
+#include <linux/page_cgroup.h>
static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
@@ -470,8 +471,9 @@ out:
return NULL;
}
-static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
+static int swap_entry_free(struct swap_info_struct *p, swp_entry_t ent)
{
+ unsigned long offset = swp_offset(ent);
int count = p->swap_map[offset];
if (count < SWAP_MAP_MAX) {
@@ -486,6 +488,7 @@ static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
swap_list.next = p - swap_info;
nr_swap_pages++;
p->inuse_pages--;
+ mem_cgroup_uncharge_swap(ent);
}
}
return count;
@@ -501,7 +504,7 @@ void swap_free(swp_entry_t entry)
p = swap_info_get(entry);
if (p) {
- swap_entry_free(p, swp_offset(entry));
+ swap_entry_free(p, entry);
spin_unlock(&swap_lock);
}
}
@@ -581,7 +584,7 @@ int free_swap_and_cache(swp_entry_t entry)
p = swap_info_get(entry);
if (p) {
- if (swap_entry_free(p, swp_offset(entry)) == 1) {
+ if (swap_entry_free(p, entry) == 1) {
page = find_get_page(&swapper_space, entry.val);
if (page && !trylock_page(page)) {
page_cache_release(page);
@@ -690,17 +693,18 @@ unsigned int count_swap_pages(int type, int free)
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct page *page)
{
+ struct mem_cgroup *ptr = NULL;
spinlock_t *ptl;
pte_t *pte;
int ret = 1;
- if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+ if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
ret = -ENOMEM;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
if (ret > 0)
- mem_cgroup_uncharge_page(page);
+ mem_cgroup_cancel_charge_swapin(ptr);
ret = 0;
goto out;
}
@@ -710,6 +714,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
set_pte_at(vma->vm_mm, addr, pte,
pte_mkold(mk_pte(page, vma->vm_page_prot)));
page_add_anon_rmap(page, vma, addr);
+ mem_cgroup_commit_charge_swapin(page, ptr);
swap_free(entry);
/*
* Move the page to the active list so it is not
@@ -1492,6 +1497,9 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
spin_unlock(&swap_lock);
mutex_unlock(&swapon_mutex);
vfree(swap_map);
+	/* Destroy swap account information */
+ swap_cgroup_swapoff(type);
+
inode = mapping->host;
if (S_ISBLK(inode->i_mode)) {
struct block_device *bdev = I_BDEV(inode);
@@ -1809,6 +1817,11 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
}
swap_map[page_nr] = SWAP_MAP_BAD;
}
+
+ error = swap_cgroup_swapon(type, maxpages);
+ if (error)
+ goto bad_swap;
+
nr_good_pages = swap_header->info.last_page -
swap_header->info.nr_badpages -
1 /* header page */;
@@ -1880,6 +1893,7 @@ bad_swap:
bd_release(bdev);
}
destroy_swap_extents(p);
+ swap_cgroup_swapoff(type);
bad_swap_2:
spin_lock(&swap_lock);
p->swap_file = NULL;
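
For reference, the unuse_pte() hunk above replaces the old single-step mem_cgroup_charge()/mem_cgroup_uncharge_page() pair with a two-phase protocol built from three calls that all appear in this diff. A condensed restatement of the calling pattern (kernel-style sketch, not compilable on its own):

	struct mem_cgroup *ptr = NULL;

	/* Phase 1: reserve the charge before taking the pte lock. */
	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
		return -ENOMEM;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
		/* Raced: the entry is gone, so drop the reservation again. */
		mem_cgroup_cancel_charge_swapin(ptr);
		goto out;
	}

	/* Phase 2: the mapping is established, so bind the charge to the page. */
	page_add_anon_rmap(page, vma, addr);
	mem_cgroup_commit_charge_swapin(page, ptr);
	swap_free(entry);	/* may now also uncharge the swap entry itself */
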
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b07c48b09a93..9a27c44aa327 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -125,11 +125,30 @@ static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-#define scan_global_lru(sc) (!(sc)->mem_cgroup)
+#define scanning_global_lru(sc) (!(sc)->mem_cgroup)
#else
-#define scan_global_lru(sc) (1)
+#define scanning_global_lru(sc) (1)
#endif
+static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
+ struct scan_control *sc)
+{
+ if (!scanning_global_lru(sc))
+ return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+
+ return &zone->reclaim_stat;
+}
+
+static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
+ enum lru_list lru)
+{
+ if (!scanning_global_lru(sc))
+ return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+
+ return zone_page_state(zone, NR_LRU_BASE + lru);
+}
+
+
/*
* Add a shrinker callback to be called from the vm
*/
@@ -512,7 +531,6 @@ redo:
lru = LRU_UNEVICTABLE;
add_page_to_unevictable_list(page);
}
- mem_cgroup_move_lists(page, lru);
/*
* page's status can change while we move it among lru. If an evictable
@@ -547,7 +565,6 @@ void putback_lru_page(struct page *page)
lru = !!TestClearPageActive(page) + page_is_file_cache(page);
lru_cache_add_lru(page, lru);
- mem_cgroup_move_lists(page, lru);
put_page(page);
}
#endif /* CONFIG_UNEVICTABLE_LRU */
@@ -813,6 +830,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
return ret;
ret = -EBUSY;
+
if (likely(get_page_unless_zero(page))) {
/*
* Be careful not to clear PageLRU until after we're
@@ -821,6 +839,7 @@ int __isolate_lru_page(struct page *page, int mode, int file)
*/
ClearPageLRU(page);
ret = 0;
+ mem_cgroup_del_lru(page);
}
return ret;
@@ -1029,6 +1048,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
struct pagevec pvec;
unsigned long nr_scanned = 0;
unsigned long nr_reclaimed = 0;
+ struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
pagevec_init(&pvec, 1);
@@ -1070,13 +1090,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
__mod_zone_page_state(zone, NR_INACTIVE_ANON,
-count[LRU_INACTIVE_ANON]);
- if (scan_global_lru(sc)) {
+ if (scanning_global_lru(sc))
zone->pages_scanned += nr_scan;
- zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
- zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
- zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
- zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
- }
+
+ reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
+ reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
+ reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
+ reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+
spin_unlock_irq(&zone->lru_lock);
nr_scanned += nr_scan;
@@ -1108,7 +1129,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
if (current_is_kswapd()) {
__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
__count_vm_events(KSWAPD_STEAL, nr_freed);
- } else if (scan_global_lru(sc))
+ } else if (scanning_global_lru(sc))
__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
__count_zone_vm_events(PGSTEAL, zone, nr_freed);
@@ -1134,10 +1155,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
SetPageLRU(page);
lru = page_lru(page);
add_page_to_lru_list(zone, page, lru);
- mem_cgroup_move_lists(page, lru);
- if (PageActive(page) && scan_global_lru(sc)) {
+ if (PageActive(page)) {
int file = !!page_is_file_cache(page);
- zone->recent_rotated[file]++;
+ reclaim_stat->recent_rotated[file]++;
}
if (!pagevec_add(&pvec, page)) {
spin_unlock_irq(&zone->lru_lock);
@@ -1197,6 +1217,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
struct page *page;
struct pagevec pvec;
enum lru_list lru;
+ struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
@@ -1207,10 +1228,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
	 * zone->pages_scanned is used to detect the zone's oom;
* mem_cgroup remembers nr_scan by itself.
*/
- if (scan_global_lru(sc)) {
+ if (scanning_global_lru(sc)) {
zone->pages_scanned += pgscanned;
- zone->recent_scanned[!!file] += pgmoved;
}
+ reclaim_stat->recent_scanned[!!file] += pgmoved;
if (file)
__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1251,8 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
* This helps balance scan pressure between file and anonymous
* pages in get_scan_ratio.
*/
- if (scan_global_lru(sc))
- zone->recent_rotated[!!file] += pgmoved;
+ reclaim_stat->recent_rotated[!!file] += pgmoved;
while (!list_empty(&l_inactive)) {
page = lru_to_page(&l_inactive);
@@ -1263,7 +1283,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
ClearPageActive(page);
list_move(&page->lru, &zone->lru[lru].list);
- mem_cgroup_move_lists(page, lru);
+ mem_cgroup_add_lru_list(page, lru);
pgmoved++;
if (!pagevec_add(&pvec, page)) {
__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
@@ -1292,6 +1312,38 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
pagevec_release(&pvec);
}
+static int inactive_anon_is_low_global(struct zone *zone)
+{
+ unsigned long active, inactive;
+
+ active = zone_page_state(zone, NR_ACTIVE_ANON);
+ inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+
+ if (inactive * zone->inactive_ratio < active)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * inactive_anon_is_low - check if anonymous pages need to be deactivated
+ * @zone: zone to check
+ * @sc: scan control of this context
+ *
+ * Returns true if the zone does not have enough inactive anon pages,
+ * meaning some active anon pages need to be deactivated.
+ */
+static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
+{
+ int low;
+
+ if (scanning_global_lru(sc))
+ low = inactive_anon_is_low_global(zone);
+ else
+ low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
+ return low;
+}
+
static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
struct zone *zone, struct scan_control *sc, int priority)
{
@@ -1302,8 +1354,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
return 0;
}
- if (lru == LRU_ACTIVE_ANON &&
- (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
+ if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
shrink_active_list(nr_to_scan, zone, sc, priority, file);
return 0;
}
@@ -1325,6 +1376,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
unsigned long anon, file, free;
unsigned long anon_prio, file_prio;
unsigned long ap, fp;
+ struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
/* If we have no swap space, do not bother scanning anon pages. */
if (nr_swap_pages <= 0) {
@@ -1333,17 +1385,20 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
return;
}
- anon = zone_page_state(zone, NR_ACTIVE_ANON) +
- zone_page_state(zone, NR_INACTIVE_ANON);
- file = zone_page_state(zone, NR_ACTIVE_FILE) +
- zone_page_state(zone, NR_INACTIVE_FILE);
- free = zone_page_state(zone, NR_FREE_PAGES);
-
- /* If we have very few page cache pages, force-scan anon pages. */
- if (unlikely(file + free <= zone->pages_high)) {
- percent[0] = 100;
- percent[1] = 0;
- return;
+ anon = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
+ zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
+ file = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
+ zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
+
+ if (scanning_global_lru(sc)) {
+ free = zone_page_state(zone, NR_FREE_PAGES);
+ /* If we have very few page cache pages,
+ force-scan anon pages. */
+ if (unlikely(file + free <= zone->pages_high)) {
+ percent[0] = 100;
+ percent[1] = 0;
+ return;
+ }
}
/*
@@ -1357,17 +1412,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
*
* anon in [0], file in [1]
*/
- if (unlikely(zone->recent_scanned[0] > anon / 4)) {
+ if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
spin_lock_irq(&zone->lru_lock);
- zone->recent_scanned[0] /= 2;
- zone->recent_rotated[0] /= 2;
+ reclaim_stat->recent_scanned[0] /= 2;
+ reclaim_stat->recent_rotated[0] /= 2;
spin_unlock_irq(&zone->lru_lock);
}
- if (unlikely(zone->recent_scanned[1] > file / 4)) {
+ if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
spin_lock_irq(&zone->lru_lock);
- zone->recent_scanned[1] /= 2;
- zone->recent_rotated[1] /= 2;
+ reclaim_stat->recent_scanned[1] /= 2;
+ reclaim_stat->recent_rotated[1] /= 2;
spin_unlock_irq(&zone->lru_lock);
}
@@ -1383,11 +1438,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
* proportional to the fraction of recently scanned pages on
* each list that were recently referenced and in active use.
*/
- ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
- ap /= zone->recent_rotated[0] + 1;
+ ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+ ap /= reclaim_stat->recent_rotated[0] + 1;
- fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
- fp /= zone->recent_rotated[1] + 1;
+ fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+ fp /= reclaim_stat->recent_rotated[1] + 1;
/* Normalize to percentages */
percent[0] = 100 * ap / (ap + fp + 1);
@@ -1411,30 +1466,23 @@ static void shrink_zone(int priority, struct zone *zone,
get_scan_ratio(zone, sc, percent);
for_each_evictable_lru(l) {
- if (scan_global_lru(sc)) {
- int file = is_file_lru(l);
- int scan;
-
- scan = zone_page_state(zone, NR_LRU_BASE + l);
- if (priority) {
- scan >>= priority;
- scan = (scan * percent[file]) / 100;
- }
+ int file = is_file_lru(l);
+ int scan;
+
+ scan = zone_page_state(zone, NR_LRU_BASE + l);
+ if (priority) {
+ scan >>= priority;
+ scan = (scan * percent[file]) / 100;
+ }
+ if (scanning_global_lru(sc)) {
zone->lru[l].nr_scan += scan;
nr[l] = zone->lru[l].nr_scan;
if (nr[l] >= swap_cluster_max)
zone->lru[l].nr_scan = 0;
else
nr[l] = 0;
- } else {
- /*
- * This reclaim occurs not because zone memory shortage
- * but because memory controller hits its limit.
- * Don't modify zone reclaim related data.
- */
- nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
- priority, l);
- }
+ } else
+ nr[l] = scan;
}
while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -1467,9 +1515,7 @@ static void shrink_zone(int priority, struct zone *zone,
* Even if we did not try to evict anon pages at all, we want to
* rebalance the anon lru active/inactive ratio.
*/
- if (!scan_global_lru(sc) || inactive_anon_is_low(zone))
- shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
- else if (!scan_global_lru(sc))
+ if (inactive_anon_is_low(zone, sc))
shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
throttle_vm_writeout(sc->gfp_mask);
@@ -1504,7 +1550,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
	 * Take care that memory controller reclaim has only a small
	 * influence on the global LRU.
*/
- if (scan_global_lru(sc)) {
+ if (scanning_global_lru(sc)) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
continue;
note_zone_scanning_priority(zone, priority);
@@ -1557,12 +1603,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
delayacct_freepages_start();
- if (scan_global_lru(sc))
+ if (scanning_global_lru(sc))
count_vm_event(ALLOCSTALL);
/*
* mem_cgroup will not do shrink_slab.
*/
- if (scan_global_lru(sc)) {
+ if (scanning_global_lru(sc)) {
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1581,7 +1627,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
* Don't shrink slabs when reclaiming memory from
* over limit cgroups
*/
- if (scan_global_lru(sc)) {
+ if (scanning_global_lru(sc)) {
shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
if (reclaim_state) {
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1612,7 +1658,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
congestion_wait(WRITE, HZ/10);
}
/* top priority shrink_zones still had more to do? don't OOM, then */
- if (!sc->all_unreclaimable && scan_global_lru(sc))
+ if (!sc->all_unreclaimable && scanning_global_lru(sc))
ret = sc->nr_reclaimed;
out:
/*
@@ -1625,7 +1671,7 @@ out:
if (priority < 0)
priority = 0;
- if (scan_global_lru(sc)) {
+ if (scanning_global_lru(sc)) {
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1661,19 +1707,24 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
- gfp_t gfp_mask)
+ gfp_t gfp_mask,
+ bool noswap,
+ unsigned int swappiness)
{
struct scan_control sc = {
.may_writepage = !laptop_mode,
.may_swap = 1,
.swap_cluster_max = SWAP_CLUSTER_MAX,
- .swappiness = vm_swappiness,
+ .swappiness = swappiness,
.order = 0,
.mem_cgroup = mem_cont,
.isolate_pages = mem_cgroup_isolate_pages,
};
struct zonelist *zonelist;
+ if (noswap)
+ sc.may_swap = 0;
+
sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
zonelist = NODE_DATA(numa_node_id())->node_zonelists;
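
The widened try_to_free_mem_cgroup_pages() prototype lets the memory controller pass a per-cgroup swappiness and forbid swap-out for a single reclaim pass (noswap clears sc.may_swap above). A hypothetical caller, sketched with placeholder values since the real call sites live in mm/memcontrol.c and are outside this hunk:

	unsigned long reclaimed;

	reclaimed = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
						 false,	/* noswap: swap-out allowed */
						 60);	/* per-cgroup swappiness */
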
@@ -1761,7 +1812,7 @@ loop_again:
* Do some background aging of the anon list, to give
* pages a chance to be referenced before reclaiming.
*/
- if (inactive_anon_is_low(zone))
+ if (inactive_anon_is_low(zone, &sc))
shrink_active_list(SWAP_CLUSTER_MAX, zone,
&sc, priority, 0);
@@ -2404,6 +2455,7 @@ retry:
__dec_zone_state(zone, NR_UNEVICTABLE);
list_move(&page->lru, &zone->lru[l].list);
+ mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
__inc_zone_state(zone, NR_INACTIVE_ANON + l);
__count_vm_event(UNEVICTABLE_PGRESCUED);
} else {
@@ -2412,6 +2464,7 @@ retry:
*/
SetPageUnevictable(page);
list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+ mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
if (page_evictable(page, NULL))
goto retry;
}