author		Johannes Weiner <hannes@cmpxchg.org>		2011-03-23 16:42:30 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-23 19:46:28 -0700
commit		6b3ae58efca06623c197fd6d91ded4aa3a8fe039 (patch)
tree		6460e4e1ce206d391b862a3d398a9e22e33ecb3c /include
parent		5564e88ba6fd2f6dcd83a592771810cd84b5ae80 (diff)
memcg: remove direct page_cgroup-to-page pointer
In struct page_cgroup, we have a full word for flags, but only a few bits
are used. Use the remaining upper bits to encode, depending on the
configuration, the node or the section, to enable page_cgroup-to-page
lookups without a direct pointer.
This saves a full word for every page in a system with memory cgroups
enabled.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
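
The encoding described in the message can be modeled in a few lines of
standalone C. This is an illustrative sketch, not the kernel code: the
helpers mirror the new PCG_ARRAYID_* machinery in the patch below, but the
names and the 4-bit ID width are assumptions standing in for
SECTIONS_SHIFT/NODES_SHIFT.

#include <assert.h>
#include <stdio.h>

#define DEMO_BITS_PER_LONG	(8 * sizeof(unsigned long))
#define DEMO_ARRAYID_WIDTH	4UL	/* assumed width for the demo */
#define DEMO_ARRAYID_MASK	((1UL << DEMO_ARRAYID_WIDTH) - 1)
#define DEMO_ARRAYID_SHIFT	(DEMO_BITS_PER_LONG - DEMO_ARRAYID_WIDTH)

/* Pack an array ID into the upper bits, leaving the low flag bits alone. */
static void set_array_id(unsigned long *flags, unsigned long id)
{
	*flags &= ~(DEMO_ARRAYID_MASK << DEMO_ARRAYID_SHIFT);
	*flags |= (id & DEMO_ARRAYID_MASK) << DEMO_ARRAYID_SHIFT;
}

/* Recover the array ID from the upper bits. */
static unsigned long get_array_id(unsigned long flags)
{
	return (flags >> DEMO_ARRAYID_SHIFT) & DEMO_ARRAYID_MASK;
}

int main(void)
{
	unsigned long flags = 0x5;	/* pretend some PCG_* bits are set */

	set_array_id(&flags, 9);
	assert(get_array_id(flags) == 9);
	assert((flags & 0xff) == 0x5);	/* flag bits survive the encode */
	printf("flags=%#lx id=%lu\n", flags, get_array_id(flags));
	return 0;
}

Round-tripping the ID while the flag bits stay intact is why the scheme
needs no storage beyond the existing flags word.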
Diffstat (limited to 'include')
-rw-r--r--	include/linux/page_cgroup.h	75
1 file changed, 58 insertions(+), 17 deletions(-)
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 6b63679ce8a1..f5de21de31dd 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -1,8 +1,26 @@
 #ifndef __LINUX_PAGE_CGROUP_H
 #define __LINUX_PAGE_CGROUP_H
 
+enum {
+	/* flags for mem_cgroup */
+	PCG_LOCK, /* Lock for pc->mem_cgroup and following bits. */
+	PCG_CACHE, /* charged as cache */
+	PCG_USED, /* this object is in use. */
+	PCG_MIGRATION, /* under page migration */
+	/* flags for mem_cgroup and file and I/O status */
+	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
+	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
+	/* No lock in page_cgroup */
+	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
+	__NR_PCG_FLAGS,
+};
+
+#ifndef __GENERATING_BOUNDS_H
+#include <generated/bounds.h>
+
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 #include <linux/bit_spinlock.h>
+
 /*
  * Page Cgroup can be considered as an extended mem_map.
  * A page_cgroup page is associated with every page descriptor. The
@@ -13,7 +31,6 @@
 struct page_cgroup {
 	unsigned long flags;
 	struct mem_cgroup *mem_cgroup;
-	struct page *page;
 	struct list_head lru;		/* per cgroup LRU list */
 };
 
@@ -32,19 +49,7 @@ static inline void __init page_cgroup_init(void)
 #endif
 
 struct page_cgroup *lookup_page_cgroup(struct page *page);
-
-enum {
-	/* flags for mem_cgroup */
-	PCG_LOCK, /* Lock for pc->mem_cgroup and following bits. */
-	PCG_CACHE, /* charged as cache */
-	PCG_USED, /* this object is in use. */
-	PCG_MIGRATION, /* under page migration */
-	/* flags for mem_cgroup and file and I/O status */
-	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
-	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
-	/* No lock in page_cgroup */
-	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
-};
+struct page *lookup_cgroup_page(struct page_cgroup *pc);
 
 #define TESTPCGFLAG(uname, lname)			\
 static inline int PageCgroup##uname(struct page_cgroup *pc)	\
@@ -117,6 +122,39 @@ static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
 	local_irq_restore(*flags);
 }
 
+#ifdef CONFIG_SPARSEMEM
+#define PCG_ARRAYID_WIDTH	SECTIONS_SHIFT
+#else
+#define PCG_ARRAYID_WIDTH	NODES_SHIFT
+#endif
+
+#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
+#error Not enough space left in pc->flags to store page_cgroup array IDs
+#endif
+
+/* pc->flags: ARRAY-ID | FLAGS */
+
+#define PCG_ARRAYID_MASK	((1UL << PCG_ARRAYID_WIDTH) - 1)
+
+#define PCG_ARRAYID_OFFSET	(BITS_PER_LONG - PCG_ARRAYID_WIDTH)
+/*
+ * Zero the shift count for non-existant fields, to prevent compiler
+ * warnings and ensure references are optimized away.
+ */
+#define PCG_ARRAYID_SHIFT	(PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))
+
+static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
+					    unsigned long id)
+{
+	pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
+	pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
+}
+
+static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
+{
+	return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
+}
+
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct page_cgroup;
 
@@ -137,7 +175,7 @@ static inline void __init page_cgroup_init_flatmem(void)
 {
 }
 
-#endif
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR */
 
 #include <linux/swap.h>
 
@@ -173,5 +211,8 @@ static inline void swap_cgroup_swapoff(int type)
 	return;
 }
 
-#endif
-#endif
+#endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
+
+#endif /* !__GENERATING_BOUNDS_H */
+
+#endif /* __LINUX_PAGE_CGROUP_H */
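
The matching lookup side lands in mm/page_cgroup.c and is outside this
'include'-limited diff, but the idea the header enables can be sketched in
userspace: decode the array ID from pc->flags, find the owning per-node
(or per-section) page_cgroup array, and recover the page from the
page_cgroup's index within it. Everything below is a hedged model under
assumed names (struct demo_node, lookup_page(), a fixed 4-bit ID), not the
kernel implementation.

#include <assert.h>
#include <stddef.h>

struct page_cgroup { unsigned long flags; };
struct page { int dummy; };

#define NPAGES	16

/* Per-node parallel arrays: the page_cgroup at index i describes page i. */
struct demo_node {
	struct page pages[NPAGES];
	struct page_cgroup page_cgroups[NPAGES];
} nodes[2];

#define ARRAYID_SHIFT	(8 * sizeof(unsigned long) - 4)	/* assumed 4-bit ID */

static unsigned long array_id(struct page_cgroup *pc)
{
	/* stand-in for the new page_cgroup_array_id() helper */
	return (pc->flags >> ARRAYID_SHIFT) & 0xf;
}

/* Reverse lookup replacing the removed pc->page pointer. */
static struct page *lookup_page(struct page_cgroup *pc)
{
	struct demo_node *n = &nodes[array_id(pc)];
	size_t idx = pc - n->page_cgroups;	/* offset within node array */

	return &n->pages[idx];
}

int main(void)
{
	struct page_cgroup *pc = &nodes[1].page_cgroups[7];

	pc->flags = 1UL << ARRAYID_SHIFT;	/* array ID 1 */
	assert(lookup_page(pc) == &nodes[1].pages[7]);
	return 0;
}

The pointer subtraction plus the encoded array ID together do the job of
the deleted struct page *page member, which is where the one-word-per-page
saving comes from.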