author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2012-07-31 16:42:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-31 18:42:41 -0700
commit	94ae8ba7176666d1e7d8bbb9f93670a27540b6a8 (patch)
tree	5c8a1478e2463c29be6a3e6f63d5acabaac65a17 /mm/hugetlb.c
parent	79dbb2368ae3515fad9c8b7c8f831cd86be59b1d (diff)
hugetlb/cgroup: assign the page hugetlb cgroup when we move the page to active list.
A page's hugetlb cgroup assignment and movement to the active list should
occur with hugetlb_lock held. Otherwise, when we remove the hugetlb cgroup
we will iterate the active list and find pages with NULL hugetlb cgroup
values.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
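For illustration, the fallback path in alloc_huge_page() after this change looks roughly like the sketch below (a simplified excerpt based on the diff that follows; declarations and the allocation-failure handling are omitted). The point is that hugetlb_cgroup_commit_charge() and the list_move() onto hugepage_activelist happen inside the same hugetlb_lock critical section:

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	if (page) {
		/* page came from the reserved pool: record its cgroup before unlocking */
		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
		spin_unlock(&hugetlb_lock);
	} else {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		/* ... allocation-failure handling elided ... */
		spin_lock(&hugetlb_lock);
		/*
		 * Assign the cgroup and publish the page on the active list
		 * under a single hugetlb_lock hold, so the list never holds
		 * a page with a NULL hugetlb cgroup.
		 */
		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
		list_move(&page->lru, &h->hugepage_activelist);
		spin_unlock(&hugetlb_lock);
	}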
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ec7b86ebf9d9..c39e4beeb63a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -928,14 +928,8 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 	page = dequeue_huge_page_node(h, nid);
 	spin_unlock(&hugetlb_lock);
 
-	if (!page) {
+	if (!page)
 		page = alloc_buddy_huge_page(h, nid);
-		if (page) {
-			spin_lock(&hugetlb_lock);
-			list_move(&page->lru, &h->hugepage_activelist);
-			spin_unlock(&hugetlb_lock);
-		}
-	}
 
 	return page;
 }
@@ -1150,9 +1144,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	}
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-	spin_unlock(&hugetlb_lock);
-
-	if (!page) {
+	if (page) {
+		/* update page cgroup details */
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
+		spin_unlock(&hugetlb_lock);
+	} else {
+		spin_unlock(&hugetlb_lock);
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
 			hugetlb_cgroup_uncharge_cgroup(idx,
@@ -1162,6 +1160,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 			return ERR_PTR(-ENOSPC);
 		}
 		spin_lock(&hugetlb_lock);
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
 		list_move(&page->lru, &h->hugepage_activelist);
 		spin_unlock(&hugetlb_lock);
 	}
@@ -1169,8 +1169,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	set_page_private(page, (unsigned long)spool);
 
 	vma_commit_reservation(h, vma, addr);
-	/* update page cgroup details */
-	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	return page;
 }
 
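Why the ordering matters on the teardown side: when a hugetlb cgroup is removed, the cgroup code walks each hstate's hugepage_activelist under hugetlb_lock and reparents the charge recorded in each page. An illustrative sketch of that walk (not the exact mm/hugetlb_cgroup.c source; loop shape and signatures are approximated):

	spin_lock(&hugetlb_lock);
	list_for_each_entry(page, &h->hugepage_activelist, lru)
		/* assumes the page's hugetlb cgroup pointer is already valid (non-NULL) */
		hugetlb_cgroup_move_parent(idx, cgroup, page);
	spin_unlock(&hugetlb_lock);

Before this patch, a buddy-allocated huge page was moved onto the active list before hugetlb_cgroup_commit_charge() ran (the commit happened only after hugetlb_lock had been dropped), so a concurrent walk like the one above could observe a page whose hugetlb cgroup was still NULL.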