author     Linus Walleij <linus.walleij@linaro.org>   2013-10-19 23:24:03 +0200
committer  Linus Walleij <linus.walleij@linaro.org>   2013-10-19 23:24:03 +0200
commit     b41fb43911b4cb864812adec88d028cc6219f23e (patch)
tree       f383de554dc3640ec3081ad41fa34f7cab68de82 /mm/hugetlb.c
parent     0963d59bc0bbed48b06733950f5eb167e9b9a8fa (diff)
parent     31d141e3a666269a3b6fcccddb0351caf7454240 (diff)
download   linux-stable-b41fb43911b4cb864812adec88d028cc6219f23e.tar.gz
           linux-stable-b41fb43911b4cb864812adec88d028cc6219f23e.tar.bz2
           linux-stable-b41fb43911b4cb864812adec88d028cc6219f23e.zip
Merge tag 'v3.12-rc6' into devel

Linux 3.12-rc6

Conflicts:
	drivers/gpio/gpio-lynxpoint.c
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  17
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b49579c7f2a5..0b7656e804d1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -653,6 +653,7 @@ static void free_huge_page(struct page *page)
 	BUG_ON(page_count(page));
 	BUG_ON(page_mapcount(page));
 	restore_reserve = PagePrivate(page);
+	ClearPagePrivate(page);
 
 	spin_lock(&hugetlb_lock);
 	hugetlb_cgroup_uncharge_page(hstate_index(h),
@@ -695,8 +696,22 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
 	/* we rely on prep_new_huge_page to set the destructor */
 	set_compound_order(page, order);
 	__SetPageHead(page);
+	__ClearPageReserved(page);
 	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
 		__SetPageTail(p);
+		/*
+		 * For gigantic hugepages allocated through bootmem at
+		 * boot, it's safer to be consistent with the not-gigantic
+		 * hugepages and clear the PG_reserved bit from all tail pages
+		 * too. Otherwise drivers using get_user_pages() to access tail
+		 * pages may get the reference counting wrong if they see
+		 * PG_reserved set on a tail page (despite the head page not
+		 * having PG_reserved set). Enforcing this consistency between
+		 * head and tail pages allows drivers to optimize away a check
+		 * on the head page when they need to know if put_page() is
+		 * needed after get_user_pages().
+		 */
+		__ClearPageReserved(p);
 		set_page_count(p, 0);
 		p->first_page = page;
 	}
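
For context, here is a minimal sketch (not part of this diff) of the driver pattern the new comment describes: once PG_reserved is cleared consistently on head and tail pages, a driver can release every page returned by get_user_pages_fast() with a plain put_page() call, without a PageReserved() special case on the head page. The helper name and the surrounding error handling are illustrative assumptions, not code from the kernel tree.

#include <linux/mm.h>

/*
 * Illustrative sketch only: pin 'nr' user pages for writing, use them,
 * then drop the references. With head and tail pages kept consistent,
 * put_page() is always the right release path after get_user_pages().
 */
static int example_pin_and_release(unsigned long start, int nr,
				   struct page **pages)
{
	int i, pinned;

	pinned = get_user_pages_fast(start, nr, 1, pages);
	if (pinned <= 0)
		return pinned;

	/* ... use the pinned pages (e.g. set up DMA or copy data) ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);	/* no PageReserved() check needed */

	return pinned;
}

The WARN_ON(PageReserved(page)) added in gather_bootmem_prealloc() in the next hunk then serves as a sanity check that prep_compound_gigantic_page() really did clear the bit on bootmem-allocated huge pages.
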
@@ -1329,9 +1344,9 @@ static void __init gather_bootmem_prealloc(void)
 #else
 		page = virt_to_page(m);
 #endif
-		__ClearPageReserved(page);
 		WARN_ON(page_count(page) != 1);
 		prep_compound_huge_page(page, h->order);
+		WARN_ON(PageReserved(page));
 		prep_new_huge_page(h, page, page_to_nid(page));
 		/*
 		 * If we had gigantic hugepages allocated at boot time, we need