author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2023-02-21 11:19:49 -0800
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2023-02-21 11:19:49 -0800
commit     7ae9fb1b7ecbb5d85d07857943f677fd1a559b18 (patch)
tree       dbdd35328f43569c38c4ce193cefd7d2b6b9fbfd /mm/hugetlb_vmemmap.c
parent     9c445d2637c938a800fcc8b5f0b10e60c94460c7 (diff)
parent     9e69e845ae95227949c400af1037dca023f73038 (diff)
Merge branch 'next' into for-linus
Prepare input updates for 6.3 merge window.
Diffstat (limited to 'mm/hugetlb_vmemmap.c')
-rw-r--r--  mm/hugetlb_vmemmap.c | 46 ++++++++++++++++++++++++++++++++++++----------
1 file changed, 36 insertions(+), 10 deletions(-)
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index ba2a2596fb4e..45e93a545dd7 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -11,6 +11,7 @@
 #define pr_fmt(fmt)	"HugeTLB: " fmt
 
 #include <linux/pgtable.h>
+#include <linux/moduleparam.h>
 #include <linux/bootmem_info.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -202,12 +203,7 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
 		return ret;
 	} while (pgd++, addr = next, addr != end);
 
-	/*
-	 * We only change the mapping of the vmemmap virtual address range
-	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
-	 * belongs to the range.
-	 */
-	flush_tlb_kernel_range(start + PAGE_SIZE, end);
+	flush_tlb_kernel_range(start, end);
 
 	return 0;
 }
@@ -231,10 +227,8 @@ static void free_vmemmap_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 
-	list_for_each_entry_safe(page, next, list, lru) {
-		list_del(&page->lru);
+	list_for_each_entry_safe(page, next, list, lru)
 		free_vmemmap_page(page);
-	}
 }
 
 static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
@@ -245,9 +239,23 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
 	 * to the tail pages.
 	 */
 	pgprot_t pgprot = PAGE_KERNEL_RO;
-	pte_t entry = mk_pte(walk->reuse_page, pgprot);
 	struct page *page = pte_page(*pte);
+	pte_t entry;
 
+	/* Remapping the head page requires r/w */
+	if (unlikely(addr == walk->reuse_addr)) {
+		pgprot = PAGE_KERNEL;
+		list_del(&walk->reuse_page->lru);
+
+		/*
+		 * Makes sure that preceding stores to the page contents from
+		 * vmemmap_remap_free() become visible before the set_pte_at()
+		 * write.
+		 */
+		smp_wmb();
+	}
+
+	entry = mk_pte(walk->reuse_page, pgprot);
 	list_add_tail(&page->lru, walk->vmemmap_pages);
 	set_pte_at(&init_mm, addr, pte, entry);
 }
@@ -316,6 +324,24 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end,
 		.reuse_addr	= reuse,
 		.vmemmap_pages	= &vmemmap_pages,
 	};
+	int nid = page_to_nid((struct page *)start);
+	gfp_t gfp_mask = GFP_KERNEL | __GFP_THISNODE | __GFP_NORETRY |
+			 __GFP_NOWARN;
+
+	/*
+	 * Allocate a new head vmemmap page to avoid breaking a contiguous
+	 * block of struct page memory when freeing it back to the page
+	 * allocator in free_vmemmap_page_list(). This allows the likely
+	 * contiguous struct page backing memory to be kept contiguous,
+	 * allowing for more hugepage allocations. Fall back to the
+	 * currently mapped head page should the allocation fail.
+	 */
+	walk.reuse_page = alloc_pages_node(nid, gfp_mask, 0);
+	if (walk.reuse_page) {
+		copy_page(page_to_virt(walk.reuse_page),
+			  (void *)walk.reuse_addr);
+		list_add(&walk.reuse_page->lru, &vmemmap_pages);
+	}
 
 	/*
 	 * In order to make remapping routine most efficient for the huge pages,