From 6eb82f9940267d3af260989d077a2833f588beae Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Tue, 21 Jul 2020 11:59:51 +0200
Subject: x86/mm: Pre-allocate P4D/PUD pages for vmalloc area

Pre-allocate the page-table pages for the vmalloc area at the level
which needs synchronization on x86-64, which is P4D for 5-level and
PUD for 4-level paging.

Doing this at boot makes sure no synchronization of that area is
necessary at runtime. The synchronization takes the pgd_lock and
iterates over all page-tables in the system, so it can take quite
long and is better avoided.

Signed-off-by: Joerg Roedel
Signed-off-by: Ingo Molnar
Reviewed-by: Mike Rapoport
Link: https://lore.kernel.org/r/20200721095953.6218-2-joro@8bytes.org
---
 arch/x86/mm/init_64.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

(limited to 'arch/x86/mm')

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dbae185511cd..e76bdb001460 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1238,6 +1238,56 @@ static void __init register_page_bootmem_info(void)
 #endif
 }
 
+/*
+ * Pre-allocates page-table pages for the vmalloc area in the kernel
+ * page-table. Only the level which needs to be synchronized between
+ * all page-tables is allocated because the synchronization can be
+ * expensive.
+ */
+static void __init preallocate_vmalloc_pages(void)
+{
+	unsigned long addr;
+	const char *lvl;
+
+	for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		pgd_t *pgd = pgd_offset_k(addr);
+		p4d_t *p4d;
+		pud_t *pud;
+
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_none(*p4d)) {
+			/* Can only happen with 5-level paging */
+			p4d = p4d_alloc(&init_mm, pgd, addr);
+			if (!p4d) {
+				lvl = "p4d";
+				goto failed;
+			}
+		}
+
+		if (pgtable_l5_enabled())
+			continue;
+
+		pud = pud_offset(p4d, addr);
+		if (pud_none(*pud)) {
+			/* Ends up here only with 4-level paging */
+			pud = pud_alloc(&init_mm, p4d, addr);
+			if (!pud) {
+				lvl = "pud";
+				goto failed;
+			}
+		}
+	}
+
+	return;
+
+failed:
+
+	/*
+	 * The pages have to be there now or they will be missing in
+	 * process page-tables later.
+	 */
+	panic("Failed to pre-allocate %s pages for vmalloc area\n", lvl);
+}
+
 void __init mem_init(void)
 {
 	pci_iommu_alloc();
@@ -1261,6 +1311,8 @@ void __init mem_init(void)
 	if (get_gate_vma(&init_mm))
 		kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
 
+	preallocate_vmalloc_pages();
+
 	mem_init_print_info(NULL);
 }
--
cgit v1.2.3
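A note on the loop above: ALIGN(addr + 1, PGDIR_SIZE) rounds up to the
next PGDIR_SIZE boundary, so preallocate_vmalloc_pages() visits each
top-level (PGD) entry covering the vmalloc area exactly once. The
standalone program below illustrates that stepping; it is a sketch
only, with the 4-level x86-64 values of VMALLOC_START, VMALLOC_END and
PGDIR_SHIFT hard-coded as assumptions rather than pulled from kernel
headers:

	#include <stdio.h>

	/* Assumed 4-level x86-64 values: one PGD entry maps 512 GiB */
	#define PGDIR_SHIFT	39
	#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		/* Illustrative VMALLOC_START/VMALLOC_END for 4-level paging */
		unsigned long start = 0xffffc90000000000UL;
		unsigned long end   = 0xffffe8ffffffffffUL;
		unsigned long addr;
		int entries = 0;

		/* Same stepping as the pre-allocation loop: one pass per PGD entry */
		for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE))
			entries++;

		printf("vmalloc area covers %d PGD entries\n", entries);
		return 0;
	}

On an LP64 system this prints 64: with 4-level paging the
pre-allocation costs at most 64 PUD pages (256 KiB) at boot, a small
price for never having to synchronize the area at runtime.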
From 8bb9bf242d1fee925636353807c511d54fde8986 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Tue, 21 Jul 2020 11:59:52 +0200
Subject: x86/mm/64: Do not sync vmalloc/ioremap mappings

Remove the code to sync the vmalloc and ioremap ranges for x86-64. The
page-table pages are all pre-allocated now so that synchronization is
no longer necessary.

Signed-off-by: Joerg Roedel
Signed-off-by: Ingo Molnar
Reviewed-by: Mike Rapoport
Link: https://lore.kernel.org/r/20200721095953.6218-3-joro@8bytes.org
---
 arch/x86/mm/init_64.c | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'arch/x86/mm')

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e76bdb001460..e0cd2dfd333d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -217,11 +217,6 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 		sync_global_pgds_l4(start, end);
 }
 
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
-{
-	sync_global_pgds(start, end);
-}
-
 /*
  * NOTE: This function is marked __ref because it calls __init function
  * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
--
cgit v1.2.3

From 2b32ab031e82a109e2c5b0d30ce563db0fe286b4 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Tue, 21 Jul 2020 11:59:53 +0200
Subject: x86/mm/64: Make sync_global_pgds() static

The function is only called from within init_64.c and can be static.
Also remove it from pgtable_64.h.

Signed-off-by: Joerg Roedel
Signed-off-by: Ingo Molnar
Reviewed-by: Mike Rapoport
Link: https://lore.kernel.org/r/20200721095953.6218-4-joro@8bytes.org
---
 arch/x86/mm/init_64.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/mm')

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e0cd2dfd333d..e65b96f381a7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -209,7 +209,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end)
  * When memory was added make sure all the processes MM have
  * suitable PGD entries in the local PGD level page.
  */
-void sync_global_pgds(unsigned long start, unsigned long end)
+static void sync_global_pgds(unsigned long start, unsigned long end)
 {
 	if (pgtable_l5_enabled())
 		sync_global_pgds_l5(start, end);
--
cgit v1.2.3
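For context on what this series makes unnecessary: sync_global_pgds()
is kept for memory hotplug, but before these patches it also ran on
every vmalloc/ioremap page-table update via arch_sync_kernel_mappings().
Its 4-level variant roughly follows the pattern below; this is a
simplified sketch of sync_global_pgds_l4() from init_64.c of this
period, not the verbatim code, with the Xen-specific per-mm
page_table_lock handling and the consistency BUG_ON checks omitted:

	/* Simplified sketch of sync_global_pgds_l4() -- not verbatim */
	static void sync_global_pgds_l4(unsigned long start, unsigned long end)
	{
		unsigned long addr;

		for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
			/* Reference entry in the kernel page-table (init_mm) */
			p4d_t *p4d_ref = p4d_offset(pgd_offset_k(addr), addr);
			struct page *page;

			if (p4d_none(*p4d_ref))
				continue;

			spin_lock(&pgd_lock);	/* one global lock ... */
			/* ... held while walking the PGD page of every mm */
			list_for_each_entry(page, &pgd_list, lru) {
				pgd_t *pgd = (pgd_t *)page_address(page) + pgd_index(addr);
				p4d_t *p4d = p4d_offset(pgd, addr);

				if (p4d_none(*p4d))
					set_p4d(p4d, *p4d_ref);
			}
			spin_unlock(&pgd_lock);
		}
	}

Because pgd_lock is global and pgd_list links the PGD page of every mm
in the system, each call scales with the number of processes; that is
exactly the runtime cost the boot-time pre-allocation eliminates.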