author		Heiko Carstens <hca@linux.ibm.com>	2023-10-27 14:12:39 +0200
committer	Vasily Gorbik <gor@linux.ibm.com>	2023-11-05 22:34:58 +0100
commit		a51324c430db3fcf3e7d77c265491322c251a396 (patch)
tree		8287b8d7b681c66ba708982b3c96cd8b26eb5279 /arch/s390/mm/page-states.c
parent		65d37f163add1c6ead3a63788acb2f9590159f94 (diff)
s390/cmma: rework no-dat handling
Rework the way physical pages are set no-dat / dat:
The old way is (a condensed sketch follows this list):
- Rely on all pages being initially marked "dat"
- Allocate page tables for the kernel mapping
- Enable dat
- Walk the whole kernel mapping and set the PG_arch_1 bit in all struct pages
  that belong to kernel page table pages
- Walk all struct pages and test and clear the PG_arch_1 bit. If the bit is
not set, set the page state to no-dat
- For all subsequent page table allocations, set the page state to dat
  (remove the no-dat state) at allocation time
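For orientation, here is a condensed sketch of the last two steps, distilled from the removed mark_kernel_pgd() and cmma_init_nodat() in the diff further down; the multi-level page table walk and the free-page check are elided:

/*
 * Condensed sketch of the old flow (distilled from the removed code in
 * the diff below; the multi-level page table walk and the free-page
 * special case are elided).
 */
static void __init old_cmma_init_nodat_sketch(void)
{
	unsigned long start, end, ix;
	struct page *page;
	int i;

	/* Walk the kernel mapping, set PG_arch_1 on page table pages. */
	mark_kernel_pgd();

	/* Every page left without PG_arch_1 is set to no-dat. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* page table page stays "dat" */
			__set_page_stable_nodat(page_to_virt(page), 1);
		}
	}
}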
Change this rather complex logic to a simpler approach (sketched below the list):
- Set the whole physical memory (all pages) to "no-dat"
- Explicitly set those page table pages to "dat" that are part of the
  kernel image (e.g. swapper_pg_dir)
- For all subsequent page table allocations, set the page state to dat
  (remove the no-dat state) at allocation time
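The new flow itself lands outside this file (the diffstat below is limited to arch/s390/mm/page-states.c, which only loses code), so the following is a rough sketch under assumptions: set_memory_range_nodat() and set_pages_dat() are hypothetical helper names, and ident_map_size stands in for the end of identity-mapped physical memory.

/*
 * Rough sketch of the new flow. set_memory_range_nodat() and
 * set_pages_dat() are hypothetical names, not the kernel's actual
 * API; the real code is outside this diffstat.
 */
static void __init new_cmma_setup_sketch(void)
{
	/* 1) Declare all of physical memory "no-dat" up front. */
	set_memory_range_nodat(0, ident_map_size);

	/*
	 * 2) The page tables that are part of the kernel image span four
	 *    pages each (cf. the removed loops over page[0..3] below).
	 */
	set_pages_dat(swapper_pg_dir, 4);
	set_pages_dat(invalid_pg_dir, 4);

	/*
	 * 3) Later page table allocations set their pages back to "dat"
	 *    at allocation time, so no final walk over all struct pages
	 *    (and no PG_arch_1 bookkeeping) is needed.
	 */
}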
As a result the code is simpler, and this also allows getting rid of one
odd usage of the PG_arch_1 bit.
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Diffstat (limited to 'arch/s390/mm/page-states.c')
-rw-r--r--	arch/s390/mm/page-states.c	127
1 file changed, 2 insertions(+), 125 deletions(-)
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 511c43aad5df..01f9b39e65f5 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -7,136 +7,13 @@
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/memblock.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <asm/asm-extable.h>
-#include <asm/facility.h>
 #include <asm/page-states.h>
+#include <asm/sections.h>
+#include <asm/page.h>
 
 int __bootdata_preserved(cmma_flag);
 
-static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
-{
-	unsigned long next;
-	struct page *page;
-	pmd_t *pmd;
-
-	pmd = pmd_offset(pud, addr);
-	do {
-		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd) || pmd_large(*pmd))
-			continue;
-		page = phys_to_page(pmd_val(*pmd));
-		set_bit(PG_arch_1, &page->flags);
-	} while (pmd++, addr = next, addr != end);
-}
-
-static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
-{
-	unsigned long next;
-	struct page *page;
-	pud_t *pud;
-	int i;
-
-	pud = pud_offset(p4d, addr);
-	do {
-		next = pud_addr_end(addr, end);
-		if (pud_none(*pud) || pud_large(*pud))
-			continue;
-		if (!pud_folded(*pud)) {
-			page = phys_to_page(pud_val(*pud));
-			for (i = 0; i < 4; i++)
-				set_bit(PG_arch_1, &page[i].flags);
-		}
-		mark_kernel_pmd(pud, addr, next);
-	} while (pud++, addr = next, addr != end);
-}
-
-static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-	unsigned long next;
-	struct page *page;
-	p4d_t *p4d;
-	int i;
-
-	p4d = p4d_offset(pgd, addr);
-	do {
-		next = p4d_addr_end(addr, end);
-		if (p4d_none(*p4d))
-			continue;
-		if (!p4d_folded(*p4d)) {
-			page = phys_to_page(p4d_val(*p4d));
-			for (i = 0; i < 4; i++)
-				set_bit(PG_arch_1, &page[i].flags);
-		}
-		mark_kernel_pud(p4d, addr, next);
-	} while (p4d++, addr = next, addr != end);
-}
-
-static void mark_kernel_pgd(void)
-{
-	unsigned long addr, next, max_addr;
-	struct page *page;
-	pgd_t *pgd;
-	int i;
-
-	addr = 0;
-	/*
-	 * Figure out maximum virtual address accessible with the
-	 * kernel ASCE. This is required to keep the page table walker
-	 * from accessing non-existent entries.
-	 */
-	max_addr = (S390_lowcore.kernel_asce.val & _ASCE_TYPE_MASK) >> 2;
-	max_addr = 1UL << (max_addr * 11 + 31);
-	pgd = pgd_offset_k(addr);
-	do {
-		next = pgd_addr_end(addr, max_addr);
-		if (pgd_none(*pgd))
-			continue;
-		if (!pgd_folded(*pgd)) {
-			page = phys_to_page(pgd_val(*pgd));
-			for (i = 0; i < 4; i++)
-				set_bit(PG_arch_1, &page[i].flags);
-		}
-		mark_kernel_p4d(pgd, addr, next);
-	} while (pgd++, addr = next, addr != max_addr);
-}
-
-void __init cmma_init_nodat(void)
-{
-	struct page *page;
-	unsigned long start, end, ix;
-	int i;
-
-	if (cmma_flag < 2)
-		return;
-	/* Mark pages used in kernel page tables */
-	mark_kernel_pgd();
-	page = virt_to_page(&swapper_pg_dir);
-	for (i = 0; i < 4; i++)
-		set_bit(PG_arch_1, &page[i].flags);
-	page = virt_to_page(&invalid_pg_dir);
-	for (i = 0; i < 4; i++)
-		set_bit(PG_arch_1, &page[i].flags);
-
-	/* Set all kernel pages not used for page tables to stable/no-dat */
-	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
-		page = pfn_to_page(start);
-		for (ix = start; ix < end; ix++, page++) {
-			if (__test_and_clear_bit(PG_arch_1, &page->flags))
-				continue;	/* skip page table pages */
-			if (!list_empty(&page->lru))
-				continue;	/* skip free pages */
-			__set_page_stable_nodat(page_to_virt(page), 1);
-		}
-	}
-}
-
 void arch_free_page(struct page *page, int order)
 {
 	if (!cmma_flag)
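For context on what "setting the page state" means here: on s390, CMMA page states are changed with the ESSA instruction, which is what helpers such as the removed __set_page_stable_nodat() wrap. Below is a minimal sketch, assuming the ESSA_SET_STABLE / ESSA_SET_STABLE_NODAT command codes from asm/page-states.h; essa_sketch() and set_page_dat_sketch() are hypothetical names with simplified asm constraints, not copies of the kernel's helpers.

#include <asm/page-states.h>	/* ESSA_SET_STABLE, ESSA_SET_STABLE_NODAT */

/* Issue ESSA (opcode 0xb9ab) for one 4K page frame; simplified sketch. */
static __always_inline unsigned long essa_sketch(unsigned long paddr,
						 unsigned char cmd)
{
	unsigned long rc;

	asm volatile(
		"	.insn	rrf,0xb9ab0000,%[rc],%[paddr],%[cmd],0"
		: [rc] "=d" (rc)
		: [paddr] "d" (paddr), [cmd] "i" (cmd));
	return rc;
}

/*
 * Allocation-time "dat" marking as described in the commit message:
 * newly allocated page table pages leave the no-dat state right away,
 * so no later walk over all struct pages is needed.
 * set_page_dat_sketch() is a hypothetical name.
 */
static inline void set_page_dat_sketch(struct page *page, int order)
{
	unsigned long i, nr = 1UL << order;

	if (!cmma_flag)
		return;
	for (i = 0; i < nr; i++)
		essa_sketch(page_to_phys(page + i), ESSA_SET_STABLE);
}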