author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2012-10-17 12:18:05 +0200
---|---|---
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2012-11-23 11:14:25 +0100
commit | f7817968d03df390d77d3af1b13298efa4f31047 (patch) |
tree | 11e355fc56ce3ff4fd433a3fa282881a6481d676 /arch/s390 |
parent | 18da236908793abccebc6f365fbe6e95a5b41db0 (diff) |
s390/mm,vmemmap: use 1MB frames for vmemmap
Use 1MB frames for vmemmap if EDAT1 is available in order to
reduce TLB pressure.

Always use a 1MB frame even if it's only partially needed for
struct pages; otherwise we would end up with a mix of large
frame and page mappings, because vmemmap_populate gets called
for each section (256MB -> 3.5MB memmap) separately.

Worst case is that we waste 512KB per section.
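A quick sanity check of those numbers (a standalone sketch, not kernel code; the 56-byte struct page size is an assumption back-derived from the quoted 3.5MB memmap per 256MB section):

```c
#include <stdio.h>

/* Constants mirroring the commit message's figures. STRUCT_PAGE_SIZE
 * is an assumed value implied by 3.5MB of memmap per 256MB section. */
#define SECTION_SIZE     (256UL << 20)  /* 256MB memory section */
#define PAGE_SIZE_4K     4096UL         /* 4KB base pages */
#define STRUCT_PAGE_SIZE 56UL           /* bytes per struct page (assumed) */
#define SEG_SIZE         (1UL << 20)    /* 1MB segment (large) frame */

int main(void)
{
	unsigned long pages  = SECTION_SIZE / PAGE_SIZE_4K;        /* 65536 */
	unsigned long memmap = pages * STRUCT_PAGE_SIZE;           /* 3.5MB */
	unsigned long frames = (memmap + SEG_SIZE - 1) / SEG_SIZE; /* 4 */
	unsigned long waste  = frames * SEG_SIZE - memmap;         /* 512KB */

	printf("memmap per section: %lu KB\n", memmap >> 10);  /* 3584 KB */
	printf("1MB frames needed:  %lu\n", frames);           /* 4 */
	printf("worst-case waste:   %lu KB\n", waste >> 10);   /* 512 KB */
	return 0;
}
```

3.5MB of memmap fills three 1MB frames completely and half of a fourth, so the 512KB figure is the unused half of that last frame.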
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390')
-rw-r--r-- | arch/s390/mm/vmem.c | 26
1 file changed, 25 insertions(+), 1 deletion(-)
```diff
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index bf37a094a46b..6ed1426d27c5 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -205,7 +205,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 	start_addr = (unsigned long) start;
 	end_addr = (unsigned long) (start + nr);
 
-	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+	for (address = start_addr; address < end_addr;) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
 			pu_dir = vmem_pud_alloc();
@@ -224,10 +224,33 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 
 		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir)) {
+#ifdef CONFIG_64BIT
+			/* Use 1MB frames for vmemmap if available. We always
+			 * use large frames even if they are only partially
+			 * used.
+			 * Otherwise we would have also page tables since
+			 * vmemmap_populate gets called for each section
+			 * separately. */
+			if (MACHINE_HAS_EDAT1) {
+				void *new_page;
+
+				new_page = vmemmap_alloc_block(PMD_SIZE, node);
+				if (!new_page)
+					goto out;
+				pte = mk_pte_phys(__pa(new_page), PAGE_RW);
+				pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+				pmd_val(*pm_dir) = pte_val(pte);
+				address = (address + PMD_SIZE) & PMD_MASK;
+				continue;
+			}
+#endif
 			pt_dir = vmem_pte_alloc(address);
 			if (!pt_dir)
 				goto out;
 			pmd_populate(&init_mm, pm_dir, pt_dir);
+		} else if (pmd_large(*pm_dir)) {
+			address = (address + PMD_SIZE) & PMD_MASK;
+			continue;
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
@@ -240,6 +263,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
 			*pt_dir = pte;
 		}
+		address += PAGE_SIZE;
 	}
 	memset(start, 0, nr * sizeof(struct page));
 	ret = 0;
```
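The key stepping trick above is `address = (address + PMD_SIZE) & PMD_MASK;`: after mapping (or skipping) a 1MB segment, the loop jumps to the next segment boundary instead of advancing by PAGE_SIZE. A minimal user-space sketch of that rounding, assuming the usual kernel convention that the mask clears the offset bits within a segment:

```c
#include <stdio.h>

#define SEG_SIZE (1UL << 20)        /* 1MB segment frame, as on s390 */
#define SEG_MASK (~(SEG_SIZE - 1))  /* clears the offset within a segment */

/* First segment boundary strictly above 'address' -- the loop step the
 * patch uses after handling a large mapping. */
static unsigned long next_segment(unsigned long address)
{
	return (address + SEG_SIZE) & SEG_MASK;
}

int main(void)
{
	/* Aligned address: advance by exactly one segment. */
	printf("%#lx -> %#lx\n", 0x100000UL, next_segment(0x100000UL));
	/* Unaligned address: round up to the next boundary, so a segment
	 * already covered by a large mapping is never revisited. */
	printf("%#lx -> %#lx\n", 0x123456UL, next_segment(0x123456UL));
	return 0;
}
```

This same advance in the new `pmd_large()` branch is what lets a frame allocated for one section's memmap be reused when it already covers the start of the next section's memmap: the loop simply skips past the existing large mapping.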