author:    Linus Torvalds <torvalds@g5.osdl.org>  2006-03-22 10:56:57 -0800
committer: Linus Torvalds <torvalds@g5.osdl.org>  2006-03-22 10:56:57 -0800
commit:    d04ef3a795b3b7b376a02713ed5e211e9ae1f917
tree:      837da034751a2fc1be0fc5a105c218d41a498eb6 /arch/sparc64
parent:    36177ba655c238e33400cc2837a28720b62784bd
parent:    dcc1e8dd88d4bc55e32a26dad7633d20ffe606d2
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
[SPARC64]: Add a secondary TSB for hugepage mappings.
[SPARC]: Respect vm_page_prot in io_remap_page_range().
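
The heart of the hugepage change is that a TSB slot is now computed from a per-TSB hash shift rather than a hard-coded PAGE_SHIFT, so one lookup routine can serve both the base TSB and the new hugepage TSB. Below is a minimal standalone C sketch of that indexing scheme; the shift values (13 for 8K base pages, 22 for 4MB huge pages), the 512-entry table size, and the example address are illustrative, not taken from any particular configuration:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's PAGE_SHIFT/HPAGE_SHIFT. */
#define PAGE_SHIFT_SKETCH   13UL   /* 8K base pages */
#define HPAGE_SHIFT_SKETCH  22UL   /* 4MB huge pages */

/* Index a TSB of `nentries` slots: drop the page offset bits, then
 * mask down to the table size (a power of two).  This mirrors the
 * patched tsb_hash(), which takes hash_shift as a parameter.
 */
static unsigned long tsb_hash(unsigned long vaddr,
			      unsigned long hash_shift,
			      unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

int main(void)
{
	unsigned long va = 0x2aaaab400000UL; /* arbitrary example */

	/* Same address, two different TSB slots. */
	printf("base slot: %lu\n", tsb_hash(va, PAGE_SHIFT_SKETCH, 512));
	printf("huge slot: %lu\n", tsb_hash(va, HPAGE_SHIFT_SKETCH, 512));
	return 0;
}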
Diffstat (limited to 'arch/sparc64')
 arch/sparc64/Kconfig                 |   4
 arch/sparc64/kernel/pci.c            |   2
 arch/sparc64/kernel/sun4v_tlb_miss.S |  39
 arch/sparc64/kernel/traps.c          |  21
 arch/sparc64/kernel/tsb.S            | 210
 arch/sparc64/mm/fault.c              |  15
 arch/sparc64/mm/generic.c            |   1
 arch/sparc64/mm/hugetlbpage.c        |  28
 arch/sparc64/mm/init.c               |  21
 arch/sparc64/mm/tsb.c                | 234
 10 files changed, 402 insertions(+), 173 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index c3685b314d71..267afddf63cf 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -175,11 +175,11 @@ config HUGETLB_PAGE_SIZE_4MB
 	bool "4MB"
 
 config HUGETLB_PAGE_SIZE_512K
-	depends on !SPARC64_PAGE_SIZE_4MB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
 	bool "512K"
 
 config HUGETLB_PAGE_SIZE_64K
-	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB
+	depends on !SPARC64_PAGE_SIZE_4MB && !SPARC64_PAGE_SIZE_512KB && !SPARC64_PAGE_SIZE_64K
 	bool "64K"
 
 endchoice
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index 95ffa9418620..dfccff29e182 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -656,6 +656,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	__pci_mmap_set_flags(dev, vma, mmap_state);
 	__pci_mmap_set_pgprot(dev, vma, mmap_state);
 
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	ret = io_remap_pfn_range(vma, vma->vm_start,
 				 vma->vm_pgoff,
 				 vma->vm_end - vma->vm_start,
@@ -663,7 +664,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 	if (ret)
 		return ret;
 
-	vma->vm_flags |= VM_IO;
 	return 0;
 }
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index ab23ddb7116e..b731881224e8 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -29,15 +29,15 @@
  *
  *	index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
  *	tsb_base = tsb_reg & ~0x7UL;
- *	tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+ *	tsb_index = ((vaddr >> HASH_SHIFT) & tsb_mask);
  *	tsb_ptr = tsb_base + (tsb_index * 16);
  */
-#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, TMP1, TMP2)	\
+#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
 	and	TSB_PTR, 0x7, TMP1;			\
 	mov	512, TMP2;				\
 	andn	TSB_PTR, 0x7, TSB_PTR;			\
 	sllx	TMP2, TMP1, TMP2;			\
-	srlx	VADDR, PAGE_SHIFT, TMP1;		\
+	srlx	VADDR, HASH_SHIFT, TMP1;		\
 	sub	TMP2, 1, TMP2;				\
 	and	TMP1, TMP2, TMP1;			\
 	sllx	TMP1, 4, TMP1;				\
@@ -53,7 +53,7 @@ sun4v_itlb_miss:
 	LOAD_ITLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -99,7 +99,7 @@ sun4v_dtlb_miss:
 	LOAD_DTLB_INFO(%g2, %g4, %g5)
 	COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
-	COMPUTE_TSB_PTR(%g1, %g4, %g3, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
 
 	/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
 	ldda	[%g1] ASI_QUAD_LDD_PHYS_4V, %g2
@@ -171,21 +171,26 @@ sun4v_dtsb_miss:
 
 	/* fallthrough */
 
-	/* Create TSB pointer into %g1.  This is something like:
-	 *
-	 *	index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
-	 *	tsb_base = tsb_reg & ~0x7UL;
-	 *	tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
-	 *	tsb_ptr = tsb_base + (tsb_index * 16);
-	 */
 sun4v_tsb_miss_common:
-	COMPUTE_TSB_PTR(%g1, %g4, %g5, %g7)
+	COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
 
-	/* Branch directly to page table lookup.  We have SCRATCHPAD_MMU_MISS
-	 * still in %g2, so it's quite trivial to get at the PGD PHYS value
-	 * so we can preload it into %g7.
-	 */
 	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+#ifdef CONFIG_HUGETLB_PAGE
+	mov	SCRATCHPAD_UTSBREG2, %g5
+	ldxa	[%g5] ASI_SCRATCHPAD, %g5
+	cmp	%g5, -1
+	be,pt	%xcc, 80f
+	 nop
+	COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
+
+	/* That clobbered %g2, reload it.  */
+	ldxa	[%g0] ASI_SCRATCHPAD, %g2
+	sub	%g2, TRAP_PER_CPU_FAULT_INFO, %g2
+
+80:	stx	%g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+#endif
+
 	ba,pt	%xcc, tsb_miss_page_table_walk_sun4v_fastpath
 	 ldx	[%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
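
The COMPUTE_TSB_PTR macro encodes the TSB size in the low three bits of the TSB register, exactly as the comment above it describes. The following is a C rendering of that pointer arithmetic, a sketch only; the inputs in main() are arbitrary examples:

#include <stdint.h>
#include <stdio.h>

/* C rendering of COMPUTE_TSB_PTR: the low 3 bits of tsb_reg encode the
 * TSB size (512 << n entries), the remaining bits are the TSB base, and
 * each TSB entry is 16 bytes (an 8-byte tag plus an 8-byte TTE).
 */
static uint64_t compute_tsb_ptr(uint64_t tsb_reg, uint64_t vaddr,
				unsigned int hash_shift)
{
	uint64_t index_mask = (512ULL << (tsb_reg & 0x7ULL)) - 1ULL;
	uint64_t tsb_base   = tsb_reg & ~0x7ULL;
	uint64_t tsb_index  = (vaddr >> hash_shift) & index_mask;

	return tsb_base + (tsb_index * 16);
}

int main(void)
{
	/* Arbitrary example: an 8192-entry TSB (512 << 4) based at 0x100000. */
	printf("entry at %#llx\n", (unsigned long long)
	       compute_tsb_ptr(0x100004ULL, 0x2aaaab400000ULL, 13));
	return 0;
}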
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 7f7dba0ca96a..df612e4f75f9 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2482,6 +2482,7 @@ void init_cur_cpu_trap(struct thread_info *t)
 
 extern void thread_info_offsets_are_bolixed_dave(void);
 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
+extern void tsb_config_offsets_are_bolixed_dave(void);
 
 /* Only invoked on boot processor. */
 void __init trap_init(void)
@@ -2535,9 +2536,27 @@ void __init trap_init(void)
 	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
 	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
 	    (TRAP_PER_CPU_CPU_LIST_PA !=
-	     offsetof(struct trap_per_cpu, cpu_list_pa)))
+	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
+	    (TRAP_PER_CPU_TSB_HUGE !=
+	     offsetof(struct trap_per_cpu, tsb_huge)) ||
+	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
+	     offsetof(struct trap_per_cpu, tsb_huge_temp)))
 		trap_per_cpu_offsets_are_bolixed_dave();
 
+	if ((TSB_CONFIG_TSB !=
+	     offsetof(struct tsb_config, tsb)) ||
+	    (TSB_CONFIG_RSS_LIMIT !=
+	     offsetof(struct tsb_config, tsb_rss_limit)) ||
+	    (TSB_CONFIG_NENTRIES !=
+	     offsetof(struct tsb_config, tsb_nentries)) ||
+	    (TSB_CONFIG_REG_VAL !=
+	     offsetof(struct tsb_config, tsb_reg_val)) ||
+	    (TSB_CONFIG_MAP_VADDR !=
+	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
+	    (TSB_CONFIG_MAP_PTE !=
+	     offsetof(struct tsb_config, tsb_map_pte)))
+		tsb_config_offsets_are_bolixed_dave();
+
 	/* Attach to the address space of init_task.  On SMP we
 	 * do this in smp.c:smp_callin for other cpus.
 	 */
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 118baea44f69..a0c8ba58920b 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -3,8 +3,13 @@
  * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
  */
 
+#include <linux/config.h>
+
 #include <asm/tsb.h>
 #include <asm/hypervisor.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
+#include <asm/mmu.h>
 
 	.text
 	.align	32
@@ -34,34 +39,124 @@ tsb_miss_itlb:
 	 ldxa		[%g4] ASI_IMMU, %g4
 
 	/* At this point we have:
-	 * %g1 -- TSB entry address
+	 * %g1 -- PAGE_SIZE TSB entry address
 	 * %g3 -- FAULT_CODE_{D,I}TLB
 	 * %g4 -- missing virtual address
 	 * %g6 -- TAG TARGET (vaddr >> 22)
 	 */
 tsb_miss_page_table_walk:
-	TRAP_LOAD_PGD_PHYS(%g7, %g5)
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)
 
-	/* And now we have the PGD base physical address in %g7.  */
-tsb_miss_page_table_walk_sun4v_fastpath:
-	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
+	/* Before committing to a full page table walk,
+	 * check the huge page TSB.
+	 */
+#ifdef CONFIG_HUGETLB_PAGE
+
+661:	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
+	nop
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		SCRATCHPAD_UTSBREG2, %g5
+	ldxa		[%g5] ASI_SCRATCHPAD, %g5
+	.previous
+
+	cmp		%g5, -1
+	be,pt		%xcc, 80f
+	 nop
+
+	/* We need an aligned pair of registers containing 2 values
+	 * which can be easily rematerialized.  %g6 and %g7 foot the
+	 * bill just nicely.  We'll save %g6 away into %g2 for the
+	 * huge page TSB TAG comparison.
+	 *
+	 * Perform a huge page TSB lookup.
+	 */
+	mov		%g6, %g2
+	and		%g5, 0x7, %g6
+	mov		512, %g7
+	andn		%g5, 0x7, %g5
+	sllx		%g7, %g6, %g7
+	srlx		%g4, HPAGE_SHIFT, %g6
+	sub		%g7, 1, %g7
+	and		%g6, %g7, %g6
+	sllx		%g6, 4, %g6
+	add		%g5, %g6, %g5
+
+	TSB_LOAD_QUAD(%g5, %g6)
+	cmp		%g6, %g2
+	be,a,pt		%xcc, tsb_tlb_reload
+	 mov		%g7, %g5
+
+	/* No match, remember the huge page TSB entry address,
+	 * and restore %g6 and %g7.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
+	srlx		%g4, 22, %g6
+80:	stx		%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
+
+#endif
+
+	ldx		[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7
 
 	/* At this point we have:
 	 * %g1 -- TSB entry address
 	 * %g3 -- FAULT_CODE_{D,I}TLB
-	 * %g5 -- physical address of PTE in Linux page tables
+	 * %g4 -- missing virtual address
 	 * %g6 -- TAG TARGET (vaddr >> 22)
+	 * %g7 -- page table physical address
+	 *
+	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
+	 * TSB both lack a matching entry.
 	 */
-tsb_reload:
-	TSB_LOCK_TAG(%g1, %g2, %g7)
+tsb_miss_page_table_walk_sun4v_fastpath:
+	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
 	/* Load and check PTE.  */
 	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
-	mov		1, %g7
-	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
-	brgez,a,pn	%g5, tsb_do_fault
-	 TSB_STORE(%g1, %g7)
+	brgez,pn	%g5, tsb_do_fault
+	 nop
+
+#ifdef CONFIG_HUGETLB_PAGE
+661:	sethi		%uhi(_PAGE_SZALL_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZALL_4V, %g7
+	nop
+	.previous
+
+	and		%g5, %g7, %g2
+
+661:	sethi		%uhi(_PAGE_SZHUGE_4U), %g7
+	sllx		%g7, 32, %g7
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	mov		_PAGE_SZHUGE_4V, %g7
+	nop
+	.previous
+
+	cmp		%g2, %g7
+	bne,pt		%xcc, 60f
+	 nop
+
+	/* It is a huge page, use huge page TSB entry address we
+	 * calculated above.
+	 */
+	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
+	ldx		[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2
+	cmp		%g2, -1
+	movne		%xcc, %g2, %g1
+60:
+#endif
+
+	/* At this point we have:
+	 * %g1 -- TSB entry address
+	 * %g3 -- FAULT_CODE_{D,I}TLB
+	 * %g5 -- valid PTE
+	 * %g6 -- TAG TARGET (vaddr >> 22)
+	 */
+tsb_reload:
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 	TSB_WRITE(%g1, %g5, %g6)
 
 	/* Finally, load TLB and return from trap.  */
@@ -240,10 +335,9 @@ tsb_flush:
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
-	 * %o1:	TSB register value
-	 * %o2:	TSB virtual address
-	 * %o3:	TSB mapping locked PTE
-	 * %o4:	Hypervisor TSB descriptor physical address
+	 * %o1:	TSB base config pointer
+	 * %o2:	TSB huge config pointer, or NULL if none
+	 * %o3:	Hypervisor TSB descriptor physical address
 	 *
 	 * We have to run this whole thing with interrupts
 	 * disabled so that the current cpu doesn't change
@@ -253,63 +347,79 @@ tsb_flush:
 	.globl	__tsb_context_switch
 	.type	__tsb_context_switch,#function
 __tsb_context_switch:
-	rdpr	%pstate, %o5
-	wrpr	%o5, PSTATE_IE, %pstate
+	rdpr	%pstate, %g1
+	wrpr	%g1, PSTATE_IE, %pstate
+
+	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
 
-	ldub	[%g6 + TI_CPU], %g1
-	sethi	%hi(trap_block), %g2
-	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
-	or	%g2, %lo(trap_block), %g2
-	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	sethi	%hi(tlb_type), %g1
-	lduw	[%g1 + %lo(tlb_type)], %g1
-	cmp	%g1, 3
-	bne,pt	%icc, 1f
+	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
+	brz,pt	%o2, 1f
+	 mov	-1, %g3
+
+	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3
+
+1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]
+
+	sethi	%hi(tlb_type), %g2
+	lduw	[%g2 + %lo(tlb_type)], %g2
+	cmp	%g2, 3
+	bne,pt	%icc, 50f
 	 nop
 
 	/* Hypervisor TSB switch.  */
-	mov	SCRATCHPAD_UTSBREG1, %g1
-	stxa	%o1, [%g1] ASI_SCRATCHPAD
-	mov	-1, %g2
-	mov	SCRATCHPAD_UTSBREG2, %g1
-	stxa	%g2, [%g1] ASI_SCRATCHPAD
-
-	/* Save away %o5's %pstate, we have to use %o5 for
-	 * the hypervisor call.
-	 */
-	mov	%o5, %g1
+	mov	SCRATCHPAD_UTSBREG1, %o5
+	stxa	%o0, [%o5] ASI_SCRATCHPAD
+	mov	SCRATCHPAD_UTSBREG2, %o5
+	stxa	%g3, [%o5] ASI_SCRATCHPAD
+
+	mov	2, %o0
+	cmp	%g3, -1
+	move	%xcc, 1, %o0
 
 	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
-	mov	1, %o0
-	mov	%o4, %o1
+	mov	%o3, %o1
 	ta	HV_FAST_TRAP
 
-	/* Finish up and restore %o5.  */
+	/* Finish up.  */
 	ba,pt	%xcc, 9f
-	 mov	%g1, %o5
+	 nop
 
 	/* SUN4U TSB switch.  */
-1:	mov	TSB_REG, %g1
-	stxa	%o1, [%g1] ASI_DMMU
+50:	mov	TSB_REG, %o5
+	stxa	%o0, [%o5] ASI_DMMU
 	membar	#Sync
-	stxa	%o1, [%g1] ASI_IMMU
+	stxa	%o0, [%o5] ASI_IMMU
 	membar	#Sync
 
-2:	brz	%o2, 9f
-	 nop
+2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
+	brz	%o4, 9f
+	 ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5
 
 	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
-	mov	TLB_TAG_ACCESS, %g1
+	mov	TLB_TAG_ACCESS, %g3
 	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o4, [%g3] ASI_DMMU
 	membar	#Sync
 	sllx	%g2, 3, %g2
-	stxa	%o3, [%g2] ASI_DTLB_DATA_ACCESS
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
+	brz,pt	%o2, 9f
+	 nop
+
+	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
+	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
+	mov	TLB_TAG_ACCESS, %g3
+	stxa	%o4, [%g3] ASI_DMMU
 	membar	#Sync
+	sub	%g2, (1 << 3), %g2
+	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
+	membar	#Sync
+
 9:
-	wrpr	%o5, %pstate
+	wrpr	%g1, %pstate
 
 	retl
 	 nop
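
The rewritten __tsb_context_switch above takes pointers to per-TSB config blocks rather than raw register values, and the TSB_CONFIG_* offset checks added to trap_init() pin down the layout the assembly depends on. Below is a hypothetical C mirror of that layout; the real struct tsb_config lives in the sparc64 MMU headers, and the field order here is inferred purely from the offset checks:

struct tsb;	/* table of 16-byte tag + TTE entries */

/* Hypothetical mirror of the per-TSB state that tsb.S reads via the
 * TSB_CONFIG_* offsets: the TSB itself, the RSS threshold that triggers
 * tsb_grow(), the entry count, the raw value programmed into the TSB
 * register, and the locked-mapping vaddr/PTE pair used on sun4u.
 */
struct tsb_config_sketch {
	struct tsb	*tsb;		/* TSB_CONFIG_TSB */
	unsigned long	tsb_rss_limit;	/* TSB_CONFIG_RSS_LIMIT */
	unsigned long	tsb_nentries;	/* TSB_CONFIG_NENTRIES */
	unsigned long	tsb_reg_val;	/* TSB_CONFIG_REG_VAL */
	unsigned long	tsb_map_vaddr;	/* TSB_CONFIG_MAP_VADDR */
	unsigned long	tsb_map_pte;	/* TSB_CONFIG_MAP_PTE */
};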
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 63b6cc0cd5d5..d21ff3230c02 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -410,9 +410,18 @@ good_area:
 	up_read(&mm->mmap_sem);
 
 	mm_rss = get_mm_rss(mm);
-	if (unlikely(mm_rss >= mm->context.tsb_rss_limit))
-		tsb_grow(mm, mm_rss);
-
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss -= (mm->context.huge_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#endif
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_BASE, mm_rss);
+#ifdef CONFIG_HUGETLB_PAGE
+	mm_rss = mm->context.huge_pte_count;
+	if (unlikely(mm_rss >=
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+#endif
 	return;
 
 	/*
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 5fc5c579e35e..8cb06205d265 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -140,7 +140,6 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 	vma->vm_pgoff = phys_base >> PAGE_SHIFT;
 
-	prot = __pgprot(pg_iobits);
 	offset -= from;
 	dir = pgd_offset(mm, from);
 	flush_cache_range(vma, beg, end);
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 280dc7958a13..074620d413d4 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -199,13 +199,11 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
-	if (pgd) {
-		pud = pud_offset(pgd, addr);
-		if (pud) {
-			pmd = pmd_alloc(mm, pud, addr);
-			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
-		}
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, addr);
+		if (pmd)
+			pte = pte_alloc_map(mm, pmd, addr);
 	}
 	return pte;
 }
@@ -231,13 +229,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
-
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep, pte_t entry)
 {
 	int i;
 
+	if (!pte_present(*ptep) && pte_present(entry))
+		mm->context.huge_pte_count++;
+
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		set_pte_at(mm, addr, ptep, entry);
 		ptep++;
@@ -253,6 +252,8 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	int i;
 
 	entry = *ptep;
+	if (pte_present(entry))
+		mm->context.huge_pte_count--;
 
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
 		pte_clear(mm, addr, ptep);
@@ -290,6 +291,15 @@ static void context_reload(void *__data)
 
 void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
+	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+	if (likely(tp->tsb != NULL))
+		return;
+
+	tsb_grow(mm, MM_TSB_HUGE, 0);
+	tsb_context_switch(mm);
+	smp_tsb_sync(mm);
+
 	/* On UltraSPARC-III+ and later, configure the second half of
 	 * the Data-TLB for huge pages.
 	 */
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 2ae143ba50d8..ded63ee9c4fd 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -283,6 +283,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 	struct mm_struct *mm;
 	struct tsb *tsb;
 	unsigned long tag, flags;
+	unsigned long tsb_index, tsb_hash_shift;
 
 	if (tlb_type != hypervisor) {
 		unsigned long pfn = pte_pfn(pte);
@@ -312,10 +313,26 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
 
 	mm = vma->vm_mm;
 
+	tsb_index = MM_TSB_BASE;
+	tsb_hash_shift = PAGE_SHIFT;
+
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	tsb = &mm->context.tsb[(address >> PAGE_SHIFT) &
-			       (mm->context.tsb_nentries - 1UL)];
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
+		if ((tlb_type == hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
+		    (tlb_type != hypervisor &&
+		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
+			tsb_index = MM_TSB_HUGE;
+			tsb_hash_shift = HPAGE_SHIFT;
+		}
+	}
+#endif
+
+	tsb = mm->context.tsb_block[tsb_index].tsb;
+	tsb += ((address >> tsb_hash_shift) &
+		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 	tag = (address >> 22UL);
 	tsb_insert(tsb, tag, pte_val(pte));
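
The do_sparc64_fault() change above sizes the two TSBs from two separate RSS figures: each huge page accounts for HPAGE_SIZE / PAGE_SIZE base pages of RSS, so that share is subtracted before checking the base TSB limit, while the huge TSB is sized from huge_pte_count alone. A standalone sketch of the arithmetic, with made-up page sizes and counts:

#include <stdio.h>

#define PAGE_SIZE_SKETCH   (1UL << 13)	/* 8K base pages */
#define HPAGE_SIZE_SKETCH  (1UL << 22)	/* 4MB huge pages */

int main(void)
{
	unsigned long total_rss      = 5000;	/* pages, as from get_mm_rss() */
	unsigned long huge_pte_count = 4;	/* huge pages mapped */

	/* Mirror of the fault-path accounting: remove the huge pages'
	 * share of the RSS before checking the base TSB limit...
	 */
	unsigned long base_rss = total_rss -
		huge_pte_count * (HPAGE_SIZE_SKETCH / PAGE_SIZE_SKETCH);

	/* ...and size the huge TSB from the huge page count alone. */
	printf("base TSB sized from %lu pages\n", base_rss);
	printf("huge TSB sized from %lu pages\n", huge_pte_count);
	return 0;
}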
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index b2064e2a44d6..beaa02810f0e 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -15,9 +15,9 @@
 
 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
 
-static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
 {
-	vaddr >>= PAGE_SHIFT;
+	vaddr >>= hash_shift;
 	return vaddr & (nentries - 1);
 }
 
@@ -36,7 +36,8 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	unsigned long v;
 
 	for (v = start; v < end; v += PAGE_SIZE) {
-		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
+		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+					      KERNEL_TSB_NENTRIES);
 		struct tsb *ent = &swapper_tsb[hash];
 
 		if (tag_compare(ent->tag, v)) {
@@ -46,49 +47,91 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
 {
-	struct mm_struct *mm = mp->mm;
-	unsigned long nentries, base, flags;
-	struct tsb *tsb;
-	int i;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	tsb = mm->context.tsb;
-	nentries = mm->context.tsb_nentries;
+	unsigned long i;
 
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		base = __pa(tsb);
-	else
-		base = (unsigned long) tsb;
-
 	for (i = 0; i < mp->tlb_nr; i++) {
 		unsigned long v = mp->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
 
-		hash = tsb_hash(v, nentries);
-		ent = base + (hash * sizeof(struct tsb));
+		hash = tsb_hash(v, hash_shift, nentries);
+		ent = tsb + (hash * sizeof(struct tsb));
 		tag = (v >> 22UL);
 
 		tsb_flush(ent, tag);
 	}
+}
+
+void flush_tsb_user(struct mmu_gather *mp)
+{
+	struct mm_struct *mm = mp->mm;
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		base = __pa(base);
+	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+
+#ifdef CONFIG_HUGETLB_PAGE
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+	}
+#endif
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
 }
 
-static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
+#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
+#else
+#error Broken base page size setting...
+#endif
+
+#ifdef CONFIG_HUGETLB_PAGE
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
+#else
+#error Broken huge page size setting...
+#endif
+#endif
+
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
 {
 	unsigned long tsb_reg, base, tsb_paddr;
 	unsigned long page_sz, tte;
 
-	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
+	mm->context.tsb_block[tsb_idx].tsb_nentries =
+		tsb_bytes / sizeof(struct tsb);
 
 	base = TSBMAP_BASE;
 	tte = pgprot_val(PAGE_KERNEL_LOCKED);
-	tsb_paddr = __pa(mm->context.tsb);
+	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
 	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
 
 	/* Use the smallest page size that can map the whole TSB
@@ -147,61 +190,49 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
 		/* Physical mapping, no locked TLB entry for TSB.  */
 		tsb_reg |= tsb_paddr;
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = 0;
-		mm->context.tsb_map_pte = 0;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
 	} else {
 		tsb_reg |= base;
 		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
 		tte |= (tsb_paddr & ~(page_sz - 1UL));
 
-		mm->context.tsb_reg_val = tsb_reg;
-		mm->context.tsb_map_vaddr = base;
-		mm->context.tsb_map_pte = tte;
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
 	}
 
 	/* Setup the Hypervisor TSB descriptor.  */
 	if (tlb_type == hypervisor) {
-		struct hv_tsb_descr *hp = &mm->context.tsb_descr;
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
 
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_idx = HV_PGSZ_IDX_8K;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
 			break;
-
-		case 64 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_512K;
-			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
 			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->assoc = 1;
 		hp->num_ttes = tsb_bytes / 16;
 		hp->ctx_idx = 0;
-		switch (PAGE_SIZE) {
-		case 8192:
-		default:
-			hp->pgsz_mask = HV_PGSZ_MASK_8K;
-			break;
-
-		case 64 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_64K;
-			break;
-
-		case 512 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_512K;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
 			break;
-
-		case 4 * 1024 * 1024:
-			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
+#ifdef CONFIG_HUGETLB_PAGE
+		case MM_TSB_HUGE:
+			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
 			break;
+#endif
+		default:
+			BUG();
 		};
 		hp->tsb_base = tsb_paddr;
 		hp->resv = 0;
@@ -241,11 +272,11 @@ void __init tsb_cache_init(void)
 	}
 }
 
-/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
- * do_sparc64_fault() invokes this routine to try and grow the TSB.
+/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
+ * do_sparc64_fault() invokes this routine to try and grow it.
  *
  * When we reach the maximum TSB size supported, we stick ~0UL into
- * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
+ * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
  * will not trigger any longer.
  *
  * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
@@ -257,7 +288,7 @@ void __init tsb_cache_init(void)
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
-void tsb_grow(struct mm_struct *mm, unsigned long rss)
+void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
 {
 	unsigned long max_tsb_size = 1 * 1024 * 1024;
 	unsigned long new_size, old_size, flags;
@@ -297,7 +328,8 @@ retry_tsb_alloc:
 		 * down to a 0-order allocation and force no TSB
 		 * growing for this address space.
 		 */
-		if (mm->context.tsb == NULL && new_cache_index > 0) {
+		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
+		    new_cache_index > 0) {
 			new_cache_index = 0;
 			new_size = 8192;
 			new_rss_limit = ~0UL;
@@ -307,8 +339,8 @@ retry_tsb_alloc:
 		/* If we failed on a TSB grow, we are under serious
 		 * memory pressure so don't try to grow any more.
 		 */
-		if (mm->context.tsb != NULL)
-			mm->context.tsb_rss_limit = ~0UL;
+		if (mm->context.tsb_block[tsb_index].tsb != NULL)
+			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
 		return;
 	}
@@ -339,23 +371,26 @@ retry_tsb_alloc:
 	 */
 	spin_lock_irqsave(&mm->context.lock, flags);
 
-	old_tsb = mm->context.tsb;
-	old_cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	old_size = mm->context.tsb_nentries * sizeof(struct tsb);
+	old_tsb = mm->context.tsb_block[tsb_index].tsb;
+	old_cache_index =
+		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
+	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
+		    sizeof(struct tsb));
 
 	/* Handle multiple threads trying to grow the TSB at the same time.
 	 * One will get in here first, and bump the size and the RSS limit.
 	 * The others will get in here next and hit this check.
 	 */
-	if (unlikely(old_tsb && (rss < mm->context.tsb_rss_limit))) {
+	if (unlikely(old_tsb &&
+		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
 		spin_unlock_irqrestore(&mm->context.lock, flags);
 
 		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
 		return;
 	}
 
-	mm->context.tsb_rss_limit = new_rss_limit;
+	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
 
 	if (old_tsb) {
 		extern void copy_tsb(unsigned long old_tsb_base,
@@ -372,8 +407,8 @@ retry_tsb_alloc:
 		copy_tsb(old_tsb_base, old_size,
 			 new_tsb_base, new_size);
 	}
 
-	mm->context.tsb = new_tsb;
-	setup_tsb_params(mm, new_size);
+	mm->context.tsb_block[tsb_index].tsb = new_tsb;
+	setup_tsb_params(mm, tsb_index, new_size);
 
 	spin_unlock_irqrestore(&mm->context.lock, flags);
@@ -394,40 +429,65 @@ retry_tsb_alloc:
 
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+	unsigned long huge_pte_count;
+#endif
+	unsigned int i;
+
 	spin_lock_init(&mm->context.lock);
 
 	mm->context.sparc64_ctx_val = 0UL;
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/* We reset it to zero because the fork() page copying
+	 * will re-increment the counters as the parent PTEs are
+	 * copied into the child address space.
+	 */
+	huge_pte_count = mm->context.huge_pte_count;
+	mm->context.huge_pte_count = 0;
+#endif
+
 	/* copy_mm() copies over the parent's mm_struct before calling
 	 * us, so we need to zero out the TSB pointer or else tsb_grow()
 	 * will be confused and think there is an older TSB to free up.
 	 */
-	mm->context.tsb = NULL;
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		mm->context.tsb_block[i].tsb = NULL;
 
 	/* If this is fork, inherit the parent's TSB size.  We would
 	 * grow it to that size on the first page fault anyways.
 	 */
-	tsb_grow(mm, get_mm_rss(mm));
+	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
 
-	if (unlikely(!mm->context.tsb))
+#ifdef CONFIG_HUGETLB_PAGE
+	if (unlikely(huge_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
+#endif
+
+	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
 		return -ENOMEM;
 
 	return 0;
 }
 
-void destroy_context(struct mm_struct *mm)
+static void tsb_destroy_one(struct tsb_config *tp)
 {
-	unsigned long flags, cache_index;
+	unsigned long cache_index;
 
-	cache_index = (mm->context.tsb_reg_val & 0x7UL);
-	kmem_cache_free(tsb_caches[cache_index], mm->context.tsb);
+	if (!tp->tsb)
+		return;
+	cache_index = tp->tsb_reg_val & 0x7UL;
+	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
+	tp->tsb = NULL;
+	tp->tsb_reg_val = 0UL;
+}
 
-	/* We can remove these later, but for now it's useful
-	 * to catch any bogus post-destroy_context() references
-	 * to the TSB.
-	 */
-	mm->context.tsb = NULL;
-	mm->context.tsb_reg_val = 0UL;
+void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags, i;
+
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		tsb_destroy_one(&mm->context.tsb_block[i]);
 
 	spin_lock_irqsave(&ctx_alloc_lock, flags);
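
As the comment block in tsb.c above states, the TSB ranges from 8K to 1MB in increasing powers of two, each entry is 16 bytes, and a grow is triggered once RSS reaches 3/4 of capacity. The following small sketch tabulates those thresholds; the 3/4 factor and 16-byte entry size come from the patch, while the loop itself is purely illustrative:

#include <stdio.h>

/* Each TSB entry is 16 bytes (tag + TTE), and tsb_grow() fires once
 * RSS reaches 3/4 of the entry count, per the comment in tsb.c.
 */
int main(void)
{
	unsigned long bytes;

	for (bytes = 8192; bytes <= 1024 * 1024; bytes <<= 1) {
		unsigned long nentries  = bytes / 16;
		unsigned long rss_limit = nentries * 3 / 4;

		printf("%7lu-byte TSB: %6lu entries, grow at RSS %6lu\n",
		       bytes, nentries, rss_limit);
	}
	return 0;
}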