From c9a844c5100a01c024b59401e5963ff65d6b5f31 Mon Sep 17 00:00:00 2001
From: Nitin Gupta
Date: Sat, 29 Jul 2017 11:42:17 -0700
Subject: sparc64: Support huge PUD case in get_user_pages

get_user_pages() is used to do direct IO. It already handles the case
where the address range is backed by PMD huge pages. This patch now
adds the case where the range could be backed by PUD huge pages.

Signed-off-by: Nitin Gupta
Signed-off-by: David S. Miller
---
 arch/sparc/include/asm/pgtable_64.h | 15 +++++++++++--
 arch/sparc/mm/gup.c                 | 45 ++++++++++++++++++++++++++++++++++++-
 2 files changed, 57 insertions(+), 3 deletions(-)

diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 6fbd931f0570..2579f5a22f46 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -687,6 +687,8 @@ static inline unsigned long pmd_write(pmd_t pmd)
 	return pte_write(pte);
 }
 
+#define pud_write(pud)	pte_write(__pte(pud_val(pud)))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline unsigned long pmd_dirty(pmd_t pmd)
 {
@@ -823,9 +825,18 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 	return ((unsigned long) __va(pfn << PAGE_SHIFT));
 }
 
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+	pte_t pte = __pte(pud_val(pud));
+	unsigned long pfn;
+
+	pfn = pte_pfn(pte);
+
+	return ((unsigned long) __va(pfn << PAGE_SHIFT));
+}
+
 #define pmd_page(pmd)			virt_to_page((void *)__pmd_page(pmd))
-#define pud_page_vaddr(pud)		\
-	((unsigned long) __va(pud_val(pud)))
 #define pud_page(pud)			virt_to_page((void *)pud_page_vaddr(pud))
 #define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
 #define pud_present(pud)		(pud_val(pud) != 0U)
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index f80cfc64c55b..d809099ffd47 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -103,6 +103,45 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	return 1;
 }
 
+static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
+			unsigned long end, int write, struct page **pages,
+			int *nr)
+{
+	struct page *head, *page;
+	int refs;
+
+	if (!(pud_val(pud) & _PAGE_VALID))
+		return 0;
+
+	if (write && !pud_write(pud))
+		return 0;
+
+	refs = 0;
+	page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	head = compound_head(page);
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs)) {
+		*nr -= refs;
+		return 0;
+	}
+
+	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
+		*nr -= refs;
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+
+	return 1;
+}
+
 static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		int write, struct page **pages, int *nr)
 {
@@ -141,7 +180,11 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 		next = pud_addr_end(addr, end);
 		if (pud_none(pud))
 			return 0;
-		if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+		if (unlikely(pud_large(pud))) {
+			if (!gup_huge_pud(pudp, pud, addr, next,
+					  write, pages, nr))
+				return 0;
+		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
 			return 0;
 	} while (pudp++, addr = next, addr != end);
 
--
cgit v1.2.3

From f10bb00790dd3cf550f7ae6991c4d1a128802c00 Mon Sep 17 00:00:00 2001
From: Nitin Gupta
Date: Sat, 29 Jul 2017 11:42:18 -0700
Subject: sparc64: Add 16GB hugepage support

Adds support for 16GB hugepage size.
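Once a pool of 16GB pages has been reserved with the boot parameters
quoted below, user space can request one through the generic hugetlb
mmap() interface. The following is a minimal, hedged sketch: the
MAP_HUGE_SHIFT encoding and MAP_HUGE_16GB come from the generic mmap
uapi rather than from this patch, and the fallback definitions are
illustrative only.

/*
 * Hedged example (not part of the patch): map and touch one 16GB
 * hugetlb page.  Assumes the boot-time pool described below exists.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT	26
#endif
#ifndef MAP_HUGE_16GB
/* mmap() encodes the page-size hint as log2(size) << MAP_HUGE_SHIFT */
#define MAP_HUGE_16GB	(34U << MAP_HUGE_SHIFT)	/* 1UL << 34 == 16GB */
#endif

int main(void)
{
	size_t len = 1UL << 34;	/* matches HPAGE_16GB_SHIFT in the patch */
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_16GB,
		 -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB | MAP_HUGE_16GB)");
		return 1;
	}
	memset(p, 0x5a, len);	/* fault the mapping in and write to it */
	munmap(p, len);
	return 0;
}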
To use this page size use kernel parameters as: default_hugepagesz=16G hugepagesz=16G hugepages=10 Testing: Tested with the stream benchmark which allocates 48G of arrays backed by 16G hugepages and does RW operation on them in parallel. Orabug: 25362942 Signed-off-by: Nitin Gupta Signed-off-by: David S. Miller --- arch/sparc/include/asm/hugetlb.h | 7 ++++ arch/sparc/include/asm/page_64.h | 3 +- arch/sparc/include/asm/pgtable_64.h | 5 +++ arch/sparc/include/asm/tsb.h | 36 ++++++++++++++++++ arch/sparc/kernel/tsb.S | 2 +- arch/sparc/kernel/vmlinux.lds.S | 5 +++ arch/sparc/mm/hugetlbpage.c | 74 ++++++++++++++++++++++++++----------- arch/sparc/mm/init_64.c | 54 +++++++++++++++++++++++---- 8 files changed, 156 insertions(+), 30 deletions(-) diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index d1f837dc77a4..0ca7caab1b06 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -4,6 +4,13 @@ #include #include +#ifdef CONFIG_HUGETLB_PAGE +struct pud_huge_patch_entry { + unsigned int addr; + unsigned int insn; +}; +extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end; +#endif void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index 5961b2d8398a..8ee1f97589a1 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -17,6 +17,7 @@ #define HPAGE_SHIFT 23 #define REAL_HPAGE_SHIFT 22 +#define HPAGE_16GB_SHIFT 34 #define HPAGE_2GB_SHIFT 31 #define HPAGE_256MB_SHIFT 28 #define HPAGE_64K_SHIFT 16 @@ -28,7 +29,7 @@ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) -#define HUGE_MAX_HSTATE 4 +#define HUGE_MAX_HSTATE 5 #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 2579f5a22f46..4fefe3762083 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -414,6 +414,11 @@ static inline bool is_hugetlb_pmd(pmd_t pmd) return !!(pmd_val(pmd) & _PAGE_PMD_HUGE); } +static inline bool is_hugetlb_pud(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_PUD_HUGE); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pmd_mkhuge(pmd_t pmd) { diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 32258e08da03..acf55063aa3d 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -195,6 +195,41 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; nop; \ 699: + /* PUD has been loaded into REG1, interpret the value, seeing + * if it is a HUGE PUD or a normal one. If it is not valid + * then jump to FAIL_LABEL. If it is a HUGE PUD, and it + * translates to a valid PTE, branch to PTE_LABEL. + * + * We have to propagate bits [32:22] from the virtual address + * to resolve at 4M granularity. 
+ */ +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ +700: ba 700f; \ + nop; \ + .section .pud_huge_patch, "ax"; \ + .word 700b; \ + nop; \ + .previous; \ + brz,pn REG1, FAIL_LABEL; \ + sethi %uhi(_PAGE_PUD_HUGE), REG2; \ + sllx REG2, 32, REG2; \ + andcc REG1, REG2, %g0; \ + be,pt %xcc, 700f; \ + sethi %hi(0x1ffc0000), REG2; \ + sllx REG2, 1, REG2; \ + brgez,pn REG1, FAIL_LABEL; \ + andn REG1, REG2, REG1; \ + and VADDR, REG2, REG2; \ + brlz,pt REG1, PTE_LABEL; \ + or REG1, REG2, REG1; \ +700: +#else +#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ + brz,pn REG1, FAIL_LABEL; \ + nop; +#endif + /* PMD has been loaded into REG1, interpret the value, seeing * if it is a HUGE PMD or a normal one. If it is not valid * then jump to FAIL_LABEL. If it is a HUGE PMD, and it @@ -242,6 +277,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index 07c0df924960..5f42ac099fcb 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -117,7 +117,7 @@ tsb_miss_page_table_walk_sun4v_fastpath: /* Valid PTE is now in %g5. */ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - sethi %uhi(_PAGE_PMD_HUGE), %g7 + sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7 sllx %g7, 32, %g7 andcc %g5, %g7, %g0 diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 03b3d65d1266..34d37e6c2d06 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -154,6 +154,11 @@ SECTIONS *(.get_tick_patch) __get_tick_patch_end = .; } + .pud_huge_patch : { + __pud_huge_patch = .; + *(.pud_huge_patch) + __pud_huge_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) #ifdef CONFIG_JUMP_LABEL diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 28ee8d8ffa07..7acb84db952c 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; switch (shift) { + case HPAGE_16GB_SHIFT: + hugepage_size = _PAGE_SZ16GB_4V; + pte_val(entry) |= _PAGE_PUD_HUGE; + break; case HPAGE_2GB_SHIFT: hugepage_size = _PAGE_SZ2GB_4V; pte_val(entry) |= _PAGE_PMD_HUGE; @@ -187,6 +191,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry) unsigned int shift; switch (tte_szbits) { + case _PAGE_SZ16GB_4V: + shift = HPAGE_16GB_SHIFT; + break; case _PAGE_SZ2GB_4V: shift = HPAGE_2GB_SHIFT; break; @@ -263,7 +270,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); - if (pud) { + if (!pud) + return NULL; + + if (sz >= PUD_SIZE) + pte = (pte_t *)pud; + else { pmd = pmd_alloc(mm, pud, addr); if (!pmd) return NULL; @@ -289,12 +301,16 @@ pte_t *huge_pte_offset(struct mm_struct *mm, if (!pgd_none(*pgd)) { pud = pud_offset(pgd, addr); if (!pud_none(*pud)) { - pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) { - if (is_hugetlb_pmd(*pmd)) - pte = (pte_t *)pmd; - else - pte = pte_offset_map(pmd, addr); + if (is_hugetlb_pud(*pud)) + pte = (pte_t 
*)pud; + else { + pmd = pmd_offset(pud, addr); + if (!pmd_none(*pmd)) { + if (is_hugetlb_pmd(*pmd)) + pte = (pte_t *)pmd; + else + pte = pte_offset_map(pmd, addr); + } } } } @@ -305,12 +321,20 @@ pte_t *huge_pte_offset(struct mm_struct *mm, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - unsigned int i, nptes, orig_shift, shift; - unsigned long size; + unsigned int nptes, orig_shift, shift; + unsigned long i, size; pte_t orig; size = huge_tte_to_size(entry); - shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT; + + shift = PAGE_SHIFT; + if (size >= PUD_SIZE) + shift = PUD_SHIFT; + else if (size >= PMD_SIZE) + shift = PMD_SHIFT; + else + shift = PAGE_SHIFT; + nptes = size >> shift; if (!pte_present(*ptep) && pte_present(entry)) @@ -333,19 +357,23 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned int i, nptes, hugepage_shift; + unsigned int i, nptes, orig_shift, shift; unsigned long size; pte_t entry; entry = *ptep; size = huge_tte_to_size(entry); - if (size >= HPAGE_SIZE) - nptes = size >> PMD_SHIFT; + + shift = PAGE_SHIFT; + if (size >= PUD_SIZE) + shift = PUD_SHIFT; + else if (size >= PMD_SIZE) + shift = PMD_SHIFT; else - nptes = size >> PAGE_SHIFT; + shift = PAGE_SHIFT; - hugepage_shift = pte_none(entry) ? PAGE_SHIFT : - huge_tte_to_shift(entry); + nptes = size >> shift; + orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry); if (pte_present(entry)) mm->context.hugetlb_pte_count -= nptes; @@ -354,11 +382,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, for (i = 0; i < nptes; i++) ptep[i] = __pte(0UL); - maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift); + maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift); /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ if (size == HPAGE_SIZE) maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0, - hugepage_shift); + orig_shift); return entry; } @@ -371,7 +399,8 @@ int pmd_huge(pmd_t pmd) int pud_huge(pud_t pud) { - return 0; + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID; } static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, @@ -435,8 +464,11 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; - hugetlb_free_pmd_range(tlb, pud, addr, next, floor, - ceiling); + if (is_hugetlb_pud(*pud)) + pud_clear(pud); + else + hugetlb_free_pmd_range(tlb, pud, addr, next, floor, + ceiling); } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 3c40ebd50f92..cab1510a82a0 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -325,6 +325,18 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde } #ifdef CONFIG_HUGETLB_PAGE +static void __init pud_huge_patch(void) +{ + struct pud_huge_patch_entry *p; + unsigned long addr; + + p = &__pud_huge_patch; + addr = p->addr; + *(unsigned int *)addr = p->insn; + + __asm__ __volatile__("flush %0" : : "r" (addr)); +} + static int __init setup_hugepagesz(char *string) { unsigned long long hugepage_size; @@ -337,6 +349,11 @@ static int __init setup_hugepagesz(char *string) hugepage_shift = ilog2(hugepage_size); switch (hugepage_shift) { + case HPAGE_16GB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_16GB; + 
hv_pgsz_idx = HV_PGSZ_IDX_16GB; + pud_huge_patch(); + break; case HPAGE_2GB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_2GB; hv_pgsz_idx = HV_PGSZ_IDX_2GB; @@ -377,6 +394,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * { struct mm_struct *mm; unsigned long flags; + bool is_huge_tsb; pte_t pte = *ptep; if (tlb_type != hypervisor) { @@ -394,15 +412,37 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * spin_lock_irqsave(&mm->context.lock, flags); + is_huge_tsb = false; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) && - is_hugetlb_pmd(__pmd(pte_val(pte)))) { - /* We are fabricating 8MB pages using 4MB real hw pages. */ - pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); - __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, - address, pte_val(pte)); - } else + if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) { + unsigned long hugepage_size = PAGE_SIZE; + + if (is_vm_hugetlb_page(vma)) + hugepage_size = huge_page_size(hstate_vma(vma)); + + if (hugepage_size >= PUD_SIZE) { + unsigned long mask = 0x1ffc00000UL; + + /* Transfer bits [32:22] from address to resolve + * at 4M granularity. + */ + pte_val(pte) &= ~mask; + pte_val(pte) |= (address & mask); + } else if (hugepage_size >= PMD_SIZE) { + /* We are fabricating 8MB pages using 4MB + * real hw pages. + */ + pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); + } + + if (hugepage_size >= PMD_SIZE) { + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, + REAL_HPAGE_SHIFT, address, pte_val(pte)); + is_huge_tsb = true; + } + } #endif + if (!is_huge_tsb) __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, address, pte_val(pte)); -- cgit v1.2.3 From 763797847460a9659a6dfa79b354f3b58e68d523 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Sat, 29 Jul 2017 11:42:19 -0700 Subject: sparc64: Cleanup hugepage table walk functions Flatten out nested code structure in huge_pte_offset() and huge_pte_alloc(). Signed-off-by: Nitin Gupta Signed-off-by: David S. 
Miller --- arch/sparc/mm/hugetlbpage.c | 54 +++++++++++++++++---------------------------- 1 file changed, 20 insertions(+), 34 deletions(-) diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 7acb84db952c..bcd8cdbc377f 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -266,27 +266,19 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; - pte_t *pte = NULL; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); if (!pud) return NULL; - if (sz >= PUD_SIZE) - pte = (pte_t *)pud; - else { - pmd = pmd_alloc(mm, pud, addr); - if (!pmd) - return NULL; - - if (sz >= PMD_SIZE) - pte = (pte_t *)pmd; - else - pte = pte_alloc_map(mm, pmd, addr); - } - - return pte; + return (pte_t *)pud; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return NULL; + if (sz >= PMD_SIZE) + return (pte_t *)pmd; + return pte_alloc_map(mm, pmd, addr); } pte_t *huge_pte_offset(struct mm_struct *mm, @@ -295,27 +287,21 @@ pte_t *huge_pte_offset(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; - pte_t *pte = NULL; pgd = pgd_offset(mm, addr); - if (!pgd_none(*pgd)) { - pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) { - if (is_hugetlb_pud(*pud)) - pte = (pte_t *)pud; - else { - pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) { - if (is_hugetlb_pmd(*pmd)) - pte = (pte_t *)pmd; - else - pte = pte_offset_map(pmd, addr); - } - } - } - } - - return pte; + if (pgd_none(*pgd)) + return NULL; + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return NULL; + if (is_hugetlb_pud(*pud)) + return (pte_t *)pud; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return NULL; + if (is_hugetlb_pmd(*pmd)) + return (pte_t *)pmd; + return pte_offset_map(pmd, addr); } void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, -- cgit v1.2.3 From 28d43de741cc67a9d8748de7dcb6c5f54cfa669c Mon Sep 17 00:00:00 2001 From: Vijay Kumar Date: Fri, 21 Jul 2017 10:23:56 -0600 Subject: sparc64: Add a new hypercall CPU_POKE This adds a new hypercall CPU_POKE for quickly waking up an idle CPU. CPU_POKE should only be sent to valid non-local CPUs. Signed-off-by: Rob Gardner Signed-off-by: Vijay Kumar Reviewed-by: Anthony Yznaga Signed-off-by: David S. Miller --- arch/sparc/include/asm/hypervisor.h | 18 ++++++++++++++++++ arch/sparc/kernel/hvcalls.S | 11 +++++++++++ 2 files changed, 29 insertions(+) diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 73cb8978df58..3dc9215d0357 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h @@ -298,6 +298,24 @@ unsigned long sun4v_cpu_stop(unsigned long cpuid); unsigned long sun4v_cpu_yield(void); #endif +/* cpu_poke() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_CPU_POKE + * RET0: status + * ERRORS: ENOCPU cpuid refers to a CPU that does not exist + * EINVAL cpuid is current CPU + * + * Poke CPU cpuid. If the target CPU is currently suspended having + * invoked the cpu-yield service, that vCPU will be resumed. + * Poke interrupts may only be sent to valid, non-local CPUs. + * It is not legal to poke the current vCPU. 
+ */ +#define HV_FAST_CPU_POKE 0x13 + +#ifndef __ASSEMBLY__ +unsigned long sun4v_cpu_poke(unsigned long cpuid); +#endif + /* cpu_qconf() * TRAP: HV_FAST_TRAP * FUNCTION: HV_FAST_CPU_QCONF diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index 4116ee5c7791..e57007ff7f8f 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -106,6 +106,17 @@ ENTRY(sun4v_cpu_yield) nop ENDPROC(sun4v_cpu_yield) + /* %o0: cpuid + * + * returns %o0: status + */ +ENTRY(sun4v_cpu_poke) + mov HV_FAST_CPU_POKE, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_cpu_poke) + /* %o0: type * %o1: queue paddr * %o2: num queue entries -- cgit v1.2.3 From 8536e02e912a46aa1c100bb1f5ccdca42e4e1ad2 Mon Sep 17 00:00:00 2001 From: Vijay Kumar Date: Fri, 21 Jul 2017 10:23:57 -0600 Subject: sparc64: Use CPU_POKE to resume idle cpu Use CPU_POKE hypervisor call to resume idle cpu if supported. Signed-off-by: Vijay Kumar Reviewed-by: Anthony Yznaga Signed-off-by: David S. Miller --- arch/sparc/include/asm/smp_64.h | 5 +++ arch/sparc/kernel/hvapi.c | 2 +- arch/sparc/kernel/process_64.c | 7 +++- arch/sparc/kernel/setup_64.c | 1 + arch/sparc/kernel/smp_64.c | 80 +++++++++++++++++++++++++++++++++++++++-- 5 files changed, 90 insertions(+), 5 deletions(-) diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h index ce2233f7e662..a75089285db8 100644 --- a/arch/sparc/include/asm/smp_64.h +++ b/arch/sparc/include/asm/smp_64.h @@ -33,6 +33,9 @@ DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); extern cpumask_t cpu_core_map[NR_CPUS]; +void smp_init_cpu_poke(void); +void scheduler_poke(void); + void arch_send_call_function_single_ipi(int cpu); void arch_send_call_function_ipi_mask(const struct cpumask *mask); @@ -74,6 +77,8 @@ void __cpu_die(unsigned int cpu); #define smp_fetch_global_regs() do { } while (0) #define smp_fetch_global_pmu() do { } while (0) #define smp_fill_in_cpu_possible_map() do { } while (0) +#define smp_init_cpu_poke() do { } while (0) +#define scheduler_poke() do { } while (0) #endif /* !(CONFIG_SMP) */ diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 267731234ce8..d41ce33d87d6 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -189,7 +189,7 @@ void __init sun4v_hvapi_init(void) group = HV_GRP_CORE; major = 1; - minor = 1; + minor = 6; if (sun4v_hvapi_register(group, major, &minor)) goto bad; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index b96104da5bd6..44e5da405f96 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -77,8 +77,13 @@ void arch_cpu_idle(void) : "=&r" (pstate) : "i" (PSTATE_IE)); - if (!need_resched() && !cpu_is_offline(smp_processor_id())) + if (!need_resched() && !cpu_is_offline(smp_processor_id())) { sun4v_cpu_yield(); + /* If resumed by cpu_poke then we need to explicitly + * call scheduler_ipi(). + */ + scheduler_poke(); + } /* Re-enable interrupts. 
*/ __asm__ __volatile__( diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 4d9c3e13c150..f92d5c67938b 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -356,6 +356,7 @@ void __init start_early_boot(void) check_if_starfire(); per_cpu_patch(); sun4v_patch(); + smp_init_cpu_poke(); cpu = hard_smp_processor_id(); if (cpu >= NR_CPUS) { diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 3218bc43302e..4898329970c5 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -74,6 +74,9 @@ EXPORT_SYMBOL(cpu_core_sib_cache_map); static cpumask_t smp_commenced_mask; +static DEFINE_PER_CPU(bool, poke); +static bool cpu_poke; + void smp_info(struct seq_file *m) { int i; @@ -1439,15 +1442,86 @@ void __init smp_cpus_done(unsigned int max_cpus) { } +static void send_cpu_ipi(int cpu) +{ + xcall_deliver((u64) &xcall_receive_signal, + 0, 0, cpumask_of(cpu)); +} + +void scheduler_poke(void) +{ + if (!cpu_poke) + return; + + if (!__this_cpu_read(poke)) + return; + + __this_cpu_write(poke, false); + set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); +} + +static unsigned long send_cpu_poke(int cpu) +{ + unsigned long hv_err; + + per_cpu(poke, cpu) = true; + hv_err = sun4v_cpu_poke(cpu); + if (hv_err != HV_EOK) { + per_cpu(poke, cpu) = false; + pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n", + __func__, hv_err); + } + + return hv_err; +} + void smp_send_reschedule(int cpu) { if (cpu == smp_processor_id()) { WARN_ON_ONCE(preemptible()); set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); - } else { - xcall_deliver((u64) &xcall_receive_signal, - 0, 0, cpumask_of(cpu)); + return; + } + + /* Use cpu poke to resume idle cpu if supported. */ + if (cpu_poke && idle_cpu(cpu)) { + unsigned long ret; + + ret = send_cpu_poke(cpu); + if (ret == HV_EOK) + return; } + + /* Use IPI in following cases: + * - cpu poke not supported + * - cpu not idle + * - send_cpu_poke() returns with error + */ + send_cpu_ipi(cpu); +} + +void smp_init_cpu_poke(void) +{ + unsigned long major; + unsigned long minor; + int ret; + + if (tlb_type != hypervisor) + return; + + ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor); + if (ret) { + pr_debug("HV_GRP_CORE is not registered\n"); + return; + } + + if (major == 1 && minor >= 6) { + /* CPU POKE is registered. */ + cpu_poke = true; + return; + } + + pr_debug("CPU_POKE not supported\n"); } void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) -- cgit v1.2.3 From 4d9fbf539b52810cd2903719b181ed3d3ccd861f Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Thu, 10 Aug 2017 09:49:15 -0700 Subject: sparc64: Revert 16GB huge page support. It overflows the amount of space available in the initial .text section of trap handler assembler in some configurations, resulting in build failures. Signed-off-by: David S. 
Miller --- arch/sparc/include/asm/hugetlb.h | 7 --- arch/sparc/include/asm/page_64.h | 3 +- arch/sparc/include/asm/pgtable_64.h | 20 +------ arch/sparc/include/asm/tsb.h | 36 ------------- arch/sparc/kernel/tsb.S | 2 +- arch/sparc/kernel/vmlinux.lds.S | 5 -- arch/sparc/mm/gup.c | 45 +--------------- arch/sparc/mm/hugetlbpage.c | 102 +++++++++++++++--------------------- arch/sparc/mm/init_64.c | 54 +++---------------- 9 files changed, 54 insertions(+), 220 deletions(-) diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index 0ca7caab1b06..d1f837dc77a4 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -4,13 +4,6 @@ #include #include -#ifdef CONFIG_HUGETLB_PAGE -struct pud_huge_patch_entry { - unsigned int addr; - unsigned int insn; -}; -extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end; -#endif void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index 8ee1f97589a1..5961b2d8398a 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -17,7 +17,6 @@ #define HPAGE_SHIFT 23 #define REAL_HPAGE_SHIFT 22 -#define HPAGE_16GB_SHIFT 34 #define HPAGE_2GB_SHIFT 31 #define HPAGE_256MB_SHIFT 28 #define HPAGE_64K_SHIFT 16 @@ -29,7 +28,7 @@ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) -#define HUGE_MAX_HSTATE 5 +#define HUGE_MAX_HSTATE 4 #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 4fefe3762083..6fbd931f0570 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -414,11 +414,6 @@ static inline bool is_hugetlb_pmd(pmd_t pmd) return !!(pmd_val(pmd) & _PAGE_PMD_HUGE); } -static inline bool is_hugetlb_pud(pud_t pud) -{ - return !!(pud_val(pud) & _PAGE_PUD_HUGE); -} - #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pmd_mkhuge(pmd_t pmd) { @@ -692,8 +687,6 @@ static inline unsigned long pmd_write(pmd_t pmd) return pte_write(pte); } -#define pud_write(pud) pte_write(__pte(pud_val(pud))) - #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline unsigned long pmd_dirty(pmd_t pmd) { @@ -830,18 +823,9 @@ static inline unsigned long __pmd_page(pmd_t pmd) return ((unsigned long) __va(pfn << PAGE_SHIFT)); } - -static inline unsigned long pud_page_vaddr(pud_t pud) -{ - pte_t pte = __pte(pud_val(pud)); - unsigned long pfn; - - pfn = pte_pfn(pte); - - return ((unsigned long) __va(pfn << PAGE_SHIFT)); -} - #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) +#define pud_page_vaddr(pud) \ + ((unsigned long) __va(pud_val(pud))) #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) #define pud_present(pud) (pud_val(pud) != 0U) diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index acf55063aa3d..32258e08da03 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -195,41 +195,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; nop; \ 699: - /* PUD has been loaded into REG1, interpret the value, seeing - * if it is a HUGE PUD or a normal one. If it is not valid - * then jump to FAIL_LABEL. If it is a HUGE PUD, and it - * translates to a valid PTE, branch to PTE_LABEL. 
- * - * We have to propagate bits [32:22] from the virtual address - * to resolve at 4M granularity. - */ -#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) -#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ -700: ba 700f; \ - nop; \ - .section .pud_huge_patch, "ax"; \ - .word 700b; \ - nop; \ - .previous; \ - brz,pn REG1, FAIL_LABEL; \ - sethi %uhi(_PAGE_PUD_HUGE), REG2; \ - sllx REG2, 32, REG2; \ - andcc REG1, REG2, %g0; \ - be,pt %xcc, 700f; \ - sethi %hi(0x1ffc0000), REG2; \ - sllx REG2, 1, REG2; \ - brgez,pn REG1, FAIL_LABEL; \ - andn REG1, REG2, REG1; \ - and VADDR, REG2, REG2; \ - brlz,pt REG1, PTE_LABEL; \ - or REG1, REG2, REG1; \ -700: -#else -#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ - brz,pn REG1, FAIL_LABEL; \ - nop; -#endif - /* PMD has been loaded into REG1, interpret the value, seeing * if it is a HUGE PMD or a normal one. If it is not valid * then jump to FAIL_LABEL. If it is a HUGE PMD, and it @@ -277,7 +242,6 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ - USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index 5f42ac099fcb..07c0df924960 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -117,7 +117,7 @@ tsb_miss_page_table_walk_sun4v_fastpath: /* Valid PTE is now in %g5. */ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7 + sethi %uhi(_PAGE_PMD_HUGE), %g7 sllx %g7, 32, %g7 andcc %g5, %g7, %g0 diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 34d37e6c2d06..03b3d65d1266 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -154,11 +154,6 @@ SECTIONS *(.get_tick_patch) __get_tick_patch_end = .; } - .pud_huge_patch : { - __pud_huge_patch = .; - *(.pud_huge_patch) - __pud_huge_patch_end = .; - } PERCPU_SECTION(SMP_CACHE_BYTES) #ifdef CONFIG_JUMP_LABEL diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index d809099ffd47..f80cfc64c55b 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -103,45 +103,6 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, return 1; } -static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr, - unsigned long end, int write, struct page **pages, - int *nr) -{ - struct page *head, *page; - int refs; - - if (!(pud_val(pud) & _PAGE_VALID)) - return 0; - - if (write && !pud_write(pud)) - return 0; - - refs = 0; - page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); - head = compound_head(page); - do { - VM_BUG_ON(compound_head(page) != head); - pages[*nr] = page; - (*nr)++; - page++; - refs++; - } while (addr += PAGE_SIZE, addr != end); - - if (!page_cache_add_speculative(head, refs)) { - *nr -= refs; - return 0; - } - - if (unlikely(pud_val(pud) != pud_val(*pudp))) { - *nr -= refs; - while (refs--) - put_page(head); - return 0; - } - - return 1; -} - static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { @@ -180,11 +141,7 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; - if 
(unlikely(pud_large(pud))) { - if (!gup_huge_pud(pudp, pud, addr, next, - write, pages, nr)) - return 0; - } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) + if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index bcd8cdbc377f..28ee8d8ffa07 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -143,10 +143,6 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; switch (shift) { - case HPAGE_16GB_SHIFT: - hugepage_size = _PAGE_SZ16GB_4V; - pte_val(entry) |= _PAGE_PUD_HUGE; - break; case HPAGE_2GB_SHIFT: hugepage_size = _PAGE_SZ2GB_4V; pte_val(entry) |= _PAGE_PMD_HUGE; @@ -191,9 +187,6 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry) unsigned int shift; switch (tte_szbits) { - case _PAGE_SZ16GB_4V: - shift = HPAGE_16GB_SHIFT; - break; case _PAGE_SZ2GB_4V: shift = HPAGE_2GB_SHIFT; break; @@ -266,19 +259,22 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; + pte_t *pte = NULL; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); - if (!pud) - return NULL; - if (sz >= PUD_SIZE) - return (pte_t *)pud; - pmd = pmd_alloc(mm, pud, addr); - if (!pmd) - return NULL; - if (sz >= PMD_SIZE) - return (pte_t *)pmd; - return pte_alloc_map(mm, pmd, addr); + if (pud) { + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return NULL; + + if (sz >= PMD_SIZE) + pte = (pte_t *)pmd; + else + pte = pte_alloc_map(mm, pmd, addr); + } + + return pte; } pte_t *huge_pte_offset(struct mm_struct *mm, @@ -287,40 +283,34 @@ pte_t *huge_pte_offset(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; + pte_t *pte = NULL; pgd = pgd_offset(mm, addr); - if (pgd_none(*pgd)) - return NULL; - pud = pud_offset(pgd, addr); - if (pud_none(*pud)) - return NULL; - if (is_hugetlb_pud(*pud)) - return (pte_t *)pud; - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) - return NULL; - if (is_hugetlb_pmd(*pmd)) - return (pte_t *)pmd; - return pte_offset_map(pmd, addr); + if (!pgd_none(*pgd)) { + pud = pud_offset(pgd, addr); + if (!pud_none(*pud)) { + pmd = pmd_offset(pud, addr); + if (!pmd_none(*pmd)) { + if (is_hugetlb_pmd(*pmd)) + pte = (pte_t *)pmd; + else + pte = pte_offset_map(pmd, addr); + } + } + } + + return pte; } void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - unsigned int nptes, orig_shift, shift; - unsigned long i, size; + unsigned int i, nptes, orig_shift, shift; + unsigned long size; pte_t orig; size = huge_tte_to_size(entry); - - shift = PAGE_SHIFT; - if (size >= PUD_SIZE) - shift = PUD_SHIFT; - else if (size >= PMD_SIZE) - shift = PMD_SHIFT; - else - shift = PAGE_SHIFT; - + shift = size >= HPAGE_SIZE ? 
PMD_SHIFT : PAGE_SHIFT; nptes = size >> shift; if (!pte_present(*ptep) && pte_present(entry)) @@ -343,23 +333,19 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned int i, nptes, orig_shift, shift; + unsigned int i, nptes, hugepage_shift; unsigned long size; pte_t entry; entry = *ptep; size = huge_tte_to_size(entry); - - shift = PAGE_SHIFT; - if (size >= PUD_SIZE) - shift = PUD_SHIFT; - else if (size >= PMD_SIZE) - shift = PMD_SHIFT; + if (size >= HPAGE_SIZE) + nptes = size >> PMD_SHIFT; else - shift = PAGE_SHIFT; + nptes = size >> PAGE_SHIFT; - nptes = size >> shift; - orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry); + hugepage_shift = pte_none(entry) ? PAGE_SHIFT : + huge_tte_to_shift(entry); if (pte_present(entry)) mm->context.hugetlb_pte_count -= nptes; @@ -368,11 +354,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, for (i = 0; i < nptes; i++) ptep[i] = __pte(0UL); - maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift); + maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift); /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ if (size == HPAGE_SIZE) maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0, - orig_shift); + hugepage_shift); return entry; } @@ -385,8 +371,7 @@ int pmd_huge(pmd_t pmd) int pud_huge(pud_t pud) { - return !pud_none(pud) && - (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID; + return 0; } static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, @@ -450,11 +435,8 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; - if (is_hugetlb_pud(*pud)) - pud_clear(pud); - else - hugetlb_free_pmd_range(tlb, pud, addr, next, floor, - ceiling); + hugetlb_free_pmd_range(tlb, pud, addr, next, floor, + ceiling); } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index cab1510a82a0..3c40ebd50f92 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -325,18 +325,6 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde } #ifdef CONFIG_HUGETLB_PAGE -static void __init pud_huge_patch(void) -{ - struct pud_huge_patch_entry *p; - unsigned long addr; - - p = &__pud_huge_patch; - addr = p->addr; - *(unsigned int *)addr = p->insn; - - __asm__ __volatile__("flush %0" : : "r" (addr)); -} - static int __init setup_hugepagesz(char *string) { unsigned long long hugepage_size; @@ -349,11 +337,6 @@ static int __init setup_hugepagesz(char *string) hugepage_shift = ilog2(hugepage_size); switch (hugepage_shift) { - case HPAGE_16GB_SHIFT: - hv_pgsz_mask = HV_PGSZ_MASK_16GB; - hv_pgsz_idx = HV_PGSZ_IDX_16GB; - pud_huge_patch(); - break; case HPAGE_2GB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_2GB; hv_pgsz_idx = HV_PGSZ_IDX_2GB; @@ -394,7 +377,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * { struct mm_struct *mm; unsigned long flags; - bool is_huge_tsb; pte_t pte = *ptep; if (tlb_type != hypervisor) { @@ -412,37 +394,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * spin_lock_irqsave(&mm->context.lock, flags); - is_huge_tsb = false; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) { - unsigned long 
hugepage_size = PAGE_SIZE;
-
-		if (is_vm_hugetlb_page(vma))
-			hugepage_size = huge_page_size(hstate_vma(vma));
-
-		if (hugepage_size >= PUD_SIZE) {
-			unsigned long mask = 0x1ffc00000UL;
-
-			/* Transfer bits [32:22] from address to resolve
-			 * at 4M granularity.
-			 */
-			pte_val(pte) &= ~mask;
-			pte_val(pte) |= (address & mask);
-		} else if (hugepage_size >= PMD_SIZE) {
-			/* We are fabricating 8MB pages using 4MB
-			 * real hw pages.
-			 */
-			pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
-		}
-
-		if (hugepage_size >= PMD_SIZE) {
-			__update_mmu_tsb_insert(mm, MM_TSB_HUGE,
-				REAL_HPAGE_SHIFT, address, pte_val(pte));
-			is_huge_tsb = true;
-		}
-	}
+	if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
+	    is_hugetlb_pmd(__pmd(pte_val(pte)))) {
+		/* We are fabricating 8MB pages using 4MB real hw pages. */
+		pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+					address, pte_val(pte));
+	} else
 #endif
-	if (!is_huge_tsb)
 		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
 					address, pte_val(pte));
--
cgit v1.2.3

From 061273f9ecdb9e55e80676bbbb8f65ecd3b9699a Mon Sep 17 00:00:00 2001
From: Sam Ravnborg
Date: Fri, 4 Aug 2017 19:59:39 +0200
Subject: sparc64: update comments in U3memcpy

Update the comments about which ranges the different parts of the code
copy; the original comments were wrong.  Introduce a few descriptive
labels too.

No functional changes.

Signed-off-by: Sam Ravnborg
Signed-off-by: David S. Miller
---
 arch/sparc/lib/U3memcpy.S | 32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 5a8cb37f0a3b..f9b42b3c63b0 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -168,18 +168,25 @@ ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
 FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
 	srlx		%o2, 31, %g2
 	cmp		%g2, 0
+
+	/* software trap 5 "Range Check" if dst >= 0x80000000 */
 	tne		%xcc, 5
 	PREAMBLE
 	mov		%o0, %o4
+
+	/* if len == 0 */
 	cmp		%o2, 0
-	be,pn		%XCC, 85f
+	be,pn		%XCC, end_return
 	 or		%o0, %o1, %o3
+
+	/* if len < 16 */
 	cmp		%o2, 16
-	blu,a,pn	%XCC, 80f
+	blu,a,pn	%XCC, less_than_16
 	 or		%o3, %o2, %o3
 
+	/* if len < 192 */
 	cmp		%o2, (3 * 64)
-	blu,pt		%XCC, 70f
+	blu,pt		%XCC, less_than_192
 	 andcc		%o3, 0x7, %g0
 
	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.
We must preserve @@ -362,7 +369,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ cmp %o2, 0 add %o1, %g1, %o1 VISExitHalf - be,pn %XCC, 85f + be,pn %XCC, end_return sub %o0, %o1, %o3 andcc %g1, 0x7, %g0 @@ -392,14 +399,15 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, 2, %o2 1: andcc %o2, 0x1, %g0 - be,pt %icc, 85f + be,pt %icc, end_return nop EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2) - ba,pt %xcc, 85f + ba,pt %xcc, end_return EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2) .align 64 -70: /* 16 < len <= 64 */ + /* 16 <= len < 192 */ +less_than_192: bne,pn %XCC, 75f sub %o0, %o1, %o3 @@ -429,7 +437,7 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4) add %o1, 0x4, %o1 1: cmp %o2, 0 - be,pt %XCC, 85f + be,pt %XCC, end_return nop ba,pt %xcc, 90f nop @@ -475,13 +483,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ srl %g1, 3, %g1 andcc %o2, 0x7, %o2 - be,pn %icc, 85f + be,pn %icc, end_return add %o1, %g1, %o1 ba,pt %xcc, 90f sub %o0, %o1, %o3 .align 64 -80: /* 0 < len <= 16 */ + /* 0 < len < 16 */ +less_than_16: andcc %o3, 0x3, %g0 bne,pn %XCC, 90f sub %o0, %o1, %o3 @@ -493,7 +502,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ bgu,pt %XCC, 1b add %o1, 4, %o1 -85: retl +end_return: + retl mov EX_RETVAL(%o4), %o0 .align 32 -- cgit v1.2.3 From de5c073e383d884da391627dbab8e0e7cfe48558 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 7 Aug 2017 17:52:49 -0600 Subject: arch/sparc: Separate the exception handlers from NG4memcpy Separate the exception handlers from NG4memcpy so that it can be used with new memcpy routines. Make a separate file for all these handlers. Signed-off-by: Babu Moger Signed-off-by: David S. Miller --- arch/sparc/lib/Makefile | 2 + arch/sparc/lib/Memcpy_utils.S | 163 ++++++++++++++++++++++++++++++++++++++++++ arch/sparc/lib/NG4memcpy.S | 149 -------------------------------------- 3 files changed, 165 insertions(+), 149 deletions(-) create mode 100644 arch/sparc/lib/Memcpy_utils.S diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 07c03e72d812..37930c0777fe 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -36,6 +36,8 @@ lib-$(CONFIG_SPARC64) += NG2patch.o lib-$(CONFIG_SPARC64) += NG4memcpy.o NG4copy_from_user.o NG4copy_to_user.o lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o +lib-$(CONFIG_SPARC64) += Memcpy_utils.o + lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o diff --git a/arch/sparc/lib/Memcpy_utils.S b/arch/sparc/lib/Memcpy_utils.S new file mode 100644 index 000000000000..f7a26e0be94f --- /dev/null +++ b/arch/sparc/lib/Memcpy_utils.S @@ -0,0 +1,163 @@ +#ifndef __ASM_MEMCPY_UTILS +#define __ASM_MEMCPY_UTILS + +#include +#include +#include + +ENTRY(__restore_asi_fp) + VISExitHalf + retl + wr %g0, ASI_AIUS, %asi +ENDPROC(__restore_asi_fp) + +ENTRY(__restore_asi) + retl + wr %g0, ASI_AIUS, %asi +ENDPROC(__restore_asi) + +ENTRY(NG4_retl_o2) + ba,pt %xcc, __restore_asi + mov %o2, %o0 +ENDPROC(NG4_retl_o2) +ENTRY(NG4_retl_o2_plus_1) + ba,pt %xcc, __restore_asi + add %o2, 1, %o0 +ENDPROC(NG4_retl_o2_plus_1) +ENTRY(NG4_retl_o2_plus_4) + ba,pt %xcc, __restore_asi + add %o2, 4, %o0 +ENDPROC(NG4_retl_o2_plus_4) +ENTRY(NG4_retl_o2_plus_o5) + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5) +ENTRY(NG4_retl_o2_plus_o5_plus_4) + add %o5, 4, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_4) 
+ENTRY(NG4_retl_o2_plus_o5_plus_8) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_8) +ENTRY(NG4_retl_o2_plus_o5_plus_16) + add %o5, 16, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_16) +ENTRY(NG4_retl_o2_plus_o5_plus_24) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_24) +ENTRY(NG4_retl_o2_plus_o5_plus_32) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(NG4_retl_o2_plus_o5_plus_32) +ENTRY(NG4_retl_o2_plus_g1) + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG4_retl_o2_plus_g1) +ENTRY(NG4_retl_o2_plus_g1_plus_1) + add %g1, 1, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG4_retl_o2_plus_g1_plus_1) +ENTRY(NG4_retl_o2_plus_g1_plus_8) + add %g1, 8, %g1 + ba,pt %xcc, __restore_asi + add %o2, %g1, %o0 +ENDPROC(NG4_retl_o2_plus_g1_plus_8) +ENTRY(NG4_retl_o2_plus_o4) + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4) +ENTRY(NG4_retl_o2_plus_o4_plus_8) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_8) +ENTRY(NG4_retl_o2_plus_o4_plus_16) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_16) +ENTRY(NG4_retl_o2_plus_o4_plus_24) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_24) +ENTRY(NG4_retl_o2_plus_o4_plus_32) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_32) +ENTRY(NG4_retl_o2_plus_o4_plus_40) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_40) +ENTRY(NG4_retl_o2_plus_o4_plus_48) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_48) +ENTRY(NG4_retl_o2_plus_o4_plus_56) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_56) +ENTRY(NG4_retl_o2_plus_o4_plus_64) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_64) +ENTRY(NG4_retl_o2_plus_o4_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_8_fp) + add %o4, 8, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_16_fp) + add %o4, 16, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_24_fp) + add %o4, 24, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_32_fp) + add %o4, 32, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_40_fp) + add %o4, 40, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_48_fp) + add %o4, 48, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_56_fp) + add %o4, 56, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp) +ENTRY(NG4_retl_o2_plus_o4_plus_64_fp) + add %o4, 64, %o4 + ba,pt %xcc, __restore_asi_fp + add %o2, %o4, %o0 +ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp) + +#endif diff --git a/arch/sparc/lib/NG4memcpy.S 
b/arch/sparc/lib/NG4memcpy.S index 78ea962edcbe..e20d6541fb68 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -94,155 +94,6 @@ .text #ifndef EX_RETVAL #define EX_RETVAL(x) x -__restore_asi_fp: - VISExitHalf -__restore_asi: - retl - wr %g0, ASI_AIUS, %asi - -ENTRY(NG4_retl_o2) - ba,pt %xcc, __restore_asi - mov %o2, %o0 -ENDPROC(NG4_retl_o2) -ENTRY(NG4_retl_o2_plus_1) - ba,pt %xcc, __restore_asi - add %o2, 1, %o0 -ENDPROC(NG4_retl_o2_plus_1) -ENTRY(NG4_retl_o2_plus_4) - ba,pt %xcc, __restore_asi - add %o2, 4, %o0 -ENDPROC(NG4_retl_o2_plus_4) -ENTRY(NG4_retl_o2_plus_o5) - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5) -ENTRY(NG4_retl_o2_plus_o5_plus_4) - add %o5, 4, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_4) -ENTRY(NG4_retl_o2_plus_o5_plus_8) - add %o5, 8, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_8) -ENTRY(NG4_retl_o2_plus_o5_plus_16) - add %o5, 16, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_16) -ENTRY(NG4_retl_o2_plus_o5_plus_24) - add %o5, 24, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_24) -ENTRY(NG4_retl_o2_plus_o5_plus_32) - add %o5, 32, %o5 - ba,pt %xcc, __restore_asi - add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_32) -ENTRY(NG4_retl_o2_plus_g1) - ba,pt %xcc, __restore_asi - add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1) -ENTRY(NG4_retl_o2_plus_g1_plus_1) - add %g1, 1, %g1 - ba,pt %xcc, __restore_asi - add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1_plus_1) -ENTRY(NG4_retl_o2_plus_g1_plus_8) - add %g1, 8, %g1 - ba,pt %xcc, __restore_asi - add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1_plus_8) -ENTRY(NG4_retl_o2_plus_o4) - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4) -ENTRY(NG4_retl_o2_plus_o4_plus_8) - add %o4, 8, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_8) -ENTRY(NG4_retl_o2_plus_o4_plus_16) - add %o4, 16, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_16) -ENTRY(NG4_retl_o2_plus_o4_plus_24) - add %o4, 24, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_24) -ENTRY(NG4_retl_o2_plus_o4_plus_32) - add %o4, 32, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_32) -ENTRY(NG4_retl_o2_plus_o4_plus_40) - add %o4, 40, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_40) -ENTRY(NG4_retl_o2_plus_o4_plus_48) - add %o4, 48, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_48) -ENTRY(NG4_retl_o2_plus_o4_plus_56) - add %o4, 56, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_56) -ENTRY(NG4_retl_o2_plus_o4_plus_64) - add %o4, 64, %o4 - ba,pt %xcc, __restore_asi - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_64) -ENTRY(NG4_retl_o2_plus_o4_fp) - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_8_fp) - add %o4, 8, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_16_fp) - add %o4, 16, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_24_fp) - add %o4, 24, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp) 
-ENTRY(NG4_retl_o2_plus_o4_plus_32_fp) - add %o4, 32, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_40_fp) - add %o4, 40, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_48_fp) - add %o4, 48, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_56_fp) - add %o4, 56, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_64_fp) - add %o4, 64, %o4 - ba,pt %xcc, __restore_asi_fp - add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp) #endif .align 64 -- cgit v1.2.3 From 1ab326934fa1a16f776b014ba482c1be08a205a1 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 7 Aug 2017 17:52:50 -0600 Subject: arch/sparc: Rename exception handlers Rename exception handlers to memcpy_xxx as these are going to be used by new memcpy routines and these handlers are not exclusive to NG4memcpy anymore. Signed-off-by: Babu Moger Signed-off-by: David S. Miller --- arch/sparc/lib/Memcpy_utils.S | 120 +++++++++++++++++++-------------------- arch/sparc/lib/NG4memcpy.S | 128 +++++++++++++++++++++--------------------- 2 files changed, 124 insertions(+), 124 deletions(-) diff --git a/arch/sparc/lib/Memcpy_utils.S b/arch/sparc/lib/Memcpy_utils.S index f7a26e0be94f..bcc5d77b2027 100644 --- a/arch/sparc/lib/Memcpy_utils.S +++ b/arch/sparc/lib/Memcpy_utils.S @@ -16,148 +16,148 @@ ENTRY(__restore_asi) wr %g0, ASI_AIUS, %asi ENDPROC(__restore_asi) -ENTRY(NG4_retl_o2) +ENTRY(memcpy_retl_o2) ba,pt %xcc, __restore_asi mov %o2, %o0 -ENDPROC(NG4_retl_o2) -ENTRY(NG4_retl_o2_plus_1) +ENDPROC(memcpy_retl_o2) +ENTRY(memcpy_retl_o2_plus_1) ba,pt %xcc, __restore_asi add %o2, 1, %o0 -ENDPROC(NG4_retl_o2_plus_1) -ENTRY(NG4_retl_o2_plus_4) +ENDPROC(memcpy_retl_o2_plus_1) +ENTRY(memcpy_retl_o2_plus_4) ba,pt %xcc, __restore_asi add %o2, 4, %o0 -ENDPROC(NG4_retl_o2_plus_4) -ENTRY(NG4_retl_o2_plus_o5) +ENDPROC(memcpy_retl_o2_plus_4) +ENTRY(memcpy_retl_o2_plus_o5) ba,pt %xcc, __restore_asi add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5) -ENTRY(NG4_retl_o2_plus_o5_plus_4) +ENDPROC(memcpy_retl_o2_plus_o5) +ENTRY(memcpy_retl_o2_plus_o5_plus_4) add %o5, 4, %o5 ba,pt %xcc, __restore_asi add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_4) -ENTRY(NG4_retl_o2_plus_o5_plus_8) +ENDPROC(memcpy_retl_o2_plus_o5_plus_4) +ENTRY(memcpy_retl_o2_plus_o5_plus_8) add %o5, 8, %o5 ba,pt %xcc, __restore_asi add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_8) -ENTRY(NG4_retl_o2_plus_o5_plus_16) +ENDPROC(memcpy_retl_o2_plus_o5_plus_8) +ENTRY(memcpy_retl_o2_plus_o5_plus_16) add %o5, 16, %o5 ba,pt %xcc, __restore_asi add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_16) -ENTRY(NG4_retl_o2_plus_o5_plus_24) +ENDPROC(memcpy_retl_o2_plus_o5_plus_16) +ENTRY(memcpy_retl_o2_plus_o5_plus_24) add %o5, 24, %o5 ba,pt %xcc, __restore_asi add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_24) -ENTRY(NG4_retl_o2_plus_o5_plus_32) +ENDPROC(memcpy_retl_o2_plus_o5_plus_24) +ENTRY(memcpy_retl_o2_plus_o5_plus_32) add %o5, 32, %o5 ba,pt %xcc, __restore_asi add %o2, %o5, %o0 -ENDPROC(NG4_retl_o2_plus_o5_plus_32) -ENTRY(NG4_retl_o2_plus_g1) +ENDPROC(memcpy_retl_o2_plus_o5_plus_32) +ENTRY(memcpy_retl_o2_plus_g1) ba,pt %xcc, __restore_asi add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1) -ENTRY(NG4_retl_o2_plus_g1_plus_1) +ENDPROC(memcpy_retl_o2_plus_g1) 
+ENTRY(memcpy_retl_o2_plus_g1_plus_1) add %g1, 1, %g1 ba,pt %xcc, __restore_asi add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1_plus_1) -ENTRY(NG4_retl_o2_plus_g1_plus_8) +ENDPROC(memcpy_retl_o2_plus_g1_plus_1) +ENTRY(memcpy_retl_o2_plus_g1_plus_8) add %g1, 8, %g1 ba,pt %xcc, __restore_asi add %o2, %g1, %o0 -ENDPROC(NG4_retl_o2_plus_g1_plus_8) -ENTRY(NG4_retl_o2_plus_o4) +ENDPROC(memcpy_retl_o2_plus_g1_plus_8) +ENTRY(memcpy_retl_o2_plus_o4) ba,pt %xcc, __restore_asi add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4) -ENTRY(NG4_retl_o2_plus_o4_plus_8) +ENDPROC(memcpy_retl_o2_plus_o4) +ENTRY(memcpy_retl_o2_plus_o4_plus_8) add %o4, 8, %o4 ba,pt %xcc, __restore_asi add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_8) -ENTRY(NG4_retl_o2_plus_o4_plus_16) +ENDPROC(memcpy_retl_o2_plus_o4_plus_8) +ENTRY(memcpy_retl_o2_plus_o4_plus_16) add %o4, 16, %o4 ba,pt %xcc, __restore_asi add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_16) -ENTRY(NG4_retl_o2_plus_o4_plus_24) +ENDPROC(memcpy_retl_o2_plus_o4_plus_16) +ENTRY(memcpy_retl_o2_plus_o4_plus_24) add %o4, 24, %o4 ba,pt %xcc, __restore_asi add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_24) -ENTRY(NG4_retl_o2_plus_o4_plus_32) +ENDPROC(memcpy_retl_o2_plus_o4_plus_24) +ENTRY(memcpy_retl_o2_plus_o4_plus_32) add %o4, 32, %o4 ba,pt %xcc, __restore_asi add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_32) -ENTRY(NG4_retl_o2_plus_o4_plus_40) +ENDPROC(memcpy_retl_o2_plus_o4_plus_32) +ENTRY(memcpy_retl_o2_plus_o4_plus_40) add %o4, 40, %o4 ba,pt %xcc, __restore_asi add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_40) -ENTRY(NG4_retl_o2_plus_o4_plus_48) +ENDPROC(memcpy_retl_o2_plus_o4_plus_40) +ENTRY(memcpy_retl_o2_plus_o4_plus_48) add %o4, 48, %o4 ba,pt %xcc, __restore_asi add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_48) -ENTRY(NG4_retl_o2_plus_o4_plus_56) +ENDPROC(memcpy_retl_o2_plus_o4_plus_48) +ENTRY(memcpy_retl_o2_plus_o4_plus_56) add %o4, 56, %o4 ba,pt %xcc, __restore_asi add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_56) -ENTRY(NG4_retl_o2_plus_o4_plus_64) +ENDPROC(memcpy_retl_o2_plus_o4_plus_56) +ENTRY(memcpy_retl_o2_plus_o4_plus_64) add %o4, 64, %o4 ba,pt %xcc, __restore_asi add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_64) -ENTRY(NG4_retl_o2_plus_o4_fp) +ENDPROC(memcpy_retl_o2_plus_o4_plus_64) +ENTRY(memcpy_retl_o2_plus_o4_fp) ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_8_fp) +ENDPROC(memcpy_retl_o2_plus_o4_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_8_fp) add %o4, 8, %o4 ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_8_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_16_fp) +ENDPROC(memcpy_retl_o2_plus_o4_plus_8_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_16_fp) add %o4, 16, %o4 ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_16_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_24_fp) +ENDPROC(memcpy_retl_o2_plus_o4_plus_16_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_24_fp) add %o4, 24, %o4 ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_24_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_32_fp) +ENDPROC(memcpy_retl_o2_plus_o4_plus_24_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_32_fp) add %o4, 32, %o4 ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_32_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_40_fp) +ENDPROC(memcpy_retl_o2_plus_o4_plus_32_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_40_fp) add %o4, 40, %o4 ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_40_fp) 
-ENTRY(NG4_retl_o2_plus_o4_plus_48_fp) +ENDPROC(memcpy_retl_o2_plus_o4_plus_40_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_48_fp) add %o4, 48, %o4 ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_48_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_56_fp) +ENDPROC(memcpy_retl_o2_plus_o4_plus_48_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_56_fp) add %o4, 56, %o4 ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_56_fp) -ENTRY(NG4_retl_o2_plus_o4_plus_64_fp) +ENDPROC(memcpy_retl_o2_plus_o4_plus_56_fp) +ENTRY(memcpy_retl_o2_plus_o4_plus_64_fp) add %o4, 64, %o4 ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 -ENDPROC(NG4_retl_o2_plus_o4_plus_64_fp) +ENDPROC(memcpy_retl_o2_plus_o4_plus_64_fp) #endif diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S index e20d6541fb68..b5dacd1d2078 100644 --- a/arch/sparc/lib/NG4memcpy.S +++ b/arch/sparc/lib/NG4memcpy.S @@ -126,12 +126,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 1, %o1 subcc %g1, 1, %g1 add %o0, 1, %o0 bne,pt %icc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) 51: LOAD(prefetch, %o1 + 0x040, #n_reads_strong) LOAD(prefetch, %o1 + 0x080, #n_reads_strong) @@ -156,43 +156,43 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, .Llarge_aligned sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 8, %o1 subcc %g1, 8, %g1 add %o0, 8, %o0 bne,pt %icc, 1b - EX_ST(STORE(stx, %g2, %o0 - 0x08), NG4_retl_o2_plus_g1_plus_8) + EX_ST(STORE(stx, %g2, %o0 - 0x08), memcpy_retl_o2_plus_g1_plus_8) .Llarge_aligned: /* len >= 0x80 && src 8-byte aligned && dest 8-byte aligned */ andn %o2, 0x3f, %o4 sub %o2, %o4, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o4) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o4) add %o1, 0x40, %o1 - EX_LD(LOAD(ldx, %o1 - 0x38, %g2), NG4_retl_o2_plus_o4) + EX_LD(LOAD(ldx, %o1 - 0x38, %g2), memcpy_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD(LOAD(ldx, %o1 - 0x30, %g3), NG4_retl_o2_plus_o4_plus_64) - EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), NG4_retl_o2_plus_o4_plus_64) - EX_LD(LOAD(ldx, %o1 - 0x20, %o5), NG4_retl_o2_plus_o4_plus_64) - EX_ST(STORE_INIT(%g1, %o0), NG4_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x30, %g3), memcpy_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x28, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_64) + EX_LD(LOAD(ldx, %o1 - 0x20, %o5), memcpy_retl_o2_plus_o4_plus_64) + EX_ST(STORE_INIT(%g1, %o0), memcpy_retl_o2_plus_o4_plus_64) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_56) + EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_56) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x18, %g2), NG4_retl_o2_plus_o4_plus_48) - EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_48) + EX_LD(LOAD(ldx, %o1 - 0x18, %g2), memcpy_retl_o2_plus_o4_plus_48) + EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_48) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x10, %g3), NG4_retl_o2_plus_o4_plus_40) - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_40) + EX_LD(LOAD(ldx, %o1 - 0x10, %g3), memcpy_retl_o2_plus_o4_plus_40) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_40) add %o0, 0x08, %o0 - EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), 
NG4_retl_o2_plus_o4_plus_32) - EX_ST(STORE_INIT(%o5, %o0), NG4_retl_o2_plus_o4_plus_32) + EX_LD(LOAD(ldx, %o1 - 0x08, GLOBAL_SPARE), memcpy_retl_o2_plus_o4_plus_32) + EX_ST(STORE_INIT(%o5, %o0), memcpy_retl_o2_plus_o4_plus_32) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g2, %o0), NG4_retl_o2_plus_o4_plus_24) + EX_ST(STORE_INIT(%g2, %o0), memcpy_retl_o2_plus_o4_plus_24) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(%g3, %o0), NG4_retl_o2_plus_o4_plus_16) + EX_ST(STORE_INIT(%g3, %o0), memcpy_retl_o2_plus_o4_plus_16) add %o0, 0x08, %o0 - EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), NG4_retl_o2_plus_o4_plus_8) + EX_ST(STORE_INIT(GLOBAL_SPARE, %o0), memcpy_retl_o2_plus_o4_plus_8) add %o0, 0x08, %o0 bne,pt %icc, 1b LOAD(prefetch, %o1 + 0x200, #n_reads_strong) @@ -218,17 +218,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ sub %o2, %o4, %o2 alignaddr %o1, %g0, %g1 add %o1, %o4, %o1 - EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), NG4_retl_o2_plus_o4) -1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), NG4_retl_o2_plus_o4) + EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0), memcpy_retl_o2_plus_o4) +1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2), memcpy_retl_o2_plus_o4) subcc %o4, 0x40, %o4 - EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), NG4_retl_o2_plus_o4_plus_64) - EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12), memcpy_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14), memcpy_retl_o2_plus_o4_plus_64) faligndata %f0, %f2, %f16 - EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), NG4_retl_o2_plus_o4_plus_64) + EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0), memcpy_retl_o2_plus_o4_plus_64) faligndata %f2, %f4, %f18 add %g1, 0x40, %g1 faligndata %f4, %f6, %f20 @@ -237,14 +237,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ faligndata %f10, %f12, %f26 faligndata %f12, %f14, %f28 faligndata %f14, %f0, %f30 - EX_ST_FP(STORE(std, %f16, %o0 + 0x00), NG4_retl_o2_plus_o4_plus_64) - EX_ST_FP(STORE(std, %f18, %o0 + 0x08), NG4_retl_o2_plus_o4_plus_56) - EX_ST_FP(STORE(std, %f20, %o0 + 0x10), NG4_retl_o2_plus_o4_plus_48) - EX_ST_FP(STORE(std, %f22, %o0 + 0x18), NG4_retl_o2_plus_o4_plus_40) - EX_ST_FP(STORE(std, %f24, %o0 + 0x20), NG4_retl_o2_plus_o4_plus_32) - EX_ST_FP(STORE(std, %f26, %o0 + 0x28), NG4_retl_o2_plus_o4_plus_24) - EX_ST_FP(STORE(std, %f28, %o0 + 0x30), NG4_retl_o2_plus_o4_plus_16) - EX_ST_FP(STORE(std, %f30, %o0 + 0x38), NG4_retl_o2_plus_o4_plus_8) + EX_ST_FP(STORE(std, %f16, %o0 + 0x00), memcpy_retl_o2_plus_o4_plus_64) + EX_ST_FP(STORE(std, %f18, %o0 + 0x08), memcpy_retl_o2_plus_o4_plus_56) + EX_ST_FP(STORE(std, %f20, %o0 + 0x10), memcpy_retl_o2_plus_o4_plus_48) + EX_ST_FP(STORE(std, %f22, %o0 + 0x18), memcpy_retl_o2_plus_o4_plus_40) + EX_ST_FP(STORE(std, %f24, %o0 + 0x20), memcpy_retl_o2_plus_o4_plus_32) + EX_ST_FP(STORE(std, %f26, %o0 + 0x28), memcpy_retl_o2_plus_o4_plus_24) + EX_ST_FP(STORE(std, %f28, %o0 + 0x30), memcpy_retl_o2_plus_o4_plus_16) + EX_ST_FP(STORE(std, %f30, %o0 + 0x38), memcpy_retl_o2_plus_o4_plus_8) add %o0, 0x40, %o0 bne,pt %icc, 1b LOAD(prefetch, 
%g1 + 0x200, #n_reads_strong) @@ -272,38 +272,38 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andncc %o2, 0x20 - 1, %o5 be,pn %icc, 2f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) - EX_LD(LOAD(ldx, %o1 + 0x08, %g2), NG4_retl_o2_plus_o5) - EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), NG4_retl_o2_plus_o5) - EX_LD(LOAD(ldx, %o1 + 0x18, %o4), NG4_retl_o2_plus_o5) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, GLOBAL_SPARE), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) add %o1, 0x20, %o1 subcc %o5, 0x20, %o5 - EX_ST(STORE(stx, %g1, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_32) - EX_ST(STORE(stx, %g2, %o0 + 0x08), NG4_retl_o2_plus_o5_plus_24) - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), NG4_retl_o2_plus_o5_plus_24) - EX_ST(STORE(stx, %o4, %o0 + 0x18), NG4_retl_o2_plus_o5_plus_8) + EX_ST(STORE(stx, %g1, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) bne,pt %icc, 1b add %o0, 0x20, %o0 2: andcc %o2, 0x18, %o5 be,pt %icc, 3f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 add %o0, 0x08, %o0 subcc %o5, 0x08, %o5 bne,pt %icc, 1b - EX_ST(STORE(stx, %g1, %o0 - 0x08), NG4_retl_o2_plus_o5_plus_8) + EX_ST(STORE(stx, %g1, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8) 3: brz,pt %o2, .Lexit cmp %o2, 0x04 bl,pn %icc, .Ltiny nop - EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2) + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2) add %o1, 0x04, %o1 add %o0, 0x04, %o0 subcc %o2, 0x04, %o2 bne,pn %icc, .Ltiny - EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_4) + EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_4) ba,a,pt %icc, .Lexit .Lmedium_unaligned: /* First get dest 8 byte aligned. 
*/ @@ -312,12 +312,12 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ brz,pt %g1, 2f sub %o2, %g1, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), NG4_retl_o2_plus_g1) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 1, %o1 subcc %g1, 1, %g1 add %o0, 1, %o0 bne,pt %icc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01), NG4_retl_o2_plus_g1_plus_1) + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) 2: and %o1, 0x7, %g1 brz,pn %g1, .Lmedium_noprefetch @@ -325,16 +325,16 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ mov 64, %g2 sub %g2, %g1, %g2 andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1 + 0x00, %o4), NG4_retl_o2) + EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2) sllx %o4, %g1, %o4 andn %o2, 0x08 - 1, %o5 sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), NG4_retl_o2_plus_o5) +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 subcc %o5, 0x08, %o5 srlx %g3, %g2, GLOBAL_SPARE or GLOBAL_SPARE, %o4, GLOBAL_SPARE - EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), NG4_retl_o2_plus_o5_plus_8) + EX_ST(STORE(stx, GLOBAL_SPARE, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8) add %o0, 0x08, %o0 bne,pt %icc, 1b sllx %g3, %g1, %o4 @@ -345,17 +345,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ ba,pt %icc, .Lsmall_unaligned .Ltiny: - EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) + EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x00), NG4_retl_o2_plus_1) - EX_LD(LOAD(ldub, %o1 + 0x01, %g1), NG4_retl_o2) + EX_ST(STORE(stb, %g1, %o0 + 0x00), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x01, %g1), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x01), NG4_retl_o2_plus_1) - EX_LD(LOAD(ldub, %o1 + 0x02, %g1), NG4_retl_o2) + EX_ST(STORE(stb, %g1, %o0 + 0x01), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x02, %g1), memcpy_retl_o2) ba,pt %icc, .Lexit - EX_ST(STORE(stb, %g1, %o0 + 0x02), NG4_retl_o2) + EX_ST(STORE(stb, %g1, %o0 + 0x02), memcpy_retl_o2) .Lsmall: andcc %g2, 0x3, %g0 @@ -363,23 +363,23 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */ andn %o2, 0x4 - 1, %o5 sub %o2, %o5, %o2 1: - EX_LD(LOAD(lduw, %o1 + 0x00, %g1), NG4_retl_o2_plus_o5) + EX_LD(LOAD(lduw, %o1 + 0x00, %g1), memcpy_retl_o2_plus_o5) add %o1, 0x04, %o1 subcc %o5, 0x04, %o5 add %o0, 0x04, %o0 bne,pt %icc, 1b - EX_ST(STORE(stw, %g1, %o0 - 0x04), NG4_retl_o2_plus_o5_plus_4) + EX_ST(STORE(stw, %g1, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4) brz,pt %o2, .Lexit nop ba,a,pt %icc, .Ltiny .Lsmall_unaligned: -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), NG4_retl_o2) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g1), memcpy_retl_o2) add %o1, 1, %o1 add %o0, 1, %o0 subcc %o2, 1, %o2 bne,pt %icc, 1b - EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1) + EX_ST(STORE(stb, %g1, %o0 - 0x01), memcpy_retl_o2_plus_1) ba,a,pt %icc, .Lexit nop .size FUNC_NAME, .-FUNC_NAME -- cgit v1.2.3 From b3a04ed507bf5b85e9eae521f5c6ca1d1bc6a4f2 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 7 Aug 2017 17:52:51 -0600 Subject: arch/sparc: Optimized memcpy, memset, copy_to_user, copy_from_user for M7/M8 New algorithm that takes advantage of the M7/M8 block init store ASI, ie, overlapping pipelines and miss buffer filling. Full details in code comments. Signed-off-by: Babu Moger Signed-off-by: David S. 
Miller --- arch/sparc/kernel/head_64.S | 16 +- arch/sparc/lib/M7copy_from_user.S | 41 ++ arch/sparc/lib/M7copy_to_user.S | 51 +++ arch/sparc/lib/M7memcpy.S | 923 ++++++++++++++++++++++++++++++++++++++ arch/sparc/lib/M7memset.S | 352 +++++++++++++++ arch/sparc/lib/M7patch.S | 51 +++ arch/sparc/lib/Makefile | 3 + 7 files changed, 1435 insertions(+), 2 deletions(-) create mode 100644 arch/sparc/lib/M7copy_from_user.S create mode 100644 arch/sparc/lib/M7copy_to_user.S create mode 100644 arch/sparc/lib/M7memcpy.S create mode 100644 arch/sparc/lib/M7memset.S create mode 100644 arch/sparc/lib/M7patch.S diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 78e0211753d2..bf9a5acc8432 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -603,10 +603,10 @@ niagara_tlb_fixup: be,pt %xcc, niagara4_patch nop cmp %g1, SUN4V_CHIP_SPARC_M7 - be,pt %xcc, niagara4_patch + be,pt %xcc, sparc_m7_patch nop cmp %g1, SUN4V_CHIP_SPARC_M8 - be,pt %xcc, niagara4_patch + be,pt %xcc, sparc_m7_patch nop cmp %g1, SUN4V_CHIP_SPARC_SN be,pt %xcc, niagara4_patch @@ -621,6 +621,18 @@ niagara_tlb_fixup: ba,a,pt %xcc, 80f nop + +sparc_m7_patch: + call m7_patch_copyops + nop + call m7_patch_bzero + nop + call m7_patch_pageops + nop + + ba,a,pt %xcc, 80f + nop + niagara4_patch: call niagara4_patch_copyops nop diff --git a/arch/sparc/lib/M7copy_from_user.S b/arch/sparc/lib/M7copy_from_user.S new file mode 100644 index 000000000000..d0689d762726 --- /dev/null +++ b/arch/sparc/lib/M7copy_from_user.S @@ -0,0 +1,41 @@ +/* + * M7copy_from_user.S: SPARC M7 optimized copy from userspace. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + + +#define EX_LD(x) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, __restore_asi; \ + .text; \ + .align 4; + +#define EX_LD_FP(x) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, __restore_asi_fp; \ + .text; \ + .align 4; + + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#define FUNC_NAME M7copy_from_user +#define LOAD(type,addr,dest) type##a [addr] %asi, dest +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "M7memcpy.S" diff --git a/arch/sparc/lib/M7copy_to_user.S b/arch/sparc/lib/M7copy_to_user.S new file mode 100644 index 000000000000..d3be1327d7ce --- /dev/null +++ b/arch/sparc/lib/M7copy_to_user.S @@ -0,0 +1,51 @@ +/* + * M7copy_to_user.S: SPARC M7 optimized copy to userspace. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + + +#define EX_ST(x) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, __restore_asi; \ + .text; \ + .align 4; + +#define EX_ST_FP(x) \ +98: x; \ + .section __ex_table,"a"; \ + .align 4; \ + .word 98b, __restore_asi_fp; \ + .text; \ + .align 4; + + +#ifndef ASI_AIUS +#define ASI_AIUS 0x11 +#endif + +#ifndef ASI_BLK_INIT_QUAD_LDD_AIUS +#define ASI_BLK_INIT_QUAD_LDD_AIUS 0x23 +#endif + +#define FUNC_NAME M7copy_to_user +#define STORE(type,src,addr) type##a src, [addr] %asi +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_AIUS +#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_S +#define EX_RETVAL(x) 0 + +#ifdef __KERNEL__ + /* Writing to %asi is _expensive_ so we hardcode it. + * Reading %asi to check for KERNEL_DS is comparatively + * cheap. 
+ */ +#define PREAMBLE \ + rd %asi, %g1; \ + cmp %g1, ASI_AIUS; \ + bne,pn %icc, raw_copy_in_user; \ + nop +#endif + +#include "M7memcpy.S" diff --git a/arch/sparc/lib/M7memcpy.S b/arch/sparc/lib/M7memcpy.S new file mode 100644 index 000000000000..2c88175d3abf --- /dev/null +++ b/arch/sparc/lib/M7memcpy.S @@ -0,0 +1,923 @@ +/* + * M7memcpy: Optimized SPARC M7 memcpy + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + + .file "M7memcpy.S" + +/* + * memcpy(s1, s2, len) + * + * Copy s2 to s1, always copy n bytes. + * Note: this C code does not work for overlapped copies. + * + * Fast assembler language version of the following C-program for memcpy + * which represents the `standard' for the C-library. + * + * void * + * memcpy(void *s, const void *s0, size_t n) + * { + * if (n != 0) { + * char *s1 = s; + * const char *s2 = s0; + * do { + * *s1++ = *s2++; + * } while (--n != 0); + * } + * return (s); + * } + * + * + * SPARC T7/M7 Flow : + * + * if (count < SMALL_MAX) { + * if count < SHORTCOPY (SHORTCOPY=3) + * copy bytes; exit with dst addr + * if src & dst aligned on word boundary but not long word boundary, + * copy with ldw/stw; branch to finish_up + * if src & dst aligned on long word boundary + * copy with ldx/stx; branch to finish_up + * if src & dst not aligned and length <= SHORTCHECK (SHORTCHECK=14) + * copy bytes; exit with dst addr + * move enough bytes to get src to word boundary + * if dst now on word boundary + * move_words: + * copy words; branch to finish_up + * if dst now on half word boundary + * load words, shift half words, store words; branch to finish_up + * if dst on byte 1 + * load words, shift 3 bytes, store words; branch to finish_up + * if dst on byte 3 + * load words, shift 1 byte, store words; branch to finish_up + * finish_up: + * copy bytes; exit with dst addr + * } else { More than SMALL_MAX bytes + * move bytes until dst is on long word boundary + * if( src is on long word boundary ) { + * if (count < MED_MAX) { + * finish_long: src/dst aligned on 8 bytes + * copy with ldx/stx in 8-way unrolled loop; + * copy final 0-63 bytes; exit with dst addr + * } else { src/dst aligned; count > MED_MAX + * align dst on 64 byte boundary; for main data movement: + * prefetch src data to L2 cache; let HW prefetch move data to L1 cache + * Use BIS (block initializing store) to avoid copying store cache + * lines from memory. But pre-store first element of each cache line + * ST_CHUNK lines in advance of the rest of that cache line. That + * gives time for replacement cache lines to be written back without + * excess STQ and Miss Buffer filling. Repeat until near the end, + * then finish up storing before going to finish_long. + * } + * } else { src/dst not aligned on 8 bytes + * if src is word aligned and count < MED_WMAX + * move words in 8-way unrolled loop + * move final 0-31 bytes; exit with dst addr + * if count < MED_UMAX + * use alignaddr/faligndata combined with ldd/std in 8-way + * unrolled loop to move data. + * go to unalign_done + * else + * setup alignaddr for faligndata instructions + * align dst on 64 byte boundary; prefetch src data to L1 cache + * loadx8, falign, block-store, prefetch loop + * (only use block-init-store when src/dst on 8 byte boundaries.) + * unalign_done: + * move remaining bytes for unaligned cases. exit with dst addr. 
+ * } + * + */ + +#include +#include + +#if !defined(EX_LD) && !defined(EX_ST) +#define NON_USER_COPY +#endif + +#ifndef EX_LD +#define EX_LD(x) x +#endif +#ifndef EX_LD_FP +#define EX_LD_FP(x) x +#endif + +#ifndef EX_ST +#define EX_ST(x) x +#endif +#ifndef EX_ST_FP +#define EX_ST_FP(x) x +#endif + +#ifndef EX_RETVAL +#define EX_RETVAL(x) x +#endif + +#ifndef LOAD +#define LOAD(type,addr,dest) type [addr], dest +#endif + +#ifndef STORE +#define STORE(type,src,addr) type src, [addr] +#endif + +/* + * ASI_BLK_INIT_QUAD_LDD_P/ASI_BLK_INIT_QUAD_LDD_S marks the cache + * line as "least recently used" which means if many threads are + * active, it has a high probability of being pushed out of the cache + * between the first initializing store and the final stores. + * Thus, we use ASI_ST_BLKINIT_MRU_P/ASI_ST_BLKINIT_MRU_S which + * marks the cache line as "most recently used" for all + * but the last cache line + */ +#ifndef STORE_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P +#else +#define STORE_ASI 0x80 /* ASI_P */ +#endif +#endif + +#ifndef STORE_MRU_ASI +#ifndef SIMULATE_NIAGARA_ON_NON_NIAGARA +#define STORE_MRU_ASI ASI_ST_BLKINIT_MRU_P +#else +#define STORE_MRU_ASI 0x80 /* ASI_P */ +#endif +#endif + +#ifndef STORE_INIT +#define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI +#endif + +#ifndef STORE_INIT_MRU +#define STORE_INIT_MRU(src,addr) stxa src, [addr] STORE_MRU_ASI +#endif + +#ifndef FUNC_NAME +#define FUNC_NAME M7memcpy +#endif + +#ifndef PREAMBLE +#define PREAMBLE +#endif + +#define BLOCK_SIZE 64 +#define SHORTCOPY 3 +#define SHORTCHECK 14 +#define SHORT_LONG 64 /* max copy for short longword-aligned case */ + /* must be at least 64 */ +#define SMALL_MAX 128 +#define MED_UMAX 1024 /* max copy for medium un-aligned case */ +#define MED_WMAX 1024 /* max copy for medium word-aligned case */ +#define MED_MAX 1024 /* max copy for medium longword-aligned case */ +#define ST_CHUNK 24 /* ST_CHUNK - block of values for BIS Store */ +#define ALIGN_PRE 24 /* distance for aligned prefetch loop */ + + .register %g2,#scratch + + .section ".text" + .global FUNC_NAME + .type FUNC_NAME, #function + .align 16 +FUNC_NAME: + srlx %o2, 31, %g2 + cmp %g2, 0 + tne %xcc, 5 + PREAMBLE + mov %o0, %g1 ! save %o0 + brz,pn %o2, .Lsmallx + cmp %o2, 3 + ble,pn %icc, .Ltiny_cp + cmp %o2, 19 + ble,pn %icc, .Lsmall_cp + or %o0, %o1, %g2 + cmp %o2, SMALL_MAX + bl,pn %icc, .Lmedium_cp + nop + +.Lmedium: + neg %o0, %o5 + andcc %o5, 7, %o5 ! bytes till DST 8 byte aligned + brz,pt %o5, .Ldst_aligned_on_8 + + ! %o5 has the bytes to be written in partial store. + sub %o2, %o5, %o2 + sub %o1, %o0, %o1 ! %o1 gets the difference +7: ! dst aligning loop + add %o1, %o0, %o4 + EX_LD(LOAD(ldub, %o4, %o4)) ! load one byte + subcc %o5, 1, %o5 + EX_ST(STORE(stb, %o4, %o0)) + bgu,pt %xcc, 7b + add %o0, 1, %o0 ! advance dst + add %o1, %o0, %o1 ! restore %o1 +.Ldst_aligned_on_8: + andcc %o1, 7, %o5 + brnz,pt %o5, .Lsrc_dst_unaligned_on_8 + nop + +.Lsrc_dst_aligned_on_8: + ! check if we are copying MED_MAX or more bytes + set MED_MAX, %o3 + cmp %o2, %o3 ! limit to store buffer size + bgu,pn %xcc, .Llarge_align8_copy + nop + +/* + * Special case for handling when src and dest are both long word aligned + * and total data to move is less than MED_MAX bytes + */ +.Lmedlong: + subcc %o2, 63, %o2 ! adjust length to allow cc test + ble,pn %xcc, .Lmedl63 ! skip big loop if less than 64 bytes + nop +.Lmedl64: + EX_LD(LOAD(ldx, %o1, %o4)) ! load + subcc %o2, 64, %o2 ! 
decrement length count + EX_ST(STORE(stx, %o4, %o0)) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3)) ! a block of 64 bytes + EX_ST(STORE(stx, %o3, %o0+8)) + EX_LD(LOAD(ldx, %o1+16, %o4)) + EX_ST(STORE(stx, %o4, %o0+16)) + EX_LD(LOAD(ldx, %o1+24, %o3)) + EX_ST(STORE(stx, %o3, %o0+24)) + EX_LD(LOAD(ldx, %o1+32, %o4)) ! load + EX_ST(STORE(stx, %o4, %o0+32)) ! and store + EX_LD(LOAD(ldx, %o1+40, %o3)) ! a block of 64 bytes + add %o1, 64, %o1 ! increase src ptr by 64 + EX_ST(STORE(stx, %o3, %o0+40)) + EX_LD(LOAD(ldx, %o1-16, %o4)) + add %o0, 64, %o0 ! increase dst ptr by 64 + EX_ST(STORE(stx, %o4, %o0-16)) + EX_LD(LOAD(ldx, %o1-8, %o3)) + bgu,pt %xcc, .Lmedl64 ! repeat if at least 64 bytes left + EX_ST(STORE(stx, %o3, %o0-8)) +.Lmedl63: + addcc %o2, 32, %o2 ! adjust remaining count + ble,pt %xcc, .Lmedl31 ! to skip if 31 or fewer bytes left + nop + EX_LD(LOAD(ldx, %o1, %o4)) ! load + sub %o2, 32, %o2 ! decrement length count + EX_ST(STORE(stx, %o4, %o0)) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3)) ! a block of 32 bytes + add %o1, 32, %o1 ! increase src ptr by 32 + EX_ST(STORE(stx, %o3, %o0+8)) + EX_LD(LOAD(ldx, %o1-16, %o4)) + add %o0, 32, %o0 ! increase dst ptr by 32 + EX_ST(STORE(stx, %o4, %o0-16)) + EX_LD(LOAD(ldx, %o1-8, %o3)) + EX_ST(STORE(stx, %o3, %o0-8)) +.Lmedl31: + addcc %o2, 16, %o2 ! adjust remaining count + ble,pt %xcc, .Lmedl15 ! skip if 15 or fewer bytes left + nop ! + EX_LD(LOAD(ldx, %o1, %o4)) + add %o1, 16, %o1 ! increase src ptr by 16 + EX_ST(STORE(stx, %o4, %o0)) + sub %o2, 16, %o2 ! decrease count by 16 + EX_LD(LOAD(ldx, %o1-8, %o3)) + add %o0, 16, %o0 ! increase dst ptr by 16 + EX_ST(STORE(stx, %o3, %o0-8)) +.Lmedl15: + addcc %o2, 15, %o2 ! restore count + bz,pt %xcc, .Lsmallx ! exit if finished + cmp %o2, 8 + blt,pt %xcc, .Lmedw7 ! skip if 7 or fewer bytes left + tst %o2 + EX_LD(LOAD(ldx, %o1, %o4)) ! load 8 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + add %o0, 8, %o0 ! increase dst ptr by 8 + subcc %o2, 8, %o2 ! decrease count by 8 + bnz,pn %xcc, .Lmedw7 + EX_ST(STORE(stx, %o4, %o0-8)) ! and store 8 bytes + retl + mov EX_RETVAL(%g1), %o0 ! restore %o0 + + .align 16 +.Lsrc_dst_unaligned_on_8: + ! DST is 8-byte aligned, src is not +2: + andcc %o1, 0x3, %o5 ! test word alignment + bnz,pt %xcc, .Lunalignsetup ! branch to skip if not word aligned + nop + +/* + * Handle all cases where src and dest are aligned on word + * boundaries. Use unrolled loops for better performance. + * This option wins over standard large data move when + * source and destination is in cache for.Lmedium + * to short data moves. + */ + set MED_WMAX, %o3 + cmp %o2, %o3 ! limit to store buffer size + bge,pt %xcc, .Lunalignrejoin ! otherwise rejoin main loop + nop + + subcc %o2, 31, %o2 ! adjust length to allow cc test + ! for end of loop + ble,pt %xcc, .Lmedw31 ! skip big loop if less than 16 +.Lmedw32: + EX_LD(LOAD(ld, %o1, %o4)) ! move a block of 32 bytes + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1+4, %o4)) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0)) + subcc %o2, 32, %o2 ! decrement length count + EX_LD(LOAD(ld, %o1+8, %o4)) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1+12, %o4)) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0+8)) + add %o1, 32, %o1 ! increase src ptr by 32 + EX_LD(LOAD(ld, %o1-16, %o4)) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-12, %o4)) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0+16)) + add %o0, 32, %o0 ! increase dst ptr by 32 + EX_LD(LOAD(ld, %o1-8, %o4)) + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-4, %o4)) + or %o4, %o5, %o5 + bgu,pt %xcc, .Lmedw32 ! 
repeat if at least 32 bytes left + EX_ST(STORE(stx, %o5, %o0-8)) +.Lmedw31: + addcc %o2, 31, %o2 ! restore count + + bz,pt %xcc, .Lsmallx ! exit if finished + nop + cmp %o2, 16 + blt,pt %xcc, .Lmedw15 + nop + EX_LD(LOAD(ld, %o1, %o4)) ! move a block of 16 bytes + sllx %o4, 32, %o5 + subcc %o2, 16, %o2 ! decrement length count + EX_LD(LOAD(ld, %o1+4, %o4)) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0)) + add %o1, 16, %o1 ! increase src ptr by 16 + EX_LD(LOAD(ld, %o1-8, %o4)) + add %o0, 16, %o0 ! increase dst ptr by 16 + sllx %o4, 32, %o5 + EX_LD(LOAD(ld, %o1-4, %o4)) + or %o4, %o5, %o5 + EX_ST(STORE(stx, %o5, %o0-8)) +.Lmedw15: + bz,pt %xcc, .Lsmallx ! exit if finished + cmp %o2, 8 + blt,pn %xcc, .Lmedw7 ! skip if 7 or fewer bytes left + tst %o2 + EX_LD(LOAD(ld, %o1, %o4)) ! load 4 bytes + subcc %o2, 8, %o2 ! decrease count by 8 + EX_ST(STORE(stw, %o4, %o0)) ! and store 4 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + EX_LD(LOAD(ld, %o1-4, %o3)) ! load 4 bytes + add %o0, 8, %o0 ! increase dst ptr by 8 + EX_ST(STORE(stw, %o3, %o0-4)) ! and store 4 bytes + bz,pt %xcc, .Lsmallx ! exit if finished +.Lmedw7: ! count is ge 1, less than 8 + cmp %o2, 4 ! check for 4 bytes left + blt,pn %xcc, .Lsmallleft3 ! skip if 3 or fewer bytes left + nop ! + EX_LD(LOAD(ld, %o1, %o4)) ! load 4 bytes + add %o1, 4, %o1 ! increase src ptr by 4 + add %o0, 4, %o0 ! increase dst ptr by 4 + subcc %o2, 4, %o2 ! decrease count by 4 + bnz .Lsmallleft3 + EX_ST(STORE(stw, %o4, %o0-4))! and store 4 bytes + retl + mov EX_RETVAL(%g1), %o0 + + .align 16 +.Llarge_align8_copy: ! Src and dst share 8 byte alignment + ! align dst to 64 byte boundary + andcc %o0, 0x3f, %o3 ! %o3 == 0 means dst is 64 byte aligned + brz,pn %o3, .Laligned_to_64 + andcc %o0, 8, %o3 ! odd long words to move? + brz,pt %o3, .Laligned_to_16 + nop + EX_LD(LOAD(ldx, %o1, %o4)) + sub %o2, 8, %o2 + add %o1, 8, %o1 ! increment src ptr + add %o0, 8, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8)) +.Laligned_to_16: + andcc %o0, 16, %o3 ! pair of long words to move? + brz,pt %o3, .Laligned_to_32 + nop + EX_LD(LOAD(ldx, %o1, %o4)) + sub %o2, 16, %o2 + EX_ST(STORE(stx, %o4, %o0)) + add %o1, 16, %o1 ! increment src ptr + EX_LD(LOAD(ldx, %o1-8, %o4)) + add %o0, 16, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8)) +.Laligned_to_32: + andcc %o0, 32, %o3 ! four long words to move? + brz,pt %o3, .Laligned_to_64 + nop + EX_LD(LOAD(ldx, %o1, %o4)) + sub %o2, 32, %o2 + EX_ST(STORE(stx, %o4, %o0)) + EX_LD(LOAD(ldx, %o1+8, %o4)) + EX_ST(STORE(stx, %o4, %o0+8)) + EX_LD(LOAD(ldx, %o1+16, %o4)) + EX_ST(STORE(stx, %o4, %o0+16)) + add %o1, 32, %o1 ! increment src ptr + EX_LD(LOAD(ldx, %o1-8, %o4)) + add %o0, 32, %o0 ! increment dst ptr + EX_ST(STORE(stx, %o4, %o0-8)) +.Laligned_to_64: +! +! Using block init store (BIS) instructions to avoid fetching cache +! lines from memory. Use ST_CHUNK stores to first element of each cache +! line (similar to prefetching) to avoid overfilling STQ or miss buffers. +! Gives existing cache lines time to be moved out of L1/L2/L3 cache. +! Initial stores using MRU version of BIS to keep cache line in +! cache until we are ready to store final element of cache line. +! Then store last element using the LRU version of BIS. +! + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 +! +! We use STORE_MRU_ASI for the first seven stores to each cache line +! followed by STORE_ASI (mark as LRU) for the last store. That +! mixed approach reduces the probability that the cache line is removed +! 
before we finish setting it, while minimizing the effects on +! other cached values during a large memcpy +! +! ST_CHUNK batches up initial BIS operations for several cache lines +! to allow multiple requests to not be blocked by overflowing the +! the store miss buffer. Then the matching stores for all those +! BIS operations are executed. +! + + sub %o0, 8, %o0 ! adjust %o0 for ASI alignment +.Lalign_loop: + cmp %o5, ST_CHUNK*64 + blu,pt %xcc, .Lalign_loop_fin + mov ST_CHUNK,%o3 +.Lalign_loop_start: + prefetch [%o1 + (ALIGN_PRE * BLOCK_SIZE)], 21 + subcc %o3, 1, %o3 + EX_LD(LOAD(ldx, %o1, %o4)) + add %o1, 64, %o1 + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0)) + bgu %xcc,.Lalign_loop_start + add %o0, 56, %o0 + + mov ST_CHUNK,%o3 + sllx %o3, 6, %o4 ! ST_CHUNK*64 + sub %o1, %o4, %o1 ! reset %o1 + sub %o0, %o4, %o0 ! reset %o0 + +.Lalign_loop_rest: + EX_LD(LOAD(ldx, %o1+8, %o4)) + add %o0, 16, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0)) + EX_LD(LOAD(ldx, %o1+16, %o4)) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0)) + subcc %o3, 1, %o3 + EX_LD(LOAD(ldx, %o1+24, %o4)) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0)) + EX_LD(LOAD(ldx, %o1+32, %o4)) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0)) + EX_LD(LOAD(ldx, %o1+40, %o4)) + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0)) + EX_LD(LOAD(ldx, %o1+48, %o4)) + add %o1, 64, %o1 + add %o0, 8, %o0 + EX_ST(STORE_INIT_MRU(%o4, %o0)) + add %o0, 8, %o0 + EX_LD(LOAD(ldx, %o1-8, %o4)) + sub %o5, 64, %o5 + bgu %xcc,.Lalign_loop_rest + ! mark cache line as LRU + EX_ST(STORE_INIT(%o4, %o0)) + + cmp %o5, ST_CHUNK*64 + bgu,pt %xcc, .Lalign_loop_start + mov ST_CHUNK,%o3 + + cmp %o5, 0 + beq .Lalign_done + nop +.Lalign_loop_fin: + EX_LD(LOAD(ldx, %o1, %o4)) + EX_ST(STORE(stx, %o4, %o0+8)) + EX_LD(LOAD(ldx, %o1+8, %o4)) + EX_ST(STORE(stx, %o4, %o0+8+8)) + EX_LD(LOAD(ldx, %o1+16, %o4)) + EX_ST(STORE(stx, %o4, %o0+8+16)) + subcc %o5, 64, %o5 + EX_LD(LOAD(ldx, %o1+24, %o4)) + EX_ST(STORE(stx, %o4, %o0+8+24)) + EX_LD(LOAD(ldx, %o1+32, %o4)) + EX_ST(STORE(stx, %o4, %o0+8+32)) + EX_LD(LOAD(ldx, %o1+40, %o4)) + EX_ST(STORE(stx, %o4, %o0+8+40)) + EX_LD(LOAD(ldx, %o1+48, %o4)) + add %o1, 64, %o1 + EX_ST(STORE(stx, %o4, %o0+8+48)) + add %o0, 64, %o0 + EX_LD(LOAD(ldx, %o1-8, %o4)) + bgu %xcc,.Lalign_loop_fin + EX_ST(STORE(stx, %o4, %o0)) + +.Lalign_done: + add %o0, 8, %o0 ! restore %o0 from ASI alignment + membar #StoreStore + sub %o2, 63, %o2 ! adjust length to allow cc test + ba .Lmedl63 ! in .Lmedl63 + nop + + .align 16 + ! Dst is on 8 byte boundary; src is not; remaining count > SMALL_MAX +.Lunalignsetup: +.Lunalignrejoin: + mov %g1, %o3 ! save %g1 as VISEntryHalf clobbers it +#ifdef NON_USER_COPY + VISEntryHalfFast(.Lmedium_vis_entry_fail_cp) +#else + VISEntryHalf +#endif + mov %o3, %g1 ! restore %g1 + + set MED_UMAX, %o3 + cmp %o2, %o3 ! check for.Lmedium unaligned limit + bge,pt %xcc,.Lunalign_large + prefetch [%o1 + (4 * BLOCK_SIZE)], 20 + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 + cmp %o2, 8 ! Insure we do not load beyond + bgt .Lunalign_adjust ! end of source buffer + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + add %o2, 64, %o2 ! adjust to leave loop + sub %o5, 64, %o5 ! early if necessary +.Lunalign_adjust: + alignaddr %o1, %g0, %g0 ! generate %gsr + add %o1, %o5, %o1 ! 
advance %o1 to after blocks + EX_LD_FP(LOAD(ldd, %o4, %f0)) +.Lunalign_loop: + EX_LD_FP(LOAD(ldd, %o4+8, %f2)) + faligndata %f0, %f2, %f16 + EX_LD_FP(LOAD(ldd, %o4+16, %f4)) + subcc %o5, BLOCK_SIZE, %o5 + EX_ST_FP(STORE(std, %f16, %o0)) + faligndata %f2, %f4, %f18 + EX_LD_FP(LOAD(ldd, %o4+24, %f6)) + EX_ST_FP(STORE(std, %f18, %o0+8)) + faligndata %f4, %f6, %f20 + EX_LD_FP(LOAD(ldd, %o4+32, %f8)) + EX_ST_FP(STORE(std, %f20, %o0+16)) + faligndata %f6, %f8, %f22 + EX_LD_FP(LOAD(ldd, %o4+40, %f10)) + EX_ST_FP(STORE(std, %f22, %o0+24)) + faligndata %f8, %f10, %f24 + EX_LD_FP(LOAD(ldd, %o4+48, %f12)) + EX_ST_FP(STORE(std, %f24, %o0+32)) + faligndata %f10, %f12, %f26 + EX_LD_FP(LOAD(ldd, %o4+56, %f14)) + add %o4, BLOCK_SIZE, %o4 + EX_ST_FP(STORE(std, %f26, %o0+40)) + faligndata %f12, %f14, %f28 + EX_LD_FP(LOAD(ldd, %o4, %f0)) + EX_ST_FP(STORE(std, %f28, %o0+48)) + faligndata %f14, %f0, %f30 + EX_ST_FP(STORE(std, %f30, %o0+56)) + add %o0, BLOCK_SIZE, %o0 + bgu,pt %xcc, .Lunalign_loop + prefetch [%o4 + (5 * BLOCK_SIZE)], 20 + ba .Lunalign_done + nop + +.Lunalign_large: + andcc %o0, 0x3f, %o3 ! is dst 64-byte block aligned? + bz %xcc, .Lunalignsrc + sub %o3, 64, %o3 ! %o3 will be multiple of 8 + neg %o3 ! bytes until dest is 64 byte aligned + sub %o2, %o3, %o2 ! update cnt with bytes to be moved + ! Move bytes according to source alignment + andcc %o1, 0x1, %o5 + bnz %xcc, .Lunalignbyte ! check for byte alignment + nop + andcc %o1, 2, %o5 ! check for half word alignment + bnz %xcc, .Lunalignhalf + nop + ! Src is word aligned +.Lunalignword: + EX_LD_FP(LOAD(ld, %o1, %o4)) ! load 4 bytes + add %o1, 8, %o1 ! increase src ptr by 8 + EX_ST_FP(STORE(stw, %o4, %o0)) ! and store 4 bytes + subcc %o3, 8, %o3 ! decrease count by 8 + EX_LD_FP(LOAD(ld, %o1-4, %o4)) ! load 4 bytes + add %o0, 8, %o0 ! increase dst ptr by 8 + bnz %xcc, .Lunalignword + EX_ST_FP(STORE(stw, %o4, %o0-4))! and store 4 bytes + ba .Lunalignsrc + nop + + ! Src is half-word aligned +.Lunalignhalf: + EX_LD_FP(LOAD(lduh, %o1, %o4)) ! load 2 bytes + sllx %o4, 32, %o5 ! shift left + EX_LD_FP(LOAD(lduw, %o1+2, %o4)) + or %o4, %o5, %o5 + sllx %o5, 16, %o5 + EX_LD_FP(LOAD(lduh, %o1+6, %o4)) + or %o4, %o5, %o5 + EX_ST_FP(STORE(stx, %o5, %o0)) + add %o1, 8, %o1 + subcc %o3, 8, %o3 + bnz %xcc, .Lunalignhalf + add %o0, 8, %o0 + ba .Lunalignsrc + nop + + ! Src is Byte aligned +.Lunalignbyte: + sub %o0, %o1, %o0 ! share pointer advance +.Lunalignbyte_loop: + EX_LD_FP(LOAD(ldub, %o1, %o4)) + sllx %o4, 56, %o5 + EX_LD_FP(LOAD(lduh, %o1+1, %o4)) + sllx %o4, 40, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(lduh, %o1+3, %o4)) + sllx %o4, 24, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(lduh, %o1+5, %o4)) + sllx %o4, 8, %o4 + or %o4, %o5, %o5 + EX_LD_FP(LOAD(ldub, %o1+7, %o4)) + or %o4, %o5, %o5 + add %o0, %o1, %o0 + EX_ST_FP(STORE(stx, %o5, %o0)) + sub %o0, %o1, %o0 + subcc %o3, 8, %o3 + bnz %xcc, .Lunalignbyte_loop + add %o1, 8, %o1 + add %o0,%o1, %o0 ! restore pointer + + ! Destination is now block (64 byte aligned) +.Lunalignsrc: + andn %o2, 0x3f, %o5 ! %o5 is multiple of block size + and %o2, 0x3f, %o2 ! residue bytes in %o2 + add %o2, 64, %o2 ! Insure we do not load beyond + sub %o5, 64, %o5 ! end of source buffer + + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + alignaddr %o1, %g0, %g0 ! generate %gsr + add %o1, %o5, %o1 ! 
advance %o1 to after blocks + + EX_LD_FP(LOAD(ldd, %o4, %f14)) + add %o4, 8, %o4 +.Lunalign_sloop: + EX_LD_FP(LOAD(ldd, %o4, %f16)) + faligndata %f14, %f16, %f0 + EX_LD_FP(LOAD(ldd, %o4+8, %f18)) + faligndata %f16, %f18, %f2 + EX_LD_FP(LOAD(ldd, %o4+16, %f20)) + faligndata %f18, %f20, %f4 + EX_ST_FP(STORE(std, %f0, %o0)) + subcc %o5, 64, %o5 + EX_LD_FP(LOAD(ldd, %o4+24, %f22)) + faligndata %f20, %f22, %f6 + EX_ST_FP(STORE(std, %f2, %o0+8)) + EX_LD_FP(LOAD(ldd, %o4+32, %f24)) + faligndata %f22, %f24, %f8 + EX_ST_FP(STORE(std, %f4, %o0+16)) + EX_LD_FP(LOAD(ldd, %o4+40, %f26)) + faligndata %f24, %f26, %f10 + EX_ST_FP(STORE(std, %f6, %o0+24)) + EX_LD_FP(LOAD(ldd, %o4+48, %f28)) + faligndata %f26, %f28, %f12 + EX_ST_FP(STORE(std, %f8, %o0+32)) + add %o4, 64, %o4 + EX_LD_FP(LOAD(ldd, %o4-8, %f30)) + faligndata %f28, %f30, %f14 + EX_ST_FP(STORE(std, %f10, %o0+40)) + EX_ST_FP(STORE(std, %f12, %o0+48)) + add %o0, 64, %o0 + EX_ST_FP(STORE(std, %f14, %o0-8)) + fsrc2 %f30, %f14 + bgu,pt %xcc, .Lunalign_sloop + prefetch [%o4 + (8 * BLOCK_SIZE)], 20 + +.Lunalign_done: + ! Handle trailing bytes, 64 to 127 + ! Dest long word aligned, Src not long word aligned + cmp %o2, 15 + bleu %xcc, .Lunalign_short + + andn %o2, 0x7, %o5 ! %o5 is multiple of 8 + and %o2, 0x7, %o2 ! residue bytes in %o2 + add %o2, 8, %o2 + sub %o5, 8, %o5 ! insure we do not load past end of src + andn %o1, 0x7, %o4 ! %o4 has long word aligned src address + add %o1, %o5, %o1 ! advance %o1 to after multiple of 8 + EX_LD_FP(LOAD(ldd, %o4, %f0)) ! fetch partial word +.Lunalign_by8: + EX_LD_FP(LOAD(ldd, %o4+8, %f2)) + add %o4, 8, %o4 + faligndata %f0, %f2, %f16 + subcc %o5, 8, %o5 + EX_ST_FP(STORE(std, %f16, %o0)) + fsrc2 %f2, %f0 + bgu,pt %xcc, .Lunalign_by8 + add %o0, 8, %o0 + +.Lunalign_short: +#ifdef NON_USER_COPY + VISExitHalfFast +#else + VISExitHalf +#endif + ba .Lsmallrest + nop + +/* + * This is a special case of nested memcpy. This can happen when kernel + * calls unaligned memcpy back to back without saving FP registers. We need + * traps(context switch) to save/restore FP registers. If the kernel calls + * memcpy without this trap sequence we will hit FP corruption. Let's use + * the normal integer load/store method in this case. + */ + +#ifdef NON_USER_COPY +.Lmedium_vis_entry_fail_cp: + or %o0, %o1, %g2 +#endif +.Lmedium_cp: + LOAD(prefetch, %o1 + 0x40, #n_reads_strong) + andcc %g2, 0x7, %g0 + bne,pn %xcc, .Lmedium_unaligned_cp + nop + +.Lmedium_noprefetch_cp: + andncc %o2, 0x20 - 1, %o5 + be,pn %xcc, 2f + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3)) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2)) + EX_LD(LOAD(ldx, %o1 + 0x10, %g7)) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4)) + add %o1, 0x20, %o1 + subcc %o5, 0x20, %o5 + EX_ST(STORE(stx, %o3, %o0 + 0x00)) + EX_ST(STORE(stx, %g2, %o0 + 0x08)) + EX_ST(STORE(stx, %g7, %o0 + 0x10)) + EX_ST(STORE(stx, %o4, %o0 + 0x18)) + bne,pt %xcc, 1b + add %o0, 0x20, %o0 +2: andcc %o2, 0x18, %o5 + be,pt %xcc, 3f + sub %o2, %o5, %o2 +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3)) + add %o1, 0x08, %o1 + add %o0, 0x08, %o0 + subcc %o5, 0x08, %o5 + bne,pt %xcc, 1b + EX_ST(STORE(stx, %o3, %o0 - 0x08)) +3: brz,pt %o2, .Lexit_cp + cmp %o2, 0x04 + bl,pn %xcc, .Ltiny_cp + nop + EX_LD(LOAD(lduw, %o1 + 0x00, %o3)) + add %o1, 0x04, %o1 + add %o0, 0x04, %o0 + subcc %o2, 0x04, %o2 + bne,pn %xcc, .Ltiny_cp + EX_ST(STORE(stw, %o3, %o0 - 0x04)) + ba,a,pt %xcc, .Lexit_cp + +.Lmedium_unaligned_cp: + /* First get dest 8 byte aligned. 
*/ + sub %g0, %o0, %o3 + and %o3, 0x7, %o3 + brz,pt %o3, 2f + sub %o2, %o3, %o2 + +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) + add %o1, 1, %o1 + subcc %o3, 1, %o3 + add %o0, 1, %o0 + bne,pt %xcc, 1b + EX_ST(STORE(stb, %g2, %o0 - 0x01)) +2: + and %o1, 0x7, %o3 + brz,pn %o3, .Lmedium_noprefetch_cp + sll %o3, 3, %o3 + mov 64, %g2 + sub %g2, %o3, %g2 + andn %o1, 0x7, %o1 + EX_LD(LOAD(ldx, %o1 + 0x00, %o4)) + sllx %o4, %o3, %o4 + andn %o2, 0x08 - 1, %o5 + sub %o2, %o5, %o2 + +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3)) + add %o1, 0x08, %o1 + subcc %o5, 0x08, %o5 + srlx %g3, %g2, %g7 + or %g7, %o4, %g7 + EX_ST(STORE(stx, %g7, %o0 + 0x00)) + add %o0, 0x08, %o0 + bne,pt %xcc, 1b + sllx %g3, %o3, %o4 + srl %o3, 3, %o3 + add %o1, %o3, %o1 + brz,pn %o2, .Lexit_cp + nop + ba,pt %xcc, .Lsmall_unaligned_cp + +.Ltiny_cp: + EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) + subcc %o2, 1, %o2 + be,pn %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x00)) + EX_LD(LOAD(ldub, %o1 + 0x01, %o3)) + subcc %o2, 1, %o2 + be,pn %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x01)) + EX_LD(LOAD(ldub, %o1 + 0x02, %o3)) + ba,pt %xcc, .Lexit_cp + EX_ST(STORE(stb, %o3, %o0 + 0x02)) + +.Lsmall_cp: + andcc %g2, 0x3, %g0 + bne,pn %xcc, .Lsmall_unaligned_cp + andn %o2, 0x4 - 1, %o5 + sub %o2, %o5, %o2 +1: + EX_LD(LOAD(lduw, %o1 + 0x00, %o3)) + add %o1, 0x04, %o1 + subcc %o5, 0x04, %o5 + add %o0, 0x04, %o0 + bne,pt %xcc, 1b + EX_ST(STORE(stw, %o3, %o0 - 0x04)) + brz,pt %o2, .Lexit_cp + nop + ba,a,pt %xcc, .Ltiny_cp + +.Lsmall_unaligned_cp: +1: EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) + add %o1, 1, %o1 + add %o0, 1, %o0 + subcc %o2, 1, %o2 + bne,pt %xcc, 1b + EX_ST(STORE(stb, %o3, %o0 - 0x01)) + ba,a,pt %xcc, .Lexit_cp + +.Lsmallrest: + tst %o2 + bz,pt %xcc, .Lsmallx + cmp %o2, 4 + blt,pn %xcc, .Lsmallleft3 + nop + sub %o2, 3, %o2 +.Lsmallnotalign4: + EX_LD(LOAD(ldub, %o1, %o3))! read byte + subcc %o2, 4, %o2 ! reduce count by 4 + EX_ST(STORE(stb, %o3, %o0)) ! write byte + EX_LD(LOAD(ldub, %o1+1, %o3))! repeat for total of 4 bytes + add %o1, 4, %o1 ! advance SRC by 4 + EX_ST(STORE(stb, %o3, %o0+1)) + EX_LD(LOAD(ldub, %o1-2, %o3)) + add %o0, 4, %o0 ! advance DST by 4 + EX_ST(STORE(stb, %o3, %o0-2)) + EX_LD(LOAD(ldub, %o1-1, %o3)) + bgu,pt %xcc, .Lsmallnotalign4 ! loop til 3 or fewer bytes remain + EX_ST(STORE(stb, %o3, %o0-1)) + addcc %o2, 3, %o2 ! restore count + bz,pt %xcc, .Lsmallx +.Lsmallleft3: ! 1, 2, or 3 bytes remain + subcc %o2, 1, %o2 + EX_LD(LOAD(ldub, %o1, %o3)) ! load one byte + bz,pt %xcc, .Lsmallx + EX_ST(STORE(stb, %o3, %o0)) ! store one byte + EX_LD(LOAD(ldub, %o1+1, %o3)) ! load second byte + subcc %o2, 1, %o2 + bz,pt %xcc, .Lsmallx + EX_ST(STORE(stb, %o3, %o0+1))! store second byte + EX_LD(LOAD(ldub, %o1+2, %o3)) ! load third byte + EX_ST(STORE(stb, %o3, %o0+2)) ! store third byte +.Lsmallx: + retl + mov EX_RETVAL(%g1), %o0 +.Lsmallfin: + tst %o2 + bnz,pn %xcc, .Lsmallleft3 + nop + retl + mov EX_RETVAL(%g1), %o0 ! restore %o0 +.Lexit_cp: + retl + mov EX_RETVAL(%g1), %o0 + .size FUNC_NAME, .-FUNC_NAME diff --git a/arch/sparc/lib/M7memset.S b/arch/sparc/lib/M7memset.S new file mode 100644 index 000000000000..62ea91b3a6b8 --- /dev/null +++ b/arch/sparc/lib/M7memset.S @@ -0,0 +1,352 @@ +/* + * M7memset.S: SPARC M7 optimized memset. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + */ + +/* + * M7memset.S: M7 optimized memset. + * + * char *memset(sp, c, n) + * + * Set an array of n chars starting at sp to the character c. + * Return sp. 
+ * + * Fast assembler language version of the following C-program for memset + * which represents the `standard' for the C-library. + * + * void * + * memset(void *sp1, int c, size_t n) + * { + * if (n != 0) { + * char *sp = sp1; + * do { + * *sp++ = (char)c; + * } while (--n != 0); + * } + * return (sp1); + * } + * + * The algorithm is as follows : + * + * For small 6 or fewer bytes stores, bytes will be stored. + * + * For less than 32 bytes stores, align the address on 4 byte boundary. + * Then store as many 4-byte chunks, followed by trailing bytes. + * + * For sizes greater than 32 bytes, align the address on 8 byte boundary. + * if (count >= 64) { + * store 8-bytes chunks to align the address on 64 byte boundary + * if (value to be set is zero && count >= MIN_ZERO) { + * Using BIS stores, set the first long word of each + * 64-byte cache line to zero which will also clear the + * other seven long words of the cache line. + * } + * else if (count >= MIN_LOOP) { + * Using BIS stores, set the first long word of each of + * ST_CHUNK cache lines (64 bytes each) before the main + * loop is entered. + * In the main loop, continue pre-setting the first long + * word of each cache line ST_CHUNK lines in advance while + * setting the other seven long words (56 bytes) of each + * cache line until fewer than ST_CHUNK*64 bytes remain. + * Then set the remaining seven long words of each cache + * line that has already had its first long word set. + * } + * store remaining data in 64-byte chunks until less than + * 64 bytes remain. + * } + * Store as many 8-byte chunks, followed by trailing bytes. + * + * BIS = Block Init Store + * Doing the advance store of the first element of the cache line + * initiates the displacement of a cache line while only using a single + * instruction in the pipeline. That avoids various pipeline delays, + * such as filling the miss buffer. The performance effect is + * similar to prefetching for normal stores. + * The special case for zero fills runs faster and uses fewer instruction + * cycles than the normal memset loop. + * + * We only use BIS for memset of greater than MIN_LOOP bytes because a sequence + * BIS stores must be followed by a membar #StoreStore. The benefit of + * the BIS store must be balanced against the cost of the membar operation. + */ + +/* + * ASI_STBI_P marks the cache line as "least recently used" + * which means if many threads are active, it has a high chance + * of being pushed out of the cache between the first initializing + * store and the final stores. + * Thus, we use ASI_STBIMRU_P which marks the cache line as + * "most recently used" for all but the last store to the cache line. + */ + +#include +#include + +#define ASI_STBI_P ASI_BLK_INIT_QUAD_LDD_P +#define ASI_STBIMRU_P ASI_ST_BLKINIT_MRU_P + + +#define ST_CHUNK 24 /* multiple of 4 due to loop unrolling */ +#define MIN_LOOP 16320 +#define MIN_ZERO 512 + + .section ".text" + .align 32 + +/* + * Define clear_page(dest) as memset(dest, 0, PAGE_SIZE) + * (can create a more optimized version later.) + */ + .globl M7clear_page + .globl M7clear_user_page +M7clear_page: /* clear_page(dest) */ +M7clear_user_page: + set PAGE_SIZE, %o1 + /* fall through into bzero code */ + + .size M7clear_page,.-M7clear_page + .size M7clear_user_page,.-M7clear_user_page + +/* + * Define bzero(dest, n) as memset(dest, 0, n) + * (can create a more optimized version later.) 
+ */ + .globl M7bzero +M7bzero: /* bzero(dest, size) */ + mov %o1, %o2 + mov 0, %o1 + /* fall through into memset code */ + + .size M7bzero,.-M7bzero + + .global M7memset + .type M7memset, #function + .register %g3, #scratch +M7memset: + mov %o0, %o5 ! copy sp1 before using it + cmp %o2, 7 ! if small counts, just write bytes + bleu,pn %xcc, .wrchar + and %o1, 0xff, %o1 ! o1 is (char)c + + sll %o1, 8, %o3 + or %o1, %o3, %o1 ! now o1 has 2 bytes of c + sll %o1, 16, %o3 + cmp %o2, 32 + blu,pn %xcc, .wdalign + or %o1, %o3, %o1 ! now o1 has 4 bytes of c + + sllx %o1, 32, %o3 + or %o1, %o3, %o1 ! now o1 has 8 bytes of c + +.dbalign: + andcc %o5, 7, %o3 ! is sp1 aligned on a 8 byte bound? + bz,pt %xcc, .blkalign ! already long word aligned + sub %o3, 8, %o3 ! -(bytes till long word aligned) + + add %o2, %o3, %o2 ! update o2 with new count + ! Set -(%o3) bytes till sp1 long word aligned +1: stb %o1, [%o5] ! there is at least 1 byte to set + inccc %o3 ! byte clearing loop + bl,pt %xcc, 1b + inc %o5 + + ! Now sp1 is long word aligned (sp1 is found in %o5) +.blkalign: + cmp %o2, 64 ! check if there are 64 bytes to set + blu,pn %xcc, .wrshort + mov %o2, %o3 + + andcc %o5, 63, %o3 ! is sp1 block aligned? + bz,pt %xcc, .blkwr ! now block aligned + sub %o3, 64, %o3 ! o3 is -(bytes till block aligned) + add %o2, %o3, %o2 ! o2 is the remainder + + ! Store -(%o3) bytes till dst is block (64 byte) aligned. + ! Use long word stores. + ! Recall that dst is already long word aligned +1: + addcc %o3, 8, %o3 + stx %o1, [%o5] + bl,pt %xcc, 1b + add %o5, 8, %o5 + + ! Now sp1 is block aligned +.blkwr: + andn %o2, 63, %o4 ! calculate size of blocks in bytes + brz,pn %o1, .wrzero ! special case if c == 0 + and %o2, 63, %o3 ! %o3 = bytes left after blk stores. + + set MIN_LOOP, %g1 + cmp %o4, %g1 ! check there are enough bytes to set + blu,pn %xcc, .short_set ! to justify cost of membar + ! must be > pre-cleared lines + nop + + ! initial cache-clearing stores + ! get store pipeline moving + rd %asi, %g3 ! save %asi to be restored later + wr %g0, ASI_STBIMRU_P, %asi + + ! Primary memset loop for large memsets +.wr_loop: + sub %o5, 8, %o5 ! adjust %o5 for ASI store alignment + mov ST_CHUNK, %g1 +.wr_loop_start: + stxa %o1, [%o5+8]%asi + subcc %g1, 4, %g1 + stxa %o1, [%o5+8+64]%asi + add %o5, 256, %o5 + stxa %o1, [%o5+8-128]%asi + bgu %xcc, .wr_loop_start + stxa %o1, [%o5+8-64]%asi + + sub %o5, ST_CHUNK*64, %o5 ! reset %o5 + mov ST_CHUNK, %g1 + +.wr_loop_rest: + stxa %o1, [%o5+8+8]%asi + sub %o4, 64, %o4 + stxa %o1, [%o5+16+8]%asi + subcc %g1, 1, %g1 + stxa %o1, [%o5+24+8]%asi + stxa %o1, [%o5+32+8]%asi + stxa %o1, [%o5+40+8]%asi + add %o5, 64, %o5 + stxa %o1, [%o5-8]%asi + bgu %xcc, .wr_loop_rest + stxa %o1, [%o5]ASI_STBI_P + + ! If more than ST_CHUNK*64 bytes remain to set, continue + ! setting the first long word of each cache line in advance + ! to keep the store pipeline moving. + + cmp %o4, ST_CHUNK*64 + bge,pt %xcc, .wr_loop_start + mov ST_CHUNK, %g1 + + brz,a,pn %o4, .asi_done + add %o5, 8, %o5 ! restore %o5 offset + +.wr_loop_small: + stxa %o1, [%o5+8]%asi + stxa %o1, [%o5+8+8]%asi + stxa %o1, [%o5+16+8]%asi + stxa %o1, [%o5+24+8]%asi + stxa %o1, [%o5+32+8]%asi + subcc %o4, 64, %o4 + stxa %o1, [%o5+40+8]%asi + add %o5, 64, %o5 + stxa %o1, [%o5-8]%asi + bgu,pt %xcc, .wr_loop_small + stxa %o1, [%o5]ASI_STBI_P + + ba .asi_done + add %o5, 8, %o5 ! restore %o5 offset + + ! Special case loop for zero fill memsets + ! For each 64 byte cache line, single STBI to first element + ! 
clears line +.wrzero: + cmp %o4, MIN_ZERO ! check if enough bytes to set + ! to pay %asi + membar cost + blu %xcc, .short_set + nop + sub %o4, 256, %o4 + +.wrzero_loop: + mov 64, %g3 + stxa %o1, [%o5]ASI_STBI_P + subcc %o4, 256, %o4 + stxa %o1, [%o5+%g3]ASI_STBI_P + add %o5, 256, %o5 + sub %g3, 192, %g3 + stxa %o1, [%o5+%g3]ASI_STBI_P + add %g3, 64, %g3 + bge,pt %xcc, .wrzero_loop + stxa %o1, [%o5+%g3]ASI_STBI_P + add %o4, 256, %o4 + + brz,pn %o4, .bsi_done + nop + +.wrzero_small: + stxa %o1, [%o5]ASI_STBI_P + subcc %o4, 64, %o4 + bgu,pt %xcc, .wrzero_small + add %o5, 64, %o5 + ba,a .bsi_done + +.asi_done: + wr %g3, 0x0, %asi ! restored saved %asi +.bsi_done: + membar #StoreStore ! required by use of Block Store Init + +.short_set: + cmp %o4, 64 ! check if 64 bytes to set + blu %xcc, 5f + nop +4: ! set final blocks of 64 bytes + stx %o1, [%o5] + stx %o1, [%o5+8] + stx %o1, [%o5+16] + stx %o1, [%o5+24] + subcc %o4, 64, %o4 + stx %o1, [%o5+32] + stx %o1, [%o5+40] + add %o5, 64, %o5 + stx %o1, [%o5-16] + bgu,pt %xcc, 4b + stx %o1, [%o5-8] + +5: + ! Set the remaining long words +.wrshort: + subcc %o3, 8, %o3 ! Can we store any long words? + blu,pn %xcc, .wrchars + and %o2, 7, %o2 ! calc bytes left after long words +6: + subcc %o3, 8, %o3 + stx %o1, [%o5] ! store the long words + bgeu,pt %xcc, 6b + add %o5, 8, %o5 + +.wrchars: ! check for extra chars + brnz %o2, .wrfin + nop + retl + nop + +.wdalign: + andcc %o5, 3, %o3 ! is sp1 aligned on a word boundary + bz,pn %xcc, .wrword + andn %o2, 3, %o3 ! create word sized count in %o3 + + dec %o2 ! decrement count + stb %o1, [%o5] ! clear a byte + b .wdalign + inc %o5 ! next byte + +.wrword: + subcc %o3, 4, %o3 + st %o1, [%o5] ! 4-byte writing loop + bnz,pt %xcc, .wrword + add %o5, 4, %o5 + + and %o2, 3, %o2 ! leftover count, if any + +.wrchar: + ! Set the remaining bytes, if any + brz %o2, .exit + nop +.wrfin: + deccc %o2 + stb %o1, [%o5] + bgu,pt %xcc, .wrfin + inc %o5 +.exit: + retl ! %o0 was preserved + nop + + .size M7memset,.-M7memset diff --git a/arch/sparc/lib/M7patch.S b/arch/sparc/lib/M7patch.S new file mode 100644 index 000000000000..9000b7bc5f2b --- /dev/null +++ b/arch/sparc/lib/M7patch.S @@ -0,0 +1,51 @@ +/* + * M7patch.S: Patch generic routines with M7 variant. + * + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
+ */ + +#include + +#define BRANCH_ALWAYS 0x10680000 +#define NOP 0x01000000 +#define NG_DO_PATCH(OLD, NEW) \ + sethi %hi(NEW), %g1; \ + or %g1, %lo(NEW), %g1; \ + sethi %hi(OLD), %g2; \ + or %g2, %lo(OLD), %g2; \ + sub %g1, %g2, %g1; \ + sethi %hi(BRANCH_ALWAYS), %g3; \ + sll %g1, 11, %g1; \ + srl %g1, 11 + 2, %g1; \ + or %g3, %lo(BRANCH_ALWAYS), %g3; \ + or %g3, %g1, %g3; \ + stw %g3, [%g2]; \ + sethi %hi(NOP), %g3; \ + or %g3, %lo(NOP), %g3; \ + stw %g3, [%g2 + 0x4]; \ + flush %g2; + +ENTRY(m7_patch_copyops) + NG_DO_PATCH(memcpy, M7memcpy) + NG_DO_PATCH(raw_copy_from_user, M7copy_from_user) + NG_DO_PATCH(raw_copy_to_user, M7copy_to_user) + retl + nop +ENDPROC(m7_patch_copyops) + +ENTRY(m7_patch_bzero) + NG_DO_PATCH(memset, M7memset) + NG_DO_PATCH(__bzero, M7bzero) + NG_DO_PATCH(__clear_user, NGclear_user) + NG_DO_PATCH(tsb_init, NGtsb_init) + retl + nop +ENDPROC(m7_patch_bzero) + +ENTRY(m7_patch_pageops) + NG_DO_PATCH(copy_user_page, NG4copy_user_page) + NG_DO_PATCH(_clear_page, M7clear_page) + NG_DO_PATCH(clear_user_page, M7clear_user_page) + retl + nop +ENDPROC(m7_patch_pageops) diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 37930c0777fe..a1a2d39ec96e 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -38,6 +38,9 @@ lib-$(CONFIG_SPARC64) += NG4patch.o NG4copy_page.o NG4clear_page.o NG4memset.o lib-$(CONFIG_SPARC64) += Memcpy_utils.o +lib-$(CONFIG_SPARC64) += M7memcpy.o M7copy_from_user.o M7copy_to_user.o +lib-$(CONFIG_SPARC64) += M7patch.o M7memset.o + lib-$(CONFIG_SPARC64) += GENmemcpy.o GENcopy_from_user.o GENcopy_to_user.o lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o -- cgit v1.2.3 From 34060b8fffa76ded52d9e115d6b759b0456114ee Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Mon, 7 Aug 2017 17:52:52 -0600 Subject: arch/sparc: Add accurate exception reporting in M7memcpy Add accurate exception reporting in M7memcpy Signed-off-by: Babu Moger Signed-off-by: David S. 
Miller --- arch/sparc/lib/M7copy_from_user.S | 11 +- arch/sparc/lib/M7copy_to_user.S | 10 +- arch/sparc/lib/M7memcpy.S | 396 +++++++++++++++++++------------------- arch/sparc/lib/Memcpy_utils.S | 182 ++++++++++++++++++ 4 files changed, 390 insertions(+), 209 deletions(-) diff --git a/arch/sparc/lib/M7copy_from_user.S b/arch/sparc/lib/M7copy_from_user.S index d0689d762726..66464b3e3649 100644 --- a/arch/sparc/lib/M7copy_from_user.S +++ b/arch/sparc/lib/M7copy_from_user.S @@ -5,23 +5,22 @@ */ -#define EX_LD(x) \ +#define EX_LD(x, y) \ 98: x; \ .section __ex_table,"a"; \ .align 4; \ - .word 98b, __restore_asi; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_LD_FP(x) \ +#define EX_LD_FP(x, y) \ 98: x; \ .section __ex_table,"a"; \ .align 4; \ - .word 98b, __restore_asi_fp; \ + .word 98b, y##_fp; \ .text; \ .align 4; - #ifndef ASI_AIUS #define ASI_AIUS 0x11 #endif @@ -35,7 +34,7 @@ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ bne,pn %icc, raw_copy_in_user; \ - nop + nop #endif #include "M7memcpy.S" diff --git a/arch/sparc/lib/M7copy_to_user.S b/arch/sparc/lib/M7copy_to_user.S index d3be1327d7ce..a60ac467f808 100644 --- a/arch/sparc/lib/M7copy_to_user.S +++ b/arch/sparc/lib/M7copy_to_user.S @@ -5,19 +5,19 @@ */ -#define EX_ST(x) \ +#define EX_ST(x, y) \ 98: x; \ .section __ex_table,"a"; \ .align 4; \ - .word 98b, __restore_asi; \ + .word 98b, y; \ .text; \ .align 4; -#define EX_ST_FP(x) \ +#define EX_ST_FP(x, y) \ 98: x; \ .section __ex_table,"a"; \ .align 4; \ - .word 98b, __restore_asi_fp; \ + .word 98b, y##_fp; \ .text; \ .align 4; @@ -45,7 +45,7 @@ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ bne,pn %icc, raw_copy_in_user; \ - nop + nop #endif #include "M7memcpy.S" diff --git a/arch/sparc/lib/M7memcpy.S b/arch/sparc/lib/M7memcpy.S index 2c88175d3abf..cbd42ea7c3f7 100644 --- a/arch/sparc/lib/M7memcpy.S +++ b/arch/sparc/lib/M7memcpy.S @@ -96,17 +96,17 @@ #endif #ifndef EX_LD -#define EX_LD(x) x +#define EX_LD(x,y) x #endif #ifndef EX_LD_FP -#define EX_LD_FP(x) x +#define EX_LD_FP(x,y) x #endif #ifndef EX_ST -#define EX_ST(x) x +#define EX_ST(x,y) x #endif #ifndef EX_ST_FP -#define EX_ST_FP(x) x +#define EX_ST_FP(x,y) x #endif #ifndef EX_RETVAL @@ -206,9 +206,9 @@ FUNC_NAME: sub %o1, %o0, %o1 ! %o1 gets the difference 7: ! dst aligning loop add %o1, %o0, %o4 - EX_LD(LOAD(ldub, %o4, %o4)) ! load one byte + EX_LD(LOAD(ldub, %o4, %o4), memcpy_retl_o2_plus_o5) ! load one byte subcc %o5, 1, %o5 - EX_ST(STORE(stb, %o4, %o0)) + EX_ST(STORE(stb, %o4, %o0), memcpy_retl_o2_plus_o5_plus_1) bgu,pt %xcc, 7b add %o0, 1, %o0 ! advance dst add %o1, %o0, %o1 ! restore %o1 @@ -233,64 +233,64 @@ FUNC_NAME: ble,pn %xcc, .Lmedl63 ! skip big loop if less than 64 bytes nop .Lmedl64: - EX_LD(LOAD(ldx, %o1, %o4)) ! load + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_63) ! load subcc %o2, 64, %o2 ! decrement length count - EX_ST(STORE(stx, %o4, %o0)) ! and store - EX_LD(LOAD(ldx, %o1+8, %o3)) ! a block of 64 bytes - EX_ST(STORE(stx, %o3, %o0+8)) - EX_LD(LOAD(ldx, %o1+16, %o4)) - EX_ST(STORE(stx, %o4, %o0+16)) - EX_LD(LOAD(ldx, %o1+24, %o3)) - EX_ST(STORE(stx, %o3, %o0+24)) - EX_LD(LOAD(ldx, %o1+32, %o4)) ! load - EX_ST(STORE(stx, %o4, %o0+32)) ! and store - EX_LD(LOAD(ldx, %o1+40, %o3)) ! a block of 64 bytes + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_63_64) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_63_56) ! 
a block of 64 + EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_63_56) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_63_48) + EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_63_48) + EX_LD(LOAD(ldx, %o1+24, %o3), memcpy_retl_o2_plus_63_40) + EX_ST(STORE(stx, %o3, %o0+24), memcpy_retl_o2_plus_63_40) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_63_32)! load and store + EX_ST(STORE(stx, %o4, %o0+32), memcpy_retl_o2_plus_63_32) + EX_LD(LOAD(ldx, %o1+40, %o3), memcpy_retl_o2_plus_63_24)! a block of 64 add %o1, 64, %o1 ! increase src ptr by 64 - EX_ST(STORE(stx, %o3, %o0+40)) - EX_LD(LOAD(ldx, %o1-16, %o4)) + EX_ST(STORE(stx, %o3, %o0+40), memcpy_retl_o2_plus_63_24) + EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_63_16) add %o0, 64, %o0 ! increase dst ptr by 64 - EX_ST(STORE(stx, %o4, %o0-16)) - EX_LD(LOAD(ldx, %o1-8, %o3)) + EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_63_16) + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_63_8) bgu,pt %xcc, .Lmedl64 ! repeat if at least 64 bytes left - EX_ST(STORE(stx, %o3, %o0-8)) + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_63_8) .Lmedl63: addcc %o2, 32, %o2 ! adjust remaining count ble,pt %xcc, .Lmedl31 ! to skip if 31 or fewer bytes left nop - EX_LD(LOAD(ldx, %o1, %o4)) ! load + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_31) ! load sub %o2, 32, %o2 ! decrement length count - EX_ST(STORE(stx, %o4, %o0)) ! and store - EX_LD(LOAD(ldx, %o1+8, %o3)) ! a block of 32 bytes + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_31_32) ! and store + EX_LD(LOAD(ldx, %o1+8, %o3), memcpy_retl_o2_plus_31_24) ! a block of 32 add %o1, 32, %o1 ! increase src ptr by 32 - EX_ST(STORE(stx, %o3, %o0+8)) - EX_LD(LOAD(ldx, %o1-16, %o4)) + EX_ST(STORE(stx, %o3, %o0+8), memcpy_retl_o2_plus_31_24) + EX_LD(LOAD(ldx, %o1-16, %o4), memcpy_retl_o2_plus_31_16) add %o0, 32, %o0 ! increase dst ptr by 32 - EX_ST(STORE(stx, %o4, %o0-16)) - EX_LD(LOAD(ldx, %o1-8, %o3)) - EX_ST(STORE(stx, %o3, %o0-8)) + EX_ST(STORE(stx, %o4, %o0-16), memcpy_retl_o2_plus_31_16) + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_31_8) + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_31_8) .Lmedl31: addcc %o2, 16, %o2 ! adjust remaining count ble,pt %xcc, .Lmedl15 ! skip if 15 or fewer bytes left nop ! - EX_LD(LOAD(ldx, %o1, %o4)) + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_15) add %o1, 16, %o1 ! increase src ptr by 16 - EX_ST(STORE(stx, %o4, %o0)) + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_15) sub %o2, 16, %o2 ! decrease count by 16 - EX_LD(LOAD(ldx, %o1-8, %o3)) + EX_LD(LOAD(ldx, %o1-8, %o3), memcpy_retl_o2_plus_15_8) add %o0, 16, %o0 ! increase dst ptr by 16 - EX_ST(STORE(stx, %o3, %o0-8)) + EX_ST(STORE(stx, %o3, %o0-8), memcpy_retl_o2_plus_15_8) .Lmedl15: addcc %o2, 15, %o2 ! restore count bz,pt %xcc, .Lsmallx ! exit if finished cmp %o2, 8 blt,pt %xcc, .Lmedw7 ! skip if 7 or fewer bytes left tst %o2 - EX_LD(LOAD(ldx, %o1, %o4)) ! load 8 bytes + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) ! load 8 bytes add %o1, 8, %o1 ! increase src ptr by 8 add %o0, 8, %o0 ! increase dst ptr by 8 subcc %o2, 8, %o2 ! decrease count by 8 bnz,pn %xcc, .Lmedw7 - EX_ST(STORE(stx, %o4, %o0-8)) ! and store 8 bytes + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) ! and store 8 retl mov EX_RETVAL(%g1), %o0 ! restore %o0 @@ -318,30 +318,30 @@ FUNC_NAME: ! for end of loop ble,pt %xcc, .Lmedw31 ! skip big loop if less than 16 .Lmedw32: - EX_LD(LOAD(ld, %o1, %o4)) ! move a block of 32 bytes + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_31)! 
move a block of 32 sllx %o4, 32, %o5 - EX_LD(LOAD(ld, %o1+4, %o4)) + EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_31) or %o4, %o5, %o5 - EX_ST(STORE(stx, %o5, %o0)) + EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_31) subcc %o2, 32, %o2 ! decrement length count - EX_LD(LOAD(ld, %o1+8, %o4)) + EX_LD(LOAD(ld, %o1+8, %o4), memcpy_retl_o2_plus_31_24) sllx %o4, 32, %o5 - EX_LD(LOAD(ld, %o1+12, %o4)) + EX_LD(LOAD(ld, %o1+12, %o4), memcpy_retl_o2_plus_31_24) or %o4, %o5, %o5 - EX_ST(STORE(stx, %o5, %o0+8)) + EX_ST(STORE(stx, %o5, %o0+8), memcpy_retl_o2_plus_31_24) add %o1, 32, %o1 ! increase src ptr by 32 - EX_LD(LOAD(ld, %o1-16, %o4)) + EX_LD(LOAD(ld, %o1-16, %o4), memcpy_retl_o2_plus_31_16) sllx %o4, 32, %o5 - EX_LD(LOAD(ld, %o1-12, %o4)) + EX_LD(LOAD(ld, %o1-12, %o4), memcpy_retl_o2_plus_31_16) or %o4, %o5, %o5 - EX_ST(STORE(stx, %o5, %o0+16)) + EX_ST(STORE(stx, %o5, %o0+16), memcpy_retl_o2_plus_31_16) add %o0, 32, %o0 ! increase dst ptr by 32 - EX_LD(LOAD(ld, %o1-8, %o4)) + EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_31_8) sllx %o4, 32, %o5 - EX_LD(LOAD(ld, %o1-4, %o4)) + EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_31_8) or %o4, %o5, %o5 bgu,pt %xcc, .Lmedw32 ! repeat if at least 32 bytes left - EX_ST(STORE(stx, %o5, %o0-8)) + EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_31_8) .Lmedw31: addcc %o2, 31, %o2 ! restore count @@ -350,42 +350,42 @@ FUNC_NAME: cmp %o2, 16 blt,pt %xcc, .Lmedw15 nop - EX_LD(LOAD(ld, %o1, %o4)) ! move a block of 16 bytes + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2)! move a block of 16 bytes sllx %o4, 32, %o5 subcc %o2, 16, %o2 ! decrement length count - EX_LD(LOAD(ld, %o1+4, %o4)) + EX_LD(LOAD(ld, %o1+4, %o4), memcpy_retl_o2_plus_16) or %o4, %o5, %o5 - EX_ST(STORE(stx, %o5, %o0)) + EX_ST(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_16) add %o1, 16, %o1 ! increase src ptr by 16 - EX_LD(LOAD(ld, %o1-8, %o4)) + EX_LD(LOAD(ld, %o1-8, %o4), memcpy_retl_o2_plus_8) add %o0, 16, %o0 ! increase dst ptr by 16 sllx %o4, 32, %o5 - EX_LD(LOAD(ld, %o1-4, %o4)) + EX_LD(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_8) or %o4, %o5, %o5 - EX_ST(STORE(stx, %o5, %o0-8)) + EX_ST(STORE(stx, %o5, %o0-8), memcpy_retl_o2_plus_8) .Lmedw15: bz,pt %xcc, .Lsmallx ! exit if finished cmp %o2, 8 blt,pn %xcc, .Lmedw7 ! skip if 7 or fewer bytes left tst %o2 - EX_LD(LOAD(ld, %o1, %o4)) ! load 4 bytes + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes subcc %o2, 8, %o2 ! decrease count by 8 - EX_ST(STORE(stw, %o4, %o0)) ! and store 4 bytes + EX_ST(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_8)! and store 4 bytes add %o1, 8, %o1 ! increase src ptr by 8 - EX_LD(LOAD(ld, %o1-4, %o3)) ! load 4 bytes + EX_LD(LOAD(ld, %o1-4, %o3), memcpy_retl_o2_plus_4) ! load 4 bytes add %o0, 8, %o0 ! increase dst ptr by 8 - EX_ST(STORE(stw, %o3, %o0-4)) ! and store 4 bytes + EX_ST(STORE(stw, %o3, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes bz,pt %xcc, .Lsmallx ! exit if finished .Lmedw7: ! count is ge 1, less than 8 cmp %o2, 4 ! check for 4 bytes left blt,pn %xcc, .Lsmallleft3 ! skip if 3 or fewer bytes left nop ! - EX_LD(LOAD(ld, %o1, %o4)) ! load 4 bytes + EX_LD(LOAD(ld, %o1, %o4), memcpy_retl_o2) ! load 4 bytes add %o1, 4, %o1 ! increase src ptr by 4 add %o0, 4, %o0 ! increase dst ptr by 4 subcc %o2, 4, %o2 ! decrease count by 4 bnz .Lsmallleft3 - EX_ST(STORE(stw, %o4, %o0-4))! and store 4 bytes + EX_ST(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_4)! and store 4 bytes retl mov EX_RETVAL(%g1), %o0 @@ -397,37 +397,37 @@ FUNC_NAME: andcc %o0, 8, %o3 ! odd long words to move? 
brz,pt %o3, .Laligned_to_16 nop - EX_LD(LOAD(ldx, %o1, %o4)) + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) sub %o2, 8, %o2 add %o1, 8, %o1 ! increment src ptr add %o0, 8, %o0 ! increment dst ptr - EX_ST(STORE(stx, %o4, %o0-8)) + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) .Laligned_to_16: andcc %o0, 16, %o3 ! pair of long words to move? brz,pt %o3, .Laligned_to_32 nop - EX_LD(LOAD(ldx, %o1, %o4)) + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) sub %o2, 16, %o2 - EX_ST(STORE(stx, %o4, %o0)) + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_16) add %o1, 16, %o1 ! increment src ptr - EX_LD(LOAD(ldx, %o1-8, %o4)) + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) add %o0, 16, %o0 ! increment dst ptr - EX_ST(STORE(stx, %o4, %o0-8)) + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) .Laligned_to_32: andcc %o0, 32, %o3 ! four long words to move? brz,pt %o3, .Laligned_to_64 nop - EX_LD(LOAD(ldx, %o1, %o4)) + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2) sub %o2, 32, %o2 - EX_ST(STORE(stx, %o4, %o0)) - EX_LD(LOAD(ldx, %o1+8, %o4)) - EX_ST(STORE(stx, %o4, %o0+8)) - EX_LD(LOAD(ldx, %o1+16, %o4)) - EX_ST(STORE(stx, %o4, %o0+16)) + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_32) + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_24) + EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_24) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_16) + EX_ST(STORE(stx, %o4, %o0+16), memcpy_retl_o2_plus_16) add %o1, 32, %o1 ! increment src ptr - EX_LD(LOAD(ldx, %o1-8, %o4)) + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_8) add %o0, 32, %o0 ! increment dst ptr - EX_ST(STORE(stx, %o4, %o0-8)) + EX_ST(STORE(stx, %o4, %o0-8), memcpy_retl_o2_plus_8) .Laligned_to_64: ! ! Using block init store (BIS) instructions to avoid fetching cache @@ -461,10 +461,10 @@ FUNC_NAME: .Lalign_loop_start: prefetch [%o1 + (ALIGN_PRE * BLOCK_SIZE)], 21 subcc %o3, 1, %o3 - EX_LD(LOAD(ldx, %o1, %o4)) + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) add %o1, 64, %o1 add %o0, 8, %o0 - EX_ST(STORE_INIT_MRU(%o4, %o0)) + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) bgu %xcc,.Lalign_loop_start add %o0, 56, %o0 @@ -474,32 +474,32 @@ FUNC_NAME: sub %o0, %o4, %o0 ! 
reset %o0 .Lalign_loop_rest: - EX_LD(LOAD(ldx, %o1+8, %o4)) + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) add %o0, 16, %o0 - EX_ST(STORE_INIT_MRU(%o4, %o0)) - EX_LD(LOAD(ldx, %o1+16, %o4)) + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 - EX_ST(STORE_INIT_MRU(%o4, %o0)) + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) subcc %o3, 1, %o3 - EX_LD(LOAD(ldx, %o1+24, %o4)) + EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 - EX_ST(STORE_INIT_MRU(%o4, %o0)) - EX_LD(LOAD(ldx, %o1+32, %o4)) + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 - EX_ST(STORE_INIT_MRU(%o4, %o0)) - EX_LD(LOAD(ldx, %o1+40, %o4)) + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 - EX_ST(STORE_INIT_MRU(%o4, %o0)) - EX_LD(LOAD(ldx, %o1+48, %o4)) + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5) add %o1, 64, %o1 add %o0, 8, %o0 - EX_ST(STORE_INIT_MRU(%o4, %o0)) + EX_ST(STORE_INIT_MRU(%o4, %o0), memcpy_retl_o2_plus_o5) add %o0, 8, %o0 - EX_LD(LOAD(ldx, %o1-8, %o4)) + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5) sub %o5, 64, %o5 bgu %xcc,.Lalign_loop_rest ! mark cache line as LRU - EX_ST(STORE_INIT(%o4, %o0)) + EX_ST(STORE_INIT(%o4, %o0), memcpy_retl_o2_plus_o5_plus_64) cmp %o5, ST_CHUNK*64 bgu,pt %xcc, .Lalign_loop_start @@ -509,26 +509,26 @@ FUNC_NAME: beq .Lalign_done nop .Lalign_loop_fin: - EX_LD(LOAD(ldx, %o1, %o4)) - EX_ST(STORE(stx, %o4, %o0+8)) - EX_LD(LOAD(ldx, %o1+8, %o4)) - EX_ST(STORE(stx, %o4, %o0+8+8)) - EX_LD(LOAD(ldx, %o1+16, %o4)) - EX_ST(STORE(stx, %o4, %o0+8+16)) + EX_LD(LOAD(ldx, %o1, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+8, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8+8), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1+16, %o4), memcpy_retl_o2_plus_o5) + EX_ST(STORE(stx, %o4, %o0+8+16), memcpy_retl_o2_plus_o5) subcc %o5, 64, %o5 - EX_LD(LOAD(ldx, %o1+24, %o4)) - EX_ST(STORE(stx, %o4, %o0+8+24)) - EX_LD(LOAD(ldx, %o1+32, %o4)) - EX_ST(STORE(stx, %o4, %o0+8+32)) - EX_LD(LOAD(ldx, %o1+40, %o4)) - EX_ST(STORE(stx, %o4, %o0+8+40)) - EX_LD(LOAD(ldx, %o1+48, %o4)) + EX_LD(LOAD(ldx, %o1+24, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+24), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+32, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+32), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+40, %o4), memcpy_retl_o2_plus_o5_64) + EX_ST(STORE(stx, %o4, %o0+8+40), memcpy_retl_o2_plus_o5_64) + EX_LD(LOAD(ldx, %o1+48, %o4), memcpy_retl_o2_plus_o5_64) add %o1, 64, %o1 - EX_ST(STORE(stx, %o4, %o0+8+48)) + EX_ST(STORE(stx, %o4, %o0+8+48), memcpy_retl_o2_plus_o5_64) add %o0, 64, %o0 - EX_LD(LOAD(ldx, %o1-8, %o4)) + EX_LD(LOAD(ldx, %o1-8, %o4), memcpy_retl_o2_plus_o5_64) bgu %xcc,.Lalign_loop_fin - EX_ST(STORE(stx, %o4, %o0)) + EX_ST(STORE(stx, %o4, %o0), memcpy_retl_o2_plus_o5_64) .Lalign_done: add %o0, 8, %o0 ! restore %o0 from ASI alignment @@ -563,34 +563,34 @@ FUNC_NAME: .Lunalign_adjust: alignaddr %o1, %g0, %g0 ! generate %gsr add %o1, %o5, %o1 ! 
advance %o1 to after blocks - EX_LD_FP(LOAD(ldd, %o4, %f0)) + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5) .Lunalign_loop: - EX_LD_FP(LOAD(ldd, %o4+8, %f2)) + EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) faligndata %f0, %f2, %f16 - EX_LD_FP(LOAD(ldd, %o4+16, %f4)) + EX_LD_FP(LOAD(ldd, %o4+16, %f4), memcpy_retl_o2_plus_o5) subcc %o5, BLOCK_SIZE, %o5 - EX_ST_FP(STORE(std, %f16, %o0)) + EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5_plus_64) faligndata %f2, %f4, %f18 - EX_LD_FP(LOAD(ldd, %o4+24, %f6)) - EX_ST_FP(STORE(std, %f18, %o0+8)) + EX_LD_FP(LOAD(ldd, %o4+24, %f6), memcpy_retl_o2_plus_o5_plus_56) + EX_ST_FP(STORE(std, %f18, %o0+8), memcpy_retl_o2_plus_o5_plus_56) faligndata %f4, %f6, %f20 - EX_LD_FP(LOAD(ldd, %o4+32, %f8)) - EX_ST_FP(STORE(std, %f20, %o0+16)) + EX_LD_FP(LOAD(ldd, %o4+32, %f8), memcpy_retl_o2_plus_o5_plus_48) + EX_ST_FP(STORE(std, %f20, %o0+16), memcpy_retl_o2_plus_o5_plus_48) faligndata %f6, %f8, %f22 - EX_LD_FP(LOAD(ldd, %o4+40, %f10)) - EX_ST_FP(STORE(std, %f22, %o0+24)) + EX_LD_FP(LOAD(ldd, %o4+40, %f10), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f22, %o0+24), memcpy_retl_o2_plus_o5_plus_40) faligndata %f8, %f10, %f24 - EX_LD_FP(LOAD(ldd, %o4+48, %f12)) - EX_ST_FP(STORE(std, %f24, %o0+32)) + EX_LD_FP(LOAD(ldd, %o4+48, %f12), memcpy_retl_o2_plus_o5_plus_32) + EX_ST_FP(STORE(std, %f24, %o0+32), memcpy_retl_o2_plus_o5_plus_32) faligndata %f10, %f12, %f26 - EX_LD_FP(LOAD(ldd, %o4+56, %f14)) + EX_LD_FP(LOAD(ldd, %o4+56, %f14), memcpy_retl_o2_plus_o5_plus_24) add %o4, BLOCK_SIZE, %o4 - EX_ST_FP(STORE(std, %f26, %o0+40)) + EX_ST_FP(STORE(std, %f26, %o0+40), memcpy_retl_o2_plus_o5_plus_24) faligndata %f12, %f14, %f28 - EX_LD_FP(LOAD(ldd, %o4, %f0)) - EX_ST_FP(STORE(std, %f28, %o0+48)) + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5_plus_16) + EX_ST_FP(STORE(std, %f28, %o0+48), memcpy_retl_o2_plus_o5_plus_16) faligndata %f14, %f0, %f30 - EX_ST_FP(STORE(std, %f30, %o0+56)) + EX_ST_FP(STORE(std, %f30, %o0+56), memcpy_retl_o2_plus_o5_plus_8) add %o0, BLOCK_SIZE, %o0 bgu,pt %xcc, .Lunalign_loop prefetch [%o4 + (5 * BLOCK_SIZE)], 20 @@ -612,27 +612,27 @@ FUNC_NAME: nop ! Src is word aligned .Lunalignword: - EX_LD_FP(LOAD(ld, %o1, %o4)) ! load 4 bytes + EX_LD_FP(LOAD(ld, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 4 bytes add %o1, 8, %o1 ! increase src ptr by 8 - EX_ST_FP(STORE(stw, %o4, %o0)) ! and store 4 bytes + EX_ST_FP(STORE(stw, %o4, %o0), memcpy_retl_o2_plus_o3) ! and store 4 subcc %o3, 8, %o3 ! decrease count by 8 - EX_LD_FP(LOAD(ld, %o1-4, %o4)) ! load 4 bytes + EX_LD_FP(LOAD(ld, %o1-4, %o4), memcpy_retl_o2_plus_o3_plus_4)! load 4 add %o0, 8, %o0 ! increase dst ptr by 8 bnz %xcc, .Lunalignword - EX_ST_FP(STORE(stw, %o4, %o0-4))! and store 4 bytes + EX_ST_FP(STORE(stw, %o4, %o0-4), memcpy_retl_o2_plus_o3_plus_4) ba .Lunalignsrc nop ! Src is half-word aligned .Lunalignhalf: - EX_LD_FP(LOAD(lduh, %o1, %o4)) ! load 2 bytes + EX_LD_FP(LOAD(lduh, %o1, %o4), memcpy_retl_o2_plus_o3) ! load 2 bytes sllx %o4, 32, %o5 ! shift left - EX_LD_FP(LOAD(lduw, %o1+2, %o4)) + EX_LD_FP(LOAD(lduw, %o1+2, %o4), memcpy_retl_o2_plus_o3) or %o4, %o5, %o5 sllx %o5, 16, %o5 - EX_LD_FP(LOAD(lduh, %o1+6, %o4)) + EX_LD_FP(LOAD(lduh, %o1+6, %o4), memcpy_retl_o2_plus_o3) or %o4, %o5, %o5 - EX_ST_FP(STORE(stx, %o5, %o0)) + EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) add %o1, 8, %o1 subcc %o3, 8, %o3 bnz %xcc, .Lunalignhalf @@ -644,21 +644,21 @@ FUNC_NAME: .Lunalignbyte: sub %o0, %o1, %o0 ! 
share pointer advance .Lunalignbyte_loop: - EX_LD_FP(LOAD(ldub, %o1, %o4)) + EX_LD_FP(LOAD(ldub, %o1, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 56, %o5 - EX_LD_FP(LOAD(lduh, %o1+1, %o4)) + EX_LD_FP(LOAD(lduh, %o1+1, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 40, %o4 or %o4, %o5, %o5 - EX_LD_FP(LOAD(lduh, %o1+3, %o4)) + EX_LD_FP(LOAD(lduh, %o1+3, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 24, %o4 or %o4, %o5, %o5 - EX_LD_FP(LOAD(lduh, %o1+5, %o4)) + EX_LD_FP(LOAD(lduh, %o1+5, %o4), memcpy_retl_o2_plus_o3) sllx %o4, 8, %o4 or %o4, %o5, %o5 - EX_LD_FP(LOAD(ldub, %o1+7, %o4)) + EX_LD_FP(LOAD(ldub, %o1+7, %o4), memcpy_retl_o2_plus_o3) or %o4, %o5, %o5 add %o0, %o1, %o0 - EX_ST_FP(STORE(stx, %o5, %o0)) + EX_ST_FP(STORE(stx, %o5, %o0), memcpy_retl_o2_plus_o3) sub %o0, %o1, %o0 subcc %o3, 8, %o3 bnz %xcc, .Lunalignbyte_loop @@ -676,36 +676,36 @@ FUNC_NAME: alignaddr %o1, %g0, %g0 ! generate %gsr add %o1, %o5, %o1 ! advance %o1 to after blocks - EX_LD_FP(LOAD(ldd, %o4, %f14)) + EX_LD_FP(LOAD(ldd, %o4, %f14), memcpy_retl_o2_plus_o5) add %o4, 8, %o4 .Lunalign_sloop: - EX_LD_FP(LOAD(ldd, %o4, %f16)) + EX_LD_FP(LOAD(ldd, %o4, %f16), memcpy_retl_o2_plus_o5) faligndata %f14, %f16, %f0 - EX_LD_FP(LOAD(ldd, %o4+8, %f18)) + EX_LD_FP(LOAD(ldd, %o4+8, %f18), memcpy_retl_o2_plus_o5) faligndata %f16, %f18, %f2 - EX_LD_FP(LOAD(ldd, %o4+16, %f20)) + EX_LD_FP(LOAD(ldd, %o4+16, %f20), memcpy_retl_o2_plus_o5) faligndata %f18, %f20, %f4 - EX_ST_FP(STORE(std, %f0, %o0)) + EX_ST_FP(STORE(std, %f0, %o0), memcpy_retl_o2_plus_o5) subcc %o5, 64, %o5 - EX_LD_FP(LOAD(ldd, %o4+24, %f22)) + EX_LD_FP(LOAD(ldd, %o4+24, %f22), memcpy_retl_o2_plus_o5_plus_56) faligndata %f20, %f22, %f6 - EX_ST_FP(STORE(std, %f2, %o0+8)) - EX_LD_FP(LOAD(ldd, %o4+32, %f24)) + EX_ST_FP(STORE(std, %f2, %o0+8), memcpy_retl_o2_plus_o5_plus_56) + EX_LD_FP(LOAD(ldd, %o4+32, %f24), memcpy_retl_o2_plus_o5_plus_48) faligndata %f22, %f24, %f8 - EX_ST_FP(STORE(std, %f4, %o0+16)) - EX_LD_FP(LOAD(ldd, %o4+40, %f26)) + EX_ST_FP(STORE(std, %f4, %o0+16), memcpy_retl_o2_plus_o5_plus_48) + EX_LD_FP(LOAD(ldd, %o4+40, %f26), memcpy_retl_o2_plus_o5_plus_40) faligndata %f24, %f26, %f10 - EX_ST_FP(STORE(std, %f6, %o0+24)) - EX_LD_FP(LOAD(ldd, %o4+48, %f28)) + EX_ST_FP(STORE(std, %f6, %o0+24), memcpy_retl_o2_plus_o5_plus_40) + EX_LD_FP(LOAD(ldd, %o4+48, %f28), memcpy_retl_o2_plus_o5_plus_40) faligndata %f26, %f28, %f12 - EX_ST_FP(STORE(std, %f8, %o0+32)) + EX_ST_FP(STORE(std, %f8, %o0+32), memcpy_retl_o2_plus_o5_plus_40) add %o4, 64, %o4 - EX_LD_FP(LOAD(ldd, %o4-8, %f30)) + EX_LD_FP(LOAD(ldd, %o4-8, %f30), memcpy_retl_o2_plus_o5_plus_40) faligndata %f28, %f30, %f14 - EX_ST_FP(STORE(std, %f10, %o0+40)) - EX_ST_FP(STORE(std, %f12, %o0+48)) + EX_ST_FP(STORE(std, %f10, %o0+40), memcpy_retl_o2_plus_o5_plus_40) + EX_ST_FP(STORE(std, %f12, %o0+48), memcpy_retl_o2_plus_o5_plus_40) add %o0, 64, %o0 - EX_ST_FP(STORE(std, %f14, %o0-8)) + EX_ST_FP(STORE(std, %f14, %o0-8), memcpy_retl_o2_plus_o5_plus_40) fsrc2 %f30, %f14 bgu,pt %xcc, .Lunalign_sloop prefetch [%o4 + (8 * BLOCK_SIZE)], 20 @@ -722,13 +722,13 @@ FUNC_NAME: sub %o5, 8, %o5 ! insure we do not load past end of src andn %o1, 0x7, %o4 ! %o4 has long word aligned src address add %o1, %o5, %o1 ! advance %o1 to after multiple of 8 - EX_LD_FP(LOAD(ldd, %o4, %f0)) ! fetch partial word + EX_LD_FP(LOAD(ldd, %o4, %f0), memcpy_retl_o2_plus_o5)! 
fetch partialword .Lunalign_by8: - EX_LD_FP(LOAD(ldd, %o4+8, %f2)) + EX_LD_FP(LOAD(ldd, %o4+8, %f2), memcpy_retl_o2_plus_o5) add %o4, 8, %o4 faligndata %f0, %f2, %f16 subcc %o5, 8, %o5 - EX_ST_FP(STORE(std, %f16, %o0)) + EX_ST_FP(STORE(std, %f16, %o0), memcpy_retl_o2_plus_o5) fsrc2 %f2, %f0 bgu,pt %xcc, .Lunalign_by8 add %o0, 8, %o0 @@ -764,37 +764,37 @@ FUNC_NAME: andncc %o2, 0x20 - 1, %o5 be,pn %xcc, 2f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3)) - EX_LD(LOAD(ldx, %o1 + 0x08, %g2)) - EX_LD(LOAD(ldx, %o1 + 0x10, %g7)) - EX_LD(LOAD(ldx, %o1 + 0x18, %o4)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x08, %g2), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x10, %g7), memcpy_retl_o2_plus_o5) + EX_LD(LOAD(ldx, %o1 + 0x18, %o4), memcpy_retl_o2_plus_o5) add %o1, 0x20, %o1 subcc %o5, 0x20, %o5 - EX_ST(STORE(stx, %o3, %o0 + 0x00)) - EX_ST(STORE(stx, %g2, %o0 + 0x08)) - EX_ST(STORE(stx, %g7, %o0 + 0x10)) - EX_ST(STORE(stx, %o4, %o0 + 0x18)) + EX_ST(STORE(stx, %o3, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_32) + EX_ST(STORE(stx, %g2, %o0 + 0x08), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %g7, %o0 + 0x10), memcpy_retl_o2_plus_o5_plus_24) + EX_ST(STORE(stx, %o4, %o0 + 0x18), memcpy_retl_o2_plus_o5_plus_8) bne,pt %xcc, 1b add %o0, 0x20, %o0 2: andcc %o2, 0x18, %o5 be,pt %xcc, 3f sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3)) +1: EX_LD(LOAD(ldx, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 add %o0, 0x08, %o0 subcc %o5, 0x08, %o5 bne,pt %xcc, 1b - EX_ST(STORE(stx, %o3, %o0 - 0x08)) + EX_ST(STORE(stx, %o3, %o0 - 0x08), memcpy_retl_o2_plus_o5_plus_8) 3: brz,pt %o2, .Lexit_cp cmp %o2, 0x04 bl,pn %xcc, .Ltiny_cp nop - EX_LD(LOAD(lduw, %o1 + 0x00, %o3)) + EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2) add %o1, 0x04, %o1 add %o0, 0x04, %o0 subcc %o2, 0x04, %o2 bne,pn %xcc, .Ltiny_cp - EX_ST(STORE(stw, %o3, %o0 - 0x04)) + EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_4) ba,a,pt %xcc, .Lexit_cp .Lmedium_unaligned_cp: @@ -804,12 +804,12 @@ FUNC_NAME: brz,pt %o3, 2f sub %o2, %o3, %o2 -1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2)) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %g2), memcpy_retl_o2_plus_g1) add %o1, 1, %o1 subcc %o3, 1, %o3 add %o0, 1, %o0 bne,pt %xcc, 1b - EX_ST(STORE(stb, %g2, %o0 - 0x01)) + EX_ST(STORE(stb, %g2, %o0 - 0x01), memcpy_retl_o2_plus_g1_plus_1) 2: and %o1, 0x7, %o3 brz,pn %o3, .Lmedium_noprefetch_cp @@ -817,17 +817,17 @@ FUNC_NAME: mov 64, %g2 sub %g2, %o3, %g2 andn %o1, 0x7, %o1 - EX_LD(LOAD(ldx, %o1 + 0x00, %o4)) + EX_LD(LOAD(ldx, %o1 + 0x00, %o4), memcpy_retl_o2) sllx %o4, %o3, %o4 andn %o2, 0x08 - 1, %o5 sub %o2, %o5, %o2 -1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3)) +1: EX_LD(LOAD(ldx, %o1 + 0x08, %g3), memcpy_retl_o2_plus_o5) add %o1, 0x08, %o1 subcc %o5, 0x08, %o5 srlx %g3, %g2, %g7 or %g7, %o4, %g7 - EX_ST(STORE(stx, %g7, %o0 + 0x00)) + EX_ST(STORE(stx, %g7, %o0 + 0x00), memcpy_retl_o2_plus_o5_plus_8) add %o0, 0x08, %o0 bne,pt %xcc, 1b sllx %g3, %o3, %o4 @@ -838,17 +838,17 @@ FUNC_NAME: ba,pt %xcc, .Lsmall_unaligned_cp .Ltiny_cp: - EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) + EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %xcc, .Lexit_cp - EX_ST(STORE(stb, %o3, %o0 + 0x00)) - EX_LD(LOAD(ldub, %o1 + 0x01, %o3)) + EX_ST(STORE(stb, %o3, %o0 + 0x00), memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x01, %o3), memcpy_retl_o2) subcc %o2, 1, %o2 be,pn %xcc, .Lexit_cp - EX_ST(STORE(stb, %o3, %o0 + 0x01)) - EX_LD(LOAD(ldub, %o1 + 0x02, %o3)) + EX_ST(STORE(stb, %o3, %o0 + 0x01), 
memcpy_retl_o2_plus_1) + EX_LD(LOAD(ldub, %o1 + 0x02, %o3), memcpy_retl_o2) ba,pt %xcc, .Lexit_cp - EX_ST(STORE(stb, %o3, %o0 + 0x02)) + EX_ST(STORE(stb, %o3, %o0 + 0x02), memcpy_retl_o2) .Lsmall_cp: andcc %g2, 0x3, %g0 @@ -856,23 +856,23 @@ FUNC_NAME: andn %o2, 0x4 - 1, %o5 sub %o2, %o5, %o2 1: - EX_LD(LOAD(lduw, %o1 + 0x00, %o3)) + EX_LD(LOAD(lduw, %o1 + 0x00, %o3), memcpy_retl_o2_plus_o5) add %o1, 0x04, %o1 subcc %o5, 0x04, %o5 add %o0, 0x04, %o0 bne,pt %xcc, 1b - EX_ST(STORE(stw, %o3, %o0 - 0x04)) + EX_ST(STORE(stw, %o3, %o0 - 0x04), memcpy_retl_o2_plus_o5_plus_4) brz,pt %o2, .Lexit_cp nop ba,a,pt %xcc, .Ltiny_cp .Lsmall_unaligned_cp: -1: EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) +1: EX_LD(LOAD(ldub, %o1 + 0x00, %o3), memcpy_retl_o2) add %o1, 1, %o1 add %o0, 1, %o0 subcc %o2, 1, %o2 bne,pt %xcc, 1b - EX_ST(STORE(stb, %o3, %o0 - 0x01)) + EX_ST(STORE(stb, %o3, %o0 - 0x01), memcpy_retl_o2_plus_1) ba,a,pt %xcc, .Lexit_cp .Lsmallrest: @@ -883,31 +883,31 @@ FUNC_NAME: nop sub %o2, 3, %o2 .Lsmallnotalign4: - EX_LD(LOAD(ldub, %o1, %o3))! read byte + EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_3)! read byte subcc %o2, 4, %o2 ! reduce count by 4 - EX_ST(STORE(stb, %o3, %o0)) ! write byte - EX_LD(LOAD(ldub, %o1+1, %o3))! repeat for total of 4 bytes + EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_7)! write byte & repeat + EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2_plus_6)! for total of 4 add %o1, 4, %o1 ! advance SRC by 4 - EX_ST(STORE(stb, %o3, %o0+1)) - EX_LD(LOAD(ldub, %o1-2, %o3)) + EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_6) + EX_LD(LOAD(ldub, %o1-2, %o3), memcpy_retl_o2_plus_5) add %o0, 4, %o0 ! advance DST by 4 - EX_ST(STORE(stb, %o3, %o0-2)) - EX_LD(LOAD(ldub, %o1-1, %o3)) + EX_ST(STORE(stb, %o3, %o0-2), memcpy_retl_o2_plus_5) + EX_LD(LOAD(ldub, %o1-1, %o3), memcpy_retl_o2_plus_4) bgu,pt %xcc, .Lsmallnotalign4 ! loop til 3 or fewer bytes remain - EX_ST(STORE(stb, %o3, %o0-1)) + EX_ST(STORE(stb, %o3, %o0-1), memcpy_retl_o2_plus_4) addcc %o2, 3, %o2 ! restore count bz,pt %xcc, .Lsmallx .Lsmallleft3: ! 1, 2, or 3 bytes remain subcc %o2, 1, %o2 - EX_LD(LOAD(ldub, %o1, %o3)) ! load one byte + EX_LD(LOAD(ldub, %o1, %o3), memcpy_retl_o2_plus_1) ! load one byte bz,pt %xcc, .Lsmallx - EX_ST(STORE(stb, %o3, %o0)) ! store one byte - EX_LD(LOAD(ldub, %o1+1, %o3)) ! load second byte + EX_ST(STORE(stb, %o3, %o0), memcpy_retl_o2_plus_1) ! store one byte + EX_LD(LOAD(ldub, %o1+1, %o3), memcpy_retl_o2) ! load second byte subcc %o2, 1, %o2 bz,pt %xcc, .Lsmallx - EX_ST(STORE(stb, %o3, %o0+1))! store second byte - EX_LD(LOAD(ldub, %o1+2, %o3)) ! load third byte - EX_ST(STORE(stb, %o3, %o0+2)) ! store third byte + EX_ST(STORE(stb, %o3, %o0+1), memcpy_retl_o2_plus_1)! store second byte + EX_LD(LOAD(ldub, %o1+2, %o3), memcpy_retl_o2) ! load third byte + EX_ST(STORE(stb, %o3, %o0+2), memcpy_retl_o2) ! 
store third byte .Lsmallx: retl mov EX_RETVAL(%g1), %o0 diff --git a/arch/sparc/lib/Memcpy_utils.S b/arch/sparc/lib/Memcpy_utils.S index bcc5d77b2027..64fbac28b3db 100644 --- a/arch/sparc/lib/Memcpy_utils.S +++ b/arch/sparc/lib/Memcpy_utils.S @@ -24,14 +24,128 @@ ENTRY(memcpy_retl_o2_plus_1) ba,pt %xcc, __restore_asi add %o2, 1, %o0 ENDPROC(memcpy_retl_o2_plus_1) +ENTRY(memcpy_retl_o2_plus_3) + ba,pt %xcc, __restore_asi + add %o2, 3, %o0 +ENDPROC(memcpy_retl_o2_plus_3) ENTRY(memcpy_retl_o2_plus_4) ba,pt %xcc, __restore_asi add %o2, 4, %o0 ENDPROC(memcpy_retl_o2_plus_4) +ENTRY(memcpy_retl_o2_plus_5) + ba,pt %xcc, __restore_asi + add %o2, 5, %o0 +ENDPROC(memcpy_retl_o2_plus_5) +ENTRY(memcpy_retl_o2_plus_6) + ba,pt %xcc, __restore_asi + add %o2, 6, %o0 +ENDPROC(memcpy_retl_o2_plus_6) +ENTRY(memcpy_retl_o2_plus_7) + ba,pt %xcc, __restore_asi + add %o2, 7, %o0 +ENDPROC(memcpy_retl_o2_plus_7) +ENTRY(memcpy_retl_o2_plus_8) + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_8) +ENTRY(memcpy_retl_o2_plus_15) + ba,pt %xcc, __restore_asi + add %o2, 15, %o0 +ENDPROC(memcpy_retl_o2_plus_15) +ENTRY(memcpy_retl_o2_plus_15_8) + add %o2, 15, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_15_8) +ENTRY(memcpy_retl_o2_plus_16) + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_16) +ENTRY(memcpy_retl_o2_plus_24) + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_24) +ENTRY(memcpy_retl_o2_plus_31) + ba,pt %xcc, __restore_asi + add %o2, 31, %o0 +ENDPROC(memcpy_retl_o2_plus_31) +ENTRY(memcpy_retl_o2_plus_32) + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_32) +ENTRY(memcpy_retl_o2_plus_31_32) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_31_32) +ENTRY(memcpy_retl_o2_plus_31_24) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_31_24) +ENTRY(memcpy_retl_o2_plus_31_16) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_31_16) +ENTRY(memcpy_retl_o2_plus_31_8) + add %o2, 31, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_31_8) +ENTRY(memcpy_retl_o2_plus_63) + ba,pt %xcc, __restore_asi + add %o2, 63, %o0 +ENDPROC(memcpy_retl_o2_plus_63) +ENTRY(memcpy_retl_o2_plus_63_64) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 64, %o0 +ENDPROC(memcpy_retl_o2_plus_63_64) +ENTRY(memcpy_retl_o2_plus_63_56) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 56, %o0 +ENDPROC(memcpy_retl_o2_plus_63_56) +ENTRY(memcpy_retl_o2_plus_63_48) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 48, %o0 +ENDPROC(memcpy_retl_o2_plus_63_48) +ENTRY(memcpy_retl_o2_plus_63_40) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 40, %o0 +ENDPROC(memcpy_retl_o2_plus_63_40) +ENTRY(memcpy_retl_o2_plus_63_32) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 32, %o0 +ENDPROC(memcpy_retl_o2_plus_63_32) +ENTRY(memcpy_retl_o2_plus_63_24) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 24, %o0 +ENDPROC(memcpy_retl_o2_plus_63_24) +ENTRY(memcpy_retl_o2_plus_63_16) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 16, %o0 +ENDPROC(memcpy_retl_o2_plus_63_16) +ENTRY(memcpy_retl_o2_plus_63_8) + add %o2, 63, %o2 + ba,pt %xcc, __restore_asi + add %o2, 8, %o0 +ENDPROC(memcpy_retl_o2_plus_63_8) ENTRY(memcpy_retl_o2_plus_o5) ba,pt %xcc, __restore_asi add %o2, %o5, %o0 ENDPROC(memcpy_retl_o2_plus_o5) 
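/*
 * Naming scheme for these fixup stubs: memcpy_retl_o2_plus_N returns
 * %o2 + N as the not-copied byte count; memcpy_retl_o2_plus_A_B first
 * re-biases the count (add %o2, A) and then returns %o2 + A + B, where
 * A undoes the bias subtracted from the length before entering the copy
 * loop and B is the number of bytes of the current unrolled block not
 * yet stored; memcpy_retl_o2_plus_REG and memcpy_retl_o2_plus_REG_plus_N
 * return %o2 + REG (+ N), with the loop's residual byte count still
 * live in register REG (%o3, %o4, %o5 or %g1).
 */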
+ENTRY(memcpy_retl_o2_plus_o5_plus_1) + add %o5, 1, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_1) ENTRY(memcpy_retl_o2_plus_o5_plus_4) add %o5, 4, %o5 ba,pt %xcc, __restore_asi @@ -57,6 +171,11 @@ ENTRY(memcpy_retl_o2_plus_o5_plus_32) ba,pt %xcc, __restore_asi add %o2, %o5, %o0 ENDPROC(memcpy_retl_o2_plus_o5_plus_32) +ENTRY(memcpy_retl_o2_plus_o5_64) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_64) ENTRY(memcpy_retl_o2_plus_g1) ba,pt %xcc, __restore_asi add %o2, %g1, %o0 @@ -115,6 +234,25 @@ ENTRY(memcpy_retl_o2_plus_o4_plus_64) ba,pt %xcc, __restore_asi add %o2, %o4, %o0 ENDPROC(memcpy_retl_o2_plus_o4_plus_64) +ENTRY(memcpy_retl_o2_plus_o5_plus_64) + add %o5, 64, %o5 + ba,pt %xcc, __restore_asi + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_64) +ENTRY(memcpy_retl_o2_plus_o3_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_fp) +ENTRY(memcpy_retl_o2_plus_o3_plus_1_fp) + add %o3, 1, %o3 + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_plus_1_fp) +ENTRY(memcpy_retl_o2_plus_o3_plus_4_fp) + add %o3, 4, %o3 + ba,pt %xcc, __restore_asi_fp + add %o2, %o3, %o0 +ENDPROC(memcpy_retl_o2_plus_o3_plus_4_fp) ENTRY(memcpy_retl_o2_plus_o4_fp) ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 @@ -159,5 +297,49 @@ ENTRY(memcpy_retl_o2_plus_o4_plus_64_fp) ba,pt %xcc, __restore_asi_fp add %o2, %o4, %o0 ENDPROC(memcpy_retl_o2_plus_o4_plus_64_fp) +ENTRY(memcpy_retl_o2_plus_o5_fp) + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_64_fp) + add %o5, 64, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_64_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_56_fp) + add %o5, 56, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_56_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_48_fp) + add %o5, 48, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_48_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_40_fp) + add %o5, 40, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_40_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_32_fp) + add %o5, 32, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_32_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_24_fp) + add %o5, 24, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_24_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_16_fp) + add %o5, 16, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_16_fp) +ENTRY(memcpy_retl_o2_plus_o5_plus_8_fp) + add %o5, 8, %o5 + ba,pt %xcc, __restore_asi_fp + add %o2, %o5, %o0 +ENDPROC(memcpy_retl_o2_plus_o5_plus_8_fp) #endif -- cgit v1.2.3 From 55bd2133fc3c5ee138d19b76ed8f9495b35d57ec Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:02:57 -0400 Subject: sparc64: vcc: Enable VCC module in linux Enables the Virtual Console Concentrator (VCC) module in linux kernel Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. 
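The new vcc.c starts life as the minimal loadable-module skeleton below, which builds to vcc.ko when CONFIG_VCC=m and does nothing until later patches fill in the TTY and VIO plumbing. Shown self-contained for reference (the linux/module.h include is an assumption, since the listing does not spell out the header names):

	#include <linux/module.h>

	static int __init vcc_init(void)
	{
		return 0;	/* registration arrives in later patches */
	}

	static void __exit vcc_exit(void)
	{
	}

	module_init(vcc_init);
	module_exit(vcc_exit);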
Miller --- MAINTAINERS | 1 + arch/sparc/configs/sparc64_defconfig | 1 + drivers/tty/Kconfig | 5 +++++ drivers/tty/Makefile | 1 + drivers/tty/vcc.c | 18 ++++++++++++++++++ 5 files changed, 26 insertions(+) create mode 100644 drivers/tty/vcc.c diff --git a/MAINTAINERS b/MAINTAINERS index 84d6a8277cbd..28c0b4b98450 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12294,6 +12294,7 @@ F: drivers/tty/serial/sunsab.h F: drivers/tty/serial/sunsu.c F: drivers/tty/serial/sunzilog.c F: drivers/tty/serial/sunzilog.h +F: drivers/tty/vcc.c SPARSE CHECKER M: "Christopher Li" diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index ca8609d7292f..4d4e1cc6402f 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -238,3 +238,4 @@ CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC16=m CONFIG_LIBCRC32C=m +CONFIG_VCC=m diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index 95103054c0e4..8643b55dcae2 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig @@ -455,4 +455,9 @@ config MIPS_EJTAG_FDC_KGDB_CHAN help FDC channel number to use for KGDB. +config VCC + tristate "Sun Virtual Console Concentrator" + depends on SUN_LDOMS + help + Support for Sun logical domain consoles. endif # TTY diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile index 8689279afdf1..16330a819685 100644 --- a/drivers/tty/Makefile +++ b/drivers/tty/Makefile @@ -33,5 +33,6 @@ obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o obj-$(CONFIG_DA_TTY) += metag_da.o obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o +obj-$(CONFIG_VCC) += vcc.o obj-y += ipwireless/ diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c new file mode 100644 index 000000000000..e1b809927cd6 --- /dev/null +++ b/drivers/tty/vcc.c @@ -0,0 +1,18 @@ +/* vcc.c: sun4v virtual channel concentrator + * + * Copyright (C) 2017 Oracle. All rights reserved. + */ + +#include + +static int __init vcc_init(void) +{ + return 0; +} + +static void __exit vcc_exit(void) +{ +} + +module_init(vcc_init); +module_exit(vcc_exit); -- cgit v1.2.3 From f283ebd5642d837d372f5e580d550c34dd6eef3d Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:02:58 -0400 Subject: sparc64: vcc: Add VCC debug message macros Add C macros to print debug messages from VCC module Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. 
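The three knobs added here form bitmasks consumed by the wrapper macros: VCC_DBG_DRV (0x1) gates the vccdbg() printks, VCC_DBG_LDC (0x2) the LDC channel dumps via vccdbgl(), and VCC_DBG_PKT (0x4) the per-character packet dumps. Because the module parameters are created with mode 0664, the masks can be flipped at runtime through /sys/module/vcc/parameters/. The do { ... } while (0) wrapping is the standard trick for making a multi-statement macro behave as a single statement; a generic illustration with invented names (not driver code):

	#include <linux/printk.h>

	static unsigned int demo_dbg = 0x1;	/* think VCC_DBG_DRV */

	#define demo_log(f, a...)			\
		do {					\
			if (demo_dbg & 0x1)		\
				pr_info(f, ## a);	\
		} while (0)

	void demo(int cond)
	{
		/* Safe: demo_log() parses as one statement, so the
		 * else binds to the outer if, not the macro's own. */
		if (cond)
			demo_log("link up\n");
		else
			pr_info("link down\n");
	}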
Miller --- arch/sparc/kernel/ldc.c | 1 + drivers/tty/vcc.c | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 840e0b21bfe3..1169915f7568 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -1493,6 +1493,7 @@ void __ldc_print(struct ldc_channel *lp, const char *caller) lp->tx_head, lp->tx_tail, lp->tx_num_entries, lp->rcv_nxt, lp->snd_nxt); } +EXPORT_SYMBOL(__ldc_print); static int write_raw(struct ldc_channel *lp, const void *buf, unsigned int size) { diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index e1b809927cd6..4fd8dd0bda5e 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -5,6 +5,47 @@ #include +#define DRV_MODULE_NAME "vcc" +#define DRV_MODULE_VERSION "1.1" +#define DRV_MODULE_RELDATE "July 1, 2017" + +MODULE_DESCRIPTION("Sun LDOM virtual console concentrator driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +int vcc_dbg; +int vcc_dbg_ldc; +int vcc_dbg_vio; + +module_param(vcc_dbg, uint, 0664); +module_param(vcc_dbg_ldc, uint, 0664); +module_param(vcc_dbg_vio, uint, 0664); + +#define VCC_DBG_DRV 0x1 +#define VCC_DBG_LDC 0x2 +#define VCC_DBG_PKT 0x4 + +#define vccdbg(f, a...) \ + do { \ + if (vcc_dbg & VCC_DBG_DRV) \ + pr_info(f, ## a); \ + } while (0) \ + +#define vccdbgl(l) \ + do { \ + if (vcc_dbg & VCC_DBG_LDC) \ + ldc_print(l); \ + } while (0) \ + +#define vccdbgp(pkt) \ + do { \ + if (vcc_dbg & VCC_DBG_PKT) { \ + int i; \ + for (i = 0; i < pkt.tag.stype; i++) \ + pr_info("[%c]", pkt.data[i]); \ + } \ + } while (0) \ + static int __init vcc_init(void) { return 0; -- cgit v1.2.3 From ce808b746325975192d8cd1d29f1ec03d5b6b0fc Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:02:59 -0400 Subject: sparc64: vcc: TTY driver initialization and cleanup Allocate and register TTY driver during module init. Cleanup TTY driver during module exit. Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/tty/vcc.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 82 insertions(+), 1 deletion(-) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 4fd8dd0bda5e..ba3384d23d79 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -4,15 +4,26 @@ */ #include +#include #define DRV_MODULE_NAME "vcc" #define DRV_MODULE_VERSION "1.1" #define DRV_MODULE_RELDATE "July 1, 2017" +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")"; + MODULE_DESCRIPTION("Sun LDOM virtual console concentrator driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); +#define VCC_MAX_PORTS 1024 +#define VCC_MINOR_START 0 /* must be zero */ + +static const char vcc_driver_name[] = "vcc"; +static const char vcc_device_node[] = "vcc"; +static struct tty_driver *vcc_tty_driver; + int vcc_dbg; int vcc_dbg_ldc; int vcc_dbg_vio; @@ -46,13 +57,83 @@ module_param(vcc_dbg_vio, uint, 0664); } \ } while (0) \ -static int __init vcc_init(void) +/* Note: Be careful when adding flags to this line discipline. Don't + * add anything that will cause echoing or we'll go into recursive + * loop echoing chars back and forth with the console drivers. 
+ */ +static struct ktermios vcc_tty_termios = { + .c_iflag = IGNBRK | IGNPAR, + .c_oflag = OPOST, + .c_cflag = B38400 | CS8 | CREAD | HUPCL, + .c_cc = INIT_C_CC, + .c_ispeed = 38400, + .c_ospeed = 38400 +}; + +static const struct tty_operations vcc_ops; + +#define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW) + +static int vcc_tty_init(void) { + int rv; + + pr_info("VCC: %s\n", version); + + vcc_tty_driver = tty_alloc_driver(VCC_MAX_PORTS, VCC_TTY_FLAGS); + if (!vcc_tty_driver) { + pr_err("VCC: TTY driver alloc failed\n"); + return -ENOMEM; + } + + vcc_tty_driver->driver_name = vcc_driver_name; + vcc_tty_driver->name = vcc_device_node; + + vcc_tty_driver->minor_start = VCC_MINOR_START; + vcc_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM; + vcc_tty_driver->init_termios = vcc_tty_termios; + + tty_set_operations(vcc_tty_driver, &vcc_ops); + + rv = tty_register_driver(vcc_tty_driver); + if (rv) { + pr_err("VCC: TTY driver registration failed\n"); + put_tty_driver(vcc_tty_driver); + vcc_tty_driver = NULL; + return rv; + } + + vccdbg("VCC: TTY driver registered\n"); + return 0; } +static void vcc_tty_exit(void) +{ + tty_unregister_driver(vcc_tty_driver); + put_tty_driver(vcc_tty_driver); + vccdbg("VCC: TTY driver unregistered\n"); + + vcc_tty_driver = NULL; +} + +static int __init vcc_init(void) +{ + int rv; + + rv = vcc_tty_init(); + if (rv) { + pr_err("VCC: TTY init failed\n"); + return rv; + } + + return rv; +} + static void __exit vcc_exit(void) { + vcc_tty_exit(); + vccdbg("VCC: TTY driver unregistered\n"); } module_init(vcc_init); -- cgit v1.2.3 From 5d171050e28f823aeb040f2830da4d3422b54b63 Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:00 -0400 Subject: sparc64: vcc: Enable VCC port probe and removal Enables VCC port probe and removal to initialize and terminate VCC ports respectively. When a device/port matching the VCC driver is added, the probe function is invoked along with a reference to the device. remove function is called when the device is removed. Also add APIs to cache and retrieve VCC ports from a VCC table Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. 
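The table APIs introduced below implement a small hand-rolled reader/writer scheme: vcc_get() spins with a udelay() backoff rather than sleeping, non-exclusive getters accumulate in refcnt, and the exclusive getter used by vcc_remove() waits for refcnt to drain before it wins. The intended calling pattern from asynchronous contexts looks like this (a sketch; it mirrors how later patches in the series use the helpers from their timer callbacks):

	static void demo_deferred_work(unsigned long index)
	{
		struct vcc_port *port;

		/* Take a non-exclusive reference; NULL means the port
		 * was removed (or never existed) and must not be touched. */
		port = vcc_get_ne(index);
		if (!port)
			return;

		/* ... operate on the port, taking port->lock as needed ... */

		vcc_put(port, false);	/* excl flag must match the get */
	}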
Miller --- arch/sparc/include/asm/vio.h | 9 + arch/sparc/kernel/vio.c | 1 + arch/sparc/kernel/viohs.c | 12 +- drivers/tty/vcc.c | 379 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 398 insertions(+), 3 deletions(-) diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h index d1c47e9f0090..f3d4ac232690 100644 --- a/arch/sparc/include/asm/vio.h +++ b/arch/sparc/include/asm/vio.h @@ -52,6 +52,7 @@ struct vio_ver_info { #define VDEV_NETWORK_SWITCH 0x02 #define VDEV_DISK 0x03 #define VDEV_DISK_SERVER 0x04 +#define VDEV_CONSOLE_CON 0x05 u8 resv1[3]; u64 resv2[5]; @@ -282,6 +283,14 @@ struct vio_dring_state { struct ldc_trans_cookie cookies[VIO_MAX_RING_COOKIES]; }; +#define VIO_TAG_SIZE ((int)sizeof(struct vio_msg_tag)) +#define VIO_VCC_MTU_SIZE (LDC_PACKET_SIZE - VIO_TAG_SIZE) + +struct vio_vcc { + struct vio_msg_tag tag; + char data[VIO_VCC_MTU_SIZE]; +}; + static inline void *vio_dring_cur(struct vio_dring_state *dr) { return dr->base + (dr->entry_size * dr->prod); diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 1c8763c9c52b..da1ac3f22b24 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -246,6 +246,7 @@ u64 vio_vdev_node(struct mdesc_handle *hp, struct vio_dev *vdev) return node; } +EXPORT_SYMBOL(vio_vdev_node); static void vio_fill_channel_info(struct mdesc_handle *hp, u64 mp, struct vio_dev *vdev) diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index d4f13c037a40..dcd278f29573 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -814,15 +814,21 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, case VDEV_NETWORK_SWITCH: case VDEV_DISK: case VDEV_DISK_SERVER: + case VDEV_CONSOLE_CON: break; default: return -EINVAL; } - if (!ops || !ops->send_attr || !ops->handle_attr || - !ops->handshake_complete) - return -EINVAL; + if (dev_class == VDEV_NETWORK || + dev_class == VDEV_NETWORK_SWITCH || + dev_class == VDEV_DISK || + dev_class == VDEV_DISK_SERVER) { + if (!ops || !ops->send_attr || !ops->handle_attr || + !ops->handshake_complete) + return -EINVAL; + } if (!ver_table || ver_table_size < 0) return -EINVAL; diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index ba3384d23d79..4edace1d9e4b 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -3,8 +3,13 @@ * Copyright (C) 2017 Oracle. All rights reserved. */ +#include +#include #include +#include #include +#include +#include #define DRV_MODULE_NAME "vcc" #define DRV_MODULE_VERSION "1.1" @@ -17,6 +22,33 @@ MODULE_DESCRIPTION("Sun LDOM virtual console concentrator driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); +struct vcc_port { + struct vio_driver_state vio; + + spinlock_t lock; + char *domain; + struct tty_struct *tty; /* only populated while dev is open */ + unsigned long index; /* index into the vcc_table */ + + u64 refcnt; + bool excl_locked; + + bool removed; + + /* This buffer is required to support the tty write_room interface + * and guarantee that any characters that the driver accepts will + * be eventually sent, either immediately or later. 
+ */ + int chars_in_buffer; + struct vio_vcc buffer; + + struct timer_list rx_timer; + struct timer_list tx_timer; +}; + +/* Microseconds that thread will delay waiting for a vcc port ref */ +#define VCC_REF_DELAY 100 + #define VCC_MAX_PORTS 1024 #define VCC_MINOR_START 0 /* must be zero */ @@ -24,6 +56,9 @@ static const char vcc_driver_name[] = "vcc"; static const char vcc_device_node[] = "vcc"; static struct tty_driver *vcc_tty_driver; +static struct vcc_port *vcc_table[VCC_MAX_PORTS]; +static DEFINE_SPINLOCK(vcc_table_lock); + int vcc_dbg; int vcc_dbg_ldc; int vcc_dbg_vio; @@ -70,6 +105,340 @@ static struct ktermios vcc_tty_termios = { .c_ospeed = 38400 }; +/** + * vcc_table_add() - Add VCC port to the VCC table + * @port: pointer to the VCC port + * + * Return: index of the port in the VCC table on success, + * -1 on failure + */ +static int vcc_table_add(struct vcc_port *port) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&vcc_table_lock, flags); + for (i = VCC_MINOR_START; i < VCC_MAX_PORTS; i++) { + if (!vcc_table[i]) { + vcc_table[i] = port; + break; + } + } + spin_unlock_irqrestore(&vcc_table_lock, flags); + + if (i < VCC_MAX_PORTS) + return i; + else + return -1; +} + +/** + * vcc_table_remove() - Removes a VCC port from the VCC table + * @index: Index into the VCC table + */ +static void vcc_table_remove(unsigned long index) +{ + unsigned long flags; + + if (WARN_ON(index >= VCC_MAX_PORTS)) + return; + + spin_lock_irqsave(&vcc_table_lock, flags); + vcc_table[index] = NULL; + spin_unlock_irqrestore(&vcc_table_lock, flags); +} + +/** + * vcc_get() - Gets a reference to VCC port + * @index: Index into the VCC table + * @excl: Indicates if an exclusive access is requested + * + * Return: reference to the VCC port, if found + * NULL, if port not found + */ +static struct vcc_port *vcc_get(unsigned long index, bool excl) +{ + struct vcc_port *port; + unsigned long flags; + +try_again: + spin_lock_irqsave(&vcc_table_lock, flags); + + port = vcc_table[index]; + if (!port) { + spin_unlock_irqrestore(&vcc_table_lock, flags); + return NULL; + } + + if (!excl) { + if (port->excl_locked) { + spin_unlock_irqrestore(&vcc_table_lock, flags); + udelay(VCC_REF_DELAY); + goto try_again; + } + port->refcnt++; + spin_unlock_irqrestore(&vcc_table_lock, flags); + return port; + } + + if (port->refcnt) { + spin_unlock_irqrestore(&vcc_table_lock, flags); + /* Threads wanting exclusive access will wait half the time, + * probably giving them higher priority in the case of + * multiple waiters. 
+ */ + udelay(VCC_REF_DELAY/2); + goto try_again; + } + + port->refcnt++; + port->excl_locked = true; + spin_unlock_irqrestore(&vcc_table_lock, flags); + + return port; +} + +/** + * vcc_put() - Returns a reference to VCC port + * @port: pointer to VCC port + * @excl: Indicates if the returned reference is an exclusive reference + * + * Note: It's the caller's responsibility to ensure the correct value + * for the excl flag + */ +static void vcc_put(struct vcc_port *port, bool excl) +{ + unsigned long flags; + + if (!port) + return; + + spin_lock_irqsave(&vcc_table_lock, flags); + + /* check if caller attempted to put with the wrong flags */ + if (WARN_ON((excl && !port->excl_locked) || + (!excl && port->excl_locked))) + goto done; + + port->refcnt--; + + if (excl) + port->excl_locked = false; + +done: + spin_unlock_irqrestore(&vcc_table_lock, flags); +} + +/** + * vcc_get_ne() - Get a non-exclusive reference to VCC port + * @index: Index into the VCC table + * + * Gets a non-exclusive reference to VCC port, if it's not removed + * + * Return: pointer to the VCC port, if found + * NULL, if port not found + */ +static struct vcc_port *vcc_get_ne(unsigned long index) +{ + struct vcc_port *port; + + port = vcc_get(index, false); + + if (port && port->removed) { + vcc_put(port, false); + return NULL; + } + + return port; +} + +static void vcc_event(void *arg, int event) +{ +} + +static struct ldc_channel_config vcc_ldc_cfg = { + .event = vcc_event, + .mtu = VIO_VCC_MTU_SIZE, + .mode = LDC_MODE_RAW, + .debug = 0, +}; + +/* Ordered from largest major to lowest */ +static struct vio_version vcc_versions[] = { + { .major = 1, .minor = 0 }, +}; + +/** + * vcc_probe() - Initialize VCC port + * @vdev: Pointer to VIO device of the new VCC port + * @id: VIO device ID + * + * Initializes a VCC port to receive serial console data from + * the guest domain. Sets up a TTY end point on the control + * domain. Sets up VIO/LDC link between the guest & control + * domain endpoints. 
+ * + * Return: status of the probe + */ +static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct mdesc_handle *hp; + struct vcc_port *port; + struct device *dev; + const char *domain; + char *name; + u64 node; + int rv; + + vccdbg("VCC: name=%s\n", dev_name(&vdev->dev)); + + if (!vcc_tty_driver) { + pr_err("VCC: TTY driver not registered\n"); + return -ENODEV; + } + + port = kzalloc(sizeof(struct vcc_port), GFP_KERNEL); + if (!port) + return -ENOMEM; + + name = kstrdup(dev_name(&vdev->dev), GFP_KERNEL); + + rv = vio_driver_init(&port->vio, vdev, VDEV_CONSOLE_CON, vcc_versions, + ARRAY_SIZE(vcc_versions), NULL, name); + if (rv) + goto free_port; + + port->vio.debug = vcc_dbg_vio; + vcc_ldc_cfg.debug = vcc_dbg_ldc; + + rv = vio_ldc_alloc(&port->vio, &vcc_ldc_cfg, port); + if (rv) + goto free_port; + + spin_lock_init(&port->lock); + + port->index = vcc_table_add(port); + if (port->index == -1) { + pr_err("VCC: no more TTY indices left for allocation\n"); + goto free_ldc; + } + + /* Register the device using VCC table index as TTY index */ + dev = tty_register_device(vcc_tty_driver, port->index, &vdev->dev); + if (IS_ERR(dev)) { + rv = PTR_ERR(dev); + goto free_table; + } + + hp = mdesc_grab(); + + node = vio_vdev_node(hp, vdev); + if (node == MDESC_NODE_NULL) { + rv = -ENXIO; + mdesc_release(hp); + goto unreg_tty; + } + + domain = mdesc_get_property(hp, node, "vcc-domain-name", NULL); + if (!domain) { + rv = -ENXIO; + mdesc_release(hp); + goto unreg_tty; + } + port->domain = kstrdup(domain, GFP_KERNEL); + + mdesc_release(hp); + + dev_set_drvdata(&vdev->dev, port); + + /* It's possible to receive IRQs in the middle of vio_port_up. Disable + * IRQs until the port is up. + */ + disable_irq_nosync(vdev->rx_irq); + vio_port_up(&port->vio); + enable_irq(vdev->rx_irq); + + return 0; + +unreg_tty: + tty_unregister_device(vcc_tty_driver, port->index); +free_table: + vcc_table_remove(port->index); +free_ldc: + vio_ldc_free(&port->vio); +free_port: + kfree(name); + kfree(port); + + return rv; +} + +/** + * vcc_remove() - Terminate a VCC port + * @vdev: Pointer to VIO device of the VCC port + * + * Terminates a VCC port. Sets up the teardown of TTY and + * VIO/LDC link between guest and primary domains. + * + * Return: status of removal + */ +static int vcc_remove(struct vio_dev *vdev) +{ + struct vcc_port *port = dev_get_drvdata(&vdev->dev); + + if (!port) + return -ENODEV; + + /* If there's a process with the device open, do a synchronous + * hangup of the TTY. This *may* cause the process to call close + * asynchronously, but it's not guaranteed. 
+ */ + if (port->tty) + tty_vhangup(port->tty); + + /* Get exclusive reference to VCC, ensures that there are no other + * clients to this port + */ + port = vcc_get(port->index, true); + + if (WARN_ON(!port)) + return -ENODEV; + + tty_unregister_device(vcc_tty_driver, port->index); + + del_timer_sync(&port->vio.timer); + vio_ldc_free(&port->vio); + dev_set_drvdata(&vdev->dev, NULL); + + if (port->tty) { + port->removed = true; + vcc_put(port, true); + } else { + vcc_table_remove(port->index); + + kfree(port->vio.name); + kfree(port->domain); + kfree(port); + } + + return 0; +} + +static const struct vio_device_id vcc_match[] = { + { + .type = "vcc-port", + }, + {}, +}; +MODULE_DEVICE_TABLE(vio, vcc_match); + +static struct vio_driver vcc_driver = { + .id_table = vcc_match, + .probe = vcc_probe, + .remove = vcc_remove, + .name = "vcc", +}; + static const struct tty_operations vcc_ops; #define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW) @@ -127,11 +496,21 @@ static int __init vcc_init(void) return rv; } + rv = vio_register_driver(&vcc_driver); + if (rv) { + pr_err("VCC: VIO driver registration failed\n"); + vcc_tty_exit(); + } else { + vccdbg("VCC: VIO driver registered successfully\n"); + } + return rv; } static void __exit vcc_exit(void) { + vio_unregister_driver(&vcc_driver); + vccdbg("VCC: VIO driver unregistered\n"); vcc_tty_exit(); vccdbg("VCC: TTY driver unregistered\n"); } -- cgit v1.2.3 From 8868e44a56160328cd4003d95f4083f64b136e33 Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:01 -0400 Subject: sparc64: vcc: Create sysfs attribute group Create sysfs attribute group to show the domain name and send break command. Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. 
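The two attributes land directly in the VIO device's sysfs directory: domain (mode 0400) reports the guest domain name read from the machine description, and break (mode 0200) accepts the value 1 to queue a break toward the guest. Note how vcc_send_ctl() encodes control messages: the packet is just a vio_msg_tag whose sid field carries the control code (VCC_CTL_BREAK is -1), with no payload. A userspace sketch of triggering a break (the device path is hypothetical; the real node name comes from the VIO device):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/vio/devices/vcc-port-0/break", "w");

		if (!f)
			return 1;
		fputs("1", f);	/* any value other than 1 is rejected */
		return fclose(f) ? 1 : 0;
	}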
Miller --- drivers/tty/vcc.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 4edace1d9e4b..72e0879e332d 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -52,6 +53,9 @@ struct vcc_port { #define VCC_MAX_PORTS 1024 #define VCC_MINOR_START 0 /* must be zero */ +#define VCC_CTL_BREAK -1 +#define VCC_CTL_HUP -2 + static const char vcc_driver_name[] = "vcc"; static const char vcc_device_node[] = "vcc"; static struct tty_driver *vcc_tty_driver; @@ -268,6 +272,77 @@ static struct vio_version vcc_versions[] = { { .major = 1, .minor = 0 }, }; +static ssize_t vcc_sysfs_domain_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct vcc_port *port; + int rv; + + port = dev_get_drvdata(dev); + if (!port) + return -ENODEV; + + rv = scnprintf(buf, PAGE_SIZE, "%s\n", port->domain); + + return rv; +} + +static int vcc_send_ctl(struct vcc_port *port, int ctl) +{ + struct vio_vcc pkt; + int rv; + + pkt.tag.type = VIO_TYPE_CTRL; + pkt.tag.sid = ctl; + pkt.tag.stype = 0; + + rv = ldc_write(port->vio.lp, &pkt, sizeof(pkt.tag)); + WARN_ON(!rv); + vccdbg("VCC: ldc_write(%ld)=%d\n", sizeof(pkt.tag), rv); + + return rv; +} + +static ssize_t vcc_sysfs_break_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct vcc_port *port; + unsigned long flags; + int rv = count; + int brk; + + port = dev_get_drvdata(dev); + if (!port) + return -ENODEV; + + spin_lock_irqsave(&port->lock, flags); + + if (sscanf(buf, "%ud", &brk) != 1 || brk != 1) + rv = -EINVAL; + else if (vcc_send_ctl(port, VCC_CTL_BREAK) < 0) + pr_err("VCC: unable to send CTL_BREAK\n"); + + spin_unlock_irqrestore(&port->lock, flags); + + return rv; +} + +static DEVICE_ATTR(domain, 0400, vcc_sysfs_domain_show, NULL); +static DEVICE_ATTR(break, 0200, NULL, vcc_sysfs_break_store); + +static struct attribute *vcc_sysfs_entries[] = { + &dev_attr_domain.attr, + &dev_attr_break.attr, + NULL +}; + +static struct attribute_group vcc_attribute_group = { + .name = NULL, + .attrs = vcc_sysfs_entries, +}; + /** * vcc_probe() - Initialize VCC port * @vdev: Pointer to VIO device of the new VCC port @@ -349,6 +424,10 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) mdesc_release(hp); + rv = sysfs_create_group(&vdev->dev.kobj, &vcc_attribute_group); + if (rv) + goto free_domain; + dev_set_drvdata(&vdev->dev, port); /* It's possible to receive IRQs in the middle of vio_port_up. Disable @@ -360,6 +439,8 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) return 0; +free_domain: + kfree(port->domain); unreg_tty: tty_unregister_device(vcc_tty_driver, port->index); free_table: @@ -408,6 +489,7 @@ static int vcc_remove(struct vio_dev *vdev) del_timer_sync(&port->vio.timer); vio_ldc_free(&port->vio); + sysfs_remove_group(&vdev->dev.kobj, &vcc_attribute_group); dev_set_drvdata(&vdev->dev, NULL); if (port->tty) { -- cgit v1.2.3 From f8c553354e71fbb982ff63f2f574c1658aea50a9 Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:02 -0400 Subject: sparc64: vcc: Add RX & TX timer for delayed LDC operation Add RX & TX timers to perform delayed/asynchronous LDC read and write operations. Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. 
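Both timers follow the same kick-once pattern: the kick helper arms the timer one jiffy out only if it is not already pending, and the handler re-resolves the port by index through vcc_get_ne() so it is safe against a racing removal. The pattern in isolation (a sketch using the pre-4.15 timer API that the patch itself uses; the real handlers additionally take port->lock, manage the RX IRQ, and drop the port reference):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list demo_timer;

	static void demo_timer_fn(unsigned long data)
	{
		/* Runs in softirq context about one tick after the kick;
		 * 'data' would carry the vcc_table index. */
	}

	static void demo_kick(void)
	{
		/* Coalesce kicks: at most one pending expiry at a time,
		 * exactly like vcc_kick_rx()/vcc_kick_tx(). */
		if (!timer_pending(&demo_timer)) {
			demo_timer.expires = jiffies + 1;
			add_timer(&demo_timer);
		}
	}

	static void demo_init(void)
	{
		init_timer(&demo_timer);
		demo_timer.function = demo_timer_fn;
		demo_timer.data = 0;
	}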
Miller --- arch/sparc/kernel/ldc.c | 1 + drivers/tty/vcc.c | 195 +++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 194 insertions(+), 2 deletions(-) diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index 1169915f7568..acffbc894ab0 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -1480,6 +1480,7 @@ int ldc_rx_reset(struct ldc_channel *lp) { return __set_rx_head(lp, lp->rx_tail); } +EXPORT_SYMBOL(ldc_rx_reset); void __ldc_print(struct ldc_channel *lp, const char *caller) { diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 72e0879e332d..27566d5f7df0 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -52,6 +53,7 @@ struct vcc_port { #define VCC_MAX_PORTS 1024 #define VCC_MINOR_START 0 /* must be zero */ +#define VCC_BUFF_LEN VIO_VCC_MTU_SIZE #define VCC_CTL_BREAK -1 #define VCC_CTL_HUP -2 @@ -256,6 +258,185 @@ static struct vcc_port *vcc_get_ne(unsigned long index) return port; } +static void vcc_kick_rx(struct vcc_port *port) +{ + struct vio_driver_state *vio = &port->vio; + + assert_spin_locked(&port->lock); + + if (!timer_pending(&port->rx_timer) && !port->removed) { + disable_irq_nosync(vio->vdev->rx_irq); + port->rx_timer.expires = (jiffies + 1); + add_timer(&port->rx_timer); + } +} + +static void vcc_kick_tx(struct vcc_port *port) +{ + assert_spin_locked(&port->lock); + + if (!timer_pending(&port->tx_timer) && !port->removed) { + port->tx_timer.expires = (jiffies + 1); + add_timer(&port->tx_timer); + } +} + +static int vcc_rx_check(struct tty_struct *tty, int size) +{ + if (WARN_ON(!tty || !tty->port)) + return 1; + + /* tty_buffer_request_room won't sleep because it uses + * GFP_ATOMIC flag to allocate buffer + */ + if (test_bit(TTY_THROTTLED, &tty->flags) || + (tty_buffer_request_room(tty->port, VCC_BUFF_LEN) < VCC_BUFF_LEN)) + return 0; + + return 1; +} + +static int vcc_rx(struct tty_struct *tty, char *buf, int size) +{ + int len = 0; + + if (WARN_ON(!tty || !tty->port)) + return len; + + len = tty_insert_flip_string(tty->port, buf, size); + if (len) + tty_flip_buffer_push(tty->port); + + return len; +} + +static int vcc_ldc_read(struct vcc_port *port) +{ + struct vio_driver_state *vio = &port->vio; + struct tty_struct *tty; + struct vio_vcc pkt; + int rv = 0; + + tty = port->tty; + if (!tty) { + rv = ldc_rx_reset(vio->lp); + vccdbg("VCC: reset rx q: rv=%d\n", rv); + goto done; + } + + /* Read as long as LDC has incoming data. 
*/ + while (1) { + if (!vcc_rx_check(tty, VIO_VCC_MTU_SIZE)) { + vcc_kick_rx(port); + break; + } + + vccdbgl(vio->lp); + + rv = ldc_read(vio->lp, &pkt, sizeof(pkt)); + if (rv <= 0) + break; + + vccdbg("VCC: ldc_read()=%d\n", rv); + vccdbg("TAG [%02x:%02x:%04x:%08x]\n", + pkt.tag.type, pkt.tag.stype, + pkt.tag.stype_env, pkt.tag.sid); + + if (pkt.tag.type == VIO_TYPE_DATA) { + vccdbgp(pkt); + /* vcc_rx_check ensures memory availability */ + vcc_rx(tty, pkt.data, pkt.tag.stype); + } else { + pr_err("VCC: unknown msg [%02x:%02x:%04x:%08x]\n", + pkt.tag.type, pkt.tag.stype, + pkt.tag.stype_env, pkt.tag.sid); + rv = -ECONNRESET; + break; + } + + WARN_ON(rv != LDC_PACKET_SIZE); + } + +done: + return rv; +} + +static void vcc_rx_timer(unsigned long index) +{ + struct vio_driver_state *vio; + struct vcc_port *port; + unsigned long flags; + int rv; + + port = vcc_get_ne(index); + if (!port) + return; + + spin_lock_irqsave(&port->lock, flags); + port->rx_timer.expires = 0; + + vio = &port->vio; + + enable_irq(vio->vdev->rx_irq); + + if (!port->tty || port->removed) + goto done; + + rv = vcc_ldc_read(port); + if (rv == -ECONNRESET) + vio_conn_reset(vio); + +done: + spin_unlock_irqrestore(&port->lock, flags); + vcc_put(port, false); +} + +static void vcc_tx_timer(unsigned long index) +{ + struct vcc_port *port; + struct vio_vcc *pkt; + unsigned long flags; + int tosend = 0; + int rv; + + port = vcc_get_ne(index); + if (!port) + return; + + spin_lock_irqsave(&port->lock, flags); + port->tx_timer.expires = 0; + + if (!port->tty || port->removed) + goto done; + + tosend = min(VCC_BUFF_LEN, port->chars_in_buffer); + if (!tosend) + goto done; + + pkt = &port->buffer; + pkt->tag.type = VIO_TYPE_DATA; + pkt->tag.stype = tosend; + vccdbgl(port->vio.lp); + + rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend)); + WARN_ON(!rv); + + if (rv < 0) { + vccdbg("VCC: ldc_write()=%d\n", rv); + vcc_kick_tx(port); + } else { + struct tty_struct *tty = port->tty; + + port->chars_in_buffer = 0; + if (tty) + tty_wakeup(tty); + } + +done: + spin_unlock_irqrestore(&port->lock, flags); + vcc_put(port, false); +} + static void vcc_event(void *arg, int event) { } @@ -322,7 +503,7 @@ static ssize_t vcc_sysfs_break_store(struct device *dev, if (sscanf(buf, "%ud", &brk) != 1 || brk != 1) rv = -EINVAL; else if (vcc_send_ctl(port, VCC_CTL_BREAK) < 0) - pr_err("VCC: unable to send CTL_BREAK\n"); + vcc_kick_tx(port); spin_unlock_irqrestore(&port->lock, flags); @@ -428,6 +609,14 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (rv) goto free_domain; + init_timer(&port->rx_timer); + port->rx_timer.function = vcc_rx_timer; + port->rx_timer.data = port->index; + + init_timer(&port->tx_timer); + port->tx_timer.function = vcc_tx_timer; + port->tx_timer.data = port->index; + dev_set_drvdata(&vdev->dev, port); /* It's possible to receive IRQs in the middle of vio_port_up. Disable @@ -470,6 +659,9 @@ static int vcc_remove(struct vio_dev *vdev) if (!port) return -ENODEV; + del_timer_sync(&port->rx_timer); + del_timer_sync(&port->tx_timer); + /* If there's a process with the device open, do a synchronous * hangup of the TTY. This *may* cause the process to call close * asynchronously, but it's not guaranteed. 
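Either way, port->removed is set below so the RX/TX timers cannot rearm against a port that is going away.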
@@ -491,7 +683,6 @@ static int vcc_remove(struct vio_dev *vdev) vio_ldc_free(&port->vio); sysfs_remove_group(&vdev->dev.kobj, &vcc_attribute_group); dev_set_drvdata(&vdev->dev, NULL); - if (port->tty) { port->removed = true; vcc_put(port, true); -- cgit v1.2.3 From 103b9b0bc9e068d5e9432ee1da526d8feabc0216 Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:03 -0400 Subject: sparc64: vcc: Enable LDC event processing engine Enable event processing engine to handle LDC events Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/tty/vcc.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 27566d5f7df0..91f663d207b6 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -437,8 +437,42 @@ done: vcc_put(port, false); } +/** + * vcc_event() - LDC event processing engine + * @arg: VCC private data + * @event: LDC event + * + * Handles LDC events for VCC + */ static void vcc_event(void *arg, int event) { + struct vio_driver_state *vio; + struct vcc_port *port; + unsigned long flags; + int rv; + + port = arg; + vio = &port->vio; + + spin_lock_irqsave(&port->lock, flags); + + switch (event) { + case LDC_EVENT_RESET: + case LDC_EVENT_UP: + vio_link_state_change(vio, event); + break; + + case LDC_EVENT_DATA_READY: + rv = vcc_ldc_read(port); + if (rv == -ECONNRESET) + vio_conn_reset(vio); + break; + + default: + pr_err("VCC: unexpected LDC event(%d)\n", event); + } + + spin_unlock_irqrestore(&port->lock, flags); } static struct ldc_channel_config vcc_ldc_cfg = { -- cgit v1.2.3 From 63a7174484223f4ba35a3bad8db43234ab56ecea Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:04 -0400 Subject: sparc64: vcc: Add open & close TTY operations Add handlers to support TTY open & close operations Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/tty/vcc.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 91f663d207b6..f964fb20e4ba 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -746,7 +746,68 @@ static struct vio_driver vcc_driver = { .name = "vcc", }; -static const struct tty_operations vcc_ops; +static int vcc_open(struct tty_struct *tty, struct file *vcc_file) +{ + struct vcc_port *port; + + if (unlikely(!tty)) { + pr_err("VCC: open: Invalid TTY handle\n"); + return -ENXIO; + } + + if (tty->count > 1) + return -EBUSY; + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: open: Failed to find VCC port\n"); + return -ENODEV; + } + + if (unlikely(!port->vio.lp)) { + pr_err("VCC: open: LDC channel not configured\n"); + vcc_put(port, false); + return -EPIPE; + } + vccdbgl(port->vio.lp); + + vcc_put(port, false); + + if (unlikely(!tty->port)) { + pr_err("VCC: open: TTY port not found\n"); + return -ENXIO; + } + + if (unlikely(!tty->port->ops)) { + pr_err("VCC: open: TTY ops not defined\n"); + return -ENXIO; + } + + return tty_port_open(tty->port, tty, vcc_file); +} + +static void vcc_close(struct tty_struct *tty, struct file *vcc_file) +{ + if (unlikely(!tty)) { + pr_err("VCC: close: Invalid TTY handle\n"); + return; + } + + if (unlikely(tty->count > 1)) + return; + + if (unlikely(!tty->port)) { + pr_err("VCC: close: TTY port not found\n"); + return; + } + + tty_port_close(tty->port, tty, vcc_file); +} + +static const struct tty_operations vcc_ops = { + .open = vcc_open, + .close = vcc_close, +}; #define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW) -- cgit v1.2.3 From 6a3ff25bc6cda6293eeed6056f73d76eb35e3076 Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:05 -0400 Subject: sparc64: vcc: Add hangup TTY operation Add handler to support TTY hangup operation Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/tty/vcc.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index f964fb20e4ba..67be5a57c1b5 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -804,9 +804,50 @@ static void vcc_close(struct tty_struct *tty, struct file *vcc_file) tty_port_close(tty->port, tty, vcc_file); } +static void vcc_ldc_hup(struct vcc_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + if (vcc_send_ctl(port, VCC_CTL_HUP) < 0) + vcc_kick_tx(port); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void vcc_hangup(struct tty_struct *tty) +{ + struct vcc_port *port; + + if (unlikely(!tty)) { + pr_err("VCC: hangup: Invalid TTY handle\n"); + return; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: hangup: Failed to find VCC port\n"); + return; + } + + if (unlikely(!tty->port)) { + pr_err("VCC: hangup: TTY port not found\n"); + vcc_put(port, false); + return; + } + + vcc_ldc_hup(port); + + vcc_put(port, false); + + tty_port_hangup(tty->port); +} + static const struct tty_operations vcc_ops = { .open = vcc_open, .close = vcc_close, + .hangup = vcc_hangup, }; #define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW) -- cgit v1.2.3 From cf0452600b6dbf8bf23b6f7884c7937a0ef60017 Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:06 -0400 Subject: sparc64: vcc: Add write & write_room TTY operations Add handlers to support TTY write & write_room operations Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/tty/vcc.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 3 deletions(-) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 67be5a57c1b5..0c0fe228546b 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -844,10 +844,105 @@ static void vcc_hangup(struct tty_struct *tty) tty_port_hangup(tty->port); } +static int vcc_write(struct tty_struct *tty, const unsigned char *buf, + int count) +{ + struct vcc_port *port; + struct vio_vcc *pkt; + unsigned long flags; + int total_sent = 0; + int tosend = 0; + int rv = -EINVAL; + + if (unlikely(!tty)) { + pr_err("VCC: write: Invalid TTY handle\n"); + return -ENXIO; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: write: Failed to find VCC port"); + return -ENODEV; + } + + spin_lock_irqsave(&port->lock, flags); + + pkt = &port->buffer; + pkt->tag.type = VIO_TYPE_DATA; + + while (count > 0) { + /* Minimum of data to write and space available */ + tosend = min(count, (VCC_BUFF_LEN - port->chars_in_buffer)); + + if (!tosend) + break; + + memcpy(&pkt->data[port->chars_in_buffer], &buf[total_sent], + tosend); + port->chars_in_buffer += tosend; + pkt->tag.stype = tosend; + + vccdbg("TAG [%02x:%02x:%04x:%08x]\n", pkt->tag.type, + pkt->tag.stype, pkt->tag.stype_env, pkt->tag.sid); + vccdbg("DATA [%s]\n", pkt->data); + vccdbgl(port->vio.lp); + + /* Since we know we have enough room in VCC buffer for tosend + * we record that it was sent regardless of whether the + * hypervisor actually took it because we have it buffered. 
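A failed ldc_write() therefore only schedules a retry via vcc_kick_tx(); the data stays in port->buffer with chars_in_buffer non-zero until the TX timer pushes it out.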
+ */ + rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend)); + vccdbg("VCC: write: ldc_write(%d)=%d\n", + (VIO_TAG_SIZE + tosend), rv); + + total_sent += tosend; + count -= tosend; + if (rv < 0) { + vcc_kick_tx(port); + break; + } + + port->chars_in_buffer = 0; + } + + spin_unlock_irqrestore(&port->lock, flags); + + vcc_put(port, false); + + vccdbg("VCC: write: total=%d rv=%d", total_sent, rv); + + return total_sent ? total_sent : rv; +} + +static int vcc_write_room(struct tty_struct *tty) +{ + struct vcc_port *port; + u64 num; + + if (unlikely(!tty)) { + pr_err("VCC: write_room: Invalid TTY handle\n"); + return -ENXIO; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: write_room: Failed to find VCC port\n"); + return -ENODEV; + } + + num = VCC_BUFF_LEN - port->chars_in_buffer; + + vcc_put(port, false); + + return num; +} + static const struct tty_operations vcc_ops = { - .open = vcc_open, - .close = vcc_close, - .hangup = vcc_hangup, + .open = vcc_open, + .close = vcc_close, + .hangup = vcc_hangup, + .write = vcc_write, + .write_room = vcc_write_room, }; #define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW) -- cgit v1.2.3 From aeee7debf30b43b276f8aad2e889cd50dd3f0945 Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:07 -0400 Subject: sparc64: vcc: Add chars_in_buffer TTY operation Add handler to support TTY chars_in_buffer operation Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/tty/vcc.c | 34 +++++++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 0c0fe228546b..070c54188971 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -937,12 +937,36 @@ static int vcc_write_room(struct tty_struct *tty) return num; } +static int vcc_chars_in_buffer(struct tty_struct *tty) +{ + struct vcc_port *port; + u64 num; + + if (unlikely(!tty)) { + pr_err("VCC: chars_in_buffer: Invalid TTY handle\n"); + return -ENXIO; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: chars_in_buffer: Failed to find VCC port\n"); + return -ENODEV; + } + + num = port->chars_in_buffer; + + vcc_put(port, false); + + return num; +} + static const struct tty_operations vcc_ops = { - .open = vcc_open, - .close = vcc_close, - .hangup = vcc_hangup, - .write = vcc_write, - .write_room = vcc_write_room, + .open = vcc_open, + .close = vcc_close, + .hangup = vcc_hangup, + .write = vcc_write, + .write_room = vcc_write_room, + .chars_in_buffer = vcc_chars_in_buffer, }; #define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW) -- cgit v1.2.3 From e942e5775c87aee2b21179aa45e78b81e062b246 Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:08 -0400 Subject: sparc64: vcc: Add break_ctl TTY operation Add handler to support TTY break_ctl operation Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/tty/vcc.c | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 070c54188971..f913217159ee 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -960,6 +960,40 @@ static int vcc_chars_in_buffer(struct tty_struct *tty) return num; } +static int vcc_break_ctl(struct tty_struct *tty, int state) +{ + struct vcc_port *port; + unsigned long flags; + + if (unlikely(!tty)) { + pr_err("VCC: break_ctl: Invalid TTY handle\n"); + return -ENXIO; + } + + port = vcc_get_ne(tty->index); + if (unlikely(!port)) { + pr_err("VCC: break_ctl: Failed to find VCC port\n"); + return -ENODEV; + } + + /* Turn off break */ + if (state == 0) { + vcc_put(port, false); + return 0; + } + + spin_lock_irqsave(&port->lock, flags); + + if (vcc_send_ctl(port, VCC_CTL_BREAK) < 0) + vcc_kick_tx(port); + + spin_unlock_irqrestore(&port->lock, flags); + + vcc_put(port, false); + + return 0; +} + static const struct tty_operations vcc_ops = { .open = vcc_open, .close = vcc_close, @@ -967,6 +1001,7 @@ static const struct tty_operations vcc_ops = { .write = vcc_write, .write_room = vcc_write_room, .chars_in_buffer = vcc_chars_in_buffer, + .break_ctl = vcc_break_ctl, }; #define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW) -- cgit v1.2.3 From 8f03f948ba356b7cb254b45a4e78b5b3d86fd3bb Mon Sep 17 00:00:00 2001 From: Jag Raman Date: Tue, 15 Aug 2017 17:03:09 -0400 Subject: sparc64: vcc: Add install & cleanup TTY operations Add handlers to support TTY install & cleanup operations Signed-off-by: Jagannathan Raman Reviewed-by: Liam Merwick Reviewed-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/tty/vcc.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index f913217159ee..5e608173b091 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -487,6 +487,8 @@ static struct vio_version vcc_versions[] = { { .major = 1, .minor = 0 }, }; +static struct tty_port_operations vcc_port_ops = { 0 }; + static ssize_t vcc_sysfs_domain_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -994,6 +996,75 @@ static int vcc_break_ctl(struct tty_struct *tty, int state) return 0; } +static int vcc_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct vcc_port *port_vcc; + struct tty_port *port_tty; + int ret; + + if (unlikely(!tty)) { + pr_err("VCC: install: Invalid TTY handle\n"); + return -ENXIO; + } + + if (tty->index >= VCC_MAX_PORTS) + return -EINVAL; + + ret = tty_standard_install(driver, tty); + if (ret) + return ret; + + port_tty = kzalloc(sizeof(struct tty_port), GFP_KERNEL); + if (!port_tty) + return -ENOMEM; + + port_vcc = vcc_get(tty->index, true); + if (!port_vcc) { + pr_err("VCC: install: Failed to find VCC port\n"); + tty->port = NULL; + kfree(port_tty); + return -ENODEV; + } + + tty_port_init(port_tty); + port_tty->ops = &vcc_port_ops; + tty->port = port_tty; + + port_vcc->tty = tty; + + vcc_put(port_vcc, true); + + return 0; +} + +static void vcc_cleanup(struct tty_struct *tty) +{ + struct vcc_port *port; + + if (unlikely(!tty)) { + pr_err("VCC: cleanup: Invalid TTY handle\n"); + return; + } + + port = vcc_get(tty->index, true); + if (port) { + port->tty = NULL; + + if (port->removed) { + vcc_table_remove(tty->index); + kfree(port->vio.name); + kfree(port->domain); + kfree(port); + } else { + vcc_put(port, true); + } + } + + tty_port_destroy(tty->port); + kfree(tty->port); + tty->port = 
NULL; +} + static const struct tty_operations vcc_ops = { .open = vcc_open, .close = vcc_close, @@ -1002,6 +1073,8 @@ static const struct tty_operations vcc_ops = { .write_room = vcc_write_room, .chars_in_buffer = vcc_chars_in_buffer, .break_ctl = vcc_break_ctl, + .install = vcc_install, + .cleanup = vcc_cleanup, }; #define VCC_TTY_FLAGS (TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_REAL_RAW) -- cgit v1.2.3 From 44382b0195e608732b0bed14d51b5e5f79f46471 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Fri, 11 Aug 2017 16:46:49 -0700 Subject: sparc64: Support huge PUD case in get_user_pages get_user_pages() is used to do direct IO. It already handles the case where the address range is backed by PMD huge pages. This patch now adds the case where the range could be backed by PUD huge pages. Signed-off-by: Nitin Gupta Signed-off-by: David S. Miller --- arch/sparc/include/asm/pgtable_64.h | 15 +++++++++++-- arch/sparc/mm/gup.c | 45 ++++++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 3 deletions(-) diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 6fbd931f0570..2579f5a22f46 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -687,6 +687,8 @@ static inline unsigned long pmd_write(pmd_t pmd) return pte_write(pte); } +#define pud_write(pud) pte_write(__pte(pud_val(pud))) + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline unsigned long pmd_dirty(pmd_t pmd) { @@ -823,9 +825,18 @@ static inline unsigned long __pmd_page(pmd_t pmd) return ((unsigned long) __va(pfn << PAGE_SHIFT)); } + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + pte_t pte = __pte(pud_val(pud)); + unsigned long pfn; + + pfn = pte_pfn(pte); + + return ((unsigned long) __va(pfn << PAGE_SHIFT)); +} + #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) -#define pud_page_vaddr(pud) \ - ((unsigned long) __va(pud_val(pud))) #define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud)) #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) #define pud_present(pud) (pud_val(pud) != 0U) diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index f80cfc64c55b..d809099ffd47 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -103,6 +103,45 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, return 1; } +static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr, + unsigned long end, int write, struct page **pages, + int *nr) +{ + struct page *head, *page; + int refs; + + if (!(pud_val(pud) & _PAGE_VALID)) + return 0; + + if (write && !pud_write(pud)) + return 0; + + refs = 0; + page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); + head = compound_head(page); + do { + VM_BUG_ON(compound_head(page) != head); + pages[*nr] = page; + (*nr)++; + page++; + refs++; + } while (addr += PAGE_SIZE, addr != end); + + if (!page_cache_add_speculative(head, refs)) { + *nr -= refs; + return 0; + } + + if (unlikely(pud_val(pud) != pud_val(*pudp))) { + *nr -= refs; + while (refs--) + put_page(head); + return 0; + } + + return 1; +} + static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { @@ -141,7 +180,11 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; - if (!gup_pmd_range(pud, addr, next, write, pages, nr)) + if (unlikely(pud_large(pud))) { + if (!gup_huge_pud(pudp, pud, addr, next, + write, pages, nr)) + return 0; + } else if (!gup_pmd_range(pud, addr, 
next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); -- cgit v1.2.3 From df7b2155bbe75525531e47c8a67ae4f1dbbae976 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Fri, 11 Aug 2017 16:46:50 -0700 Subject: sparc64: Add 16GB hugepage support Adds support for 16GB hugepage size. To use this page size use kernel parameters as: default_hugepagesz=16G hugepagesz=16G hugepages=10 Testing: Tested with the stream benchmark which allocates 48G of arrays backed by 16G hugepages and does RW operation on them in parallel. Orabug: 25362942 Cc: Anthony Yznaga Reviewed-by: Bob Picco Signed-off-by: Nitin Gupta Signed-off-by: David S. Miller --- arch/sparc/include/asm/hugetlb.h | 7 ++++ arch/sparc/include/asm/page_64.h | 3 +- arch/sparc/include/asm/pgtable_64.h | 5 +++ arch/sparc/include/asm/tsb.h | 36 ++++++++++++++++++ arch/sparc/kernel/head_64.S | 2 +- arch/sparc/kernel/tsb.S | 2 +- arch/sparc/kernel/vmlinux.lds.S | 5 +++ arch/sparc/mm/hugetlbpage.c | 74 ++++++++++++++++++++++++++----------- arch/sparc/mm/init_64.c | 54 +++++++++++++++++++++++---- 9 files changed, 157 insertions(+), 31 deletions(-) diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index d1f837dc77a4..0ca7caab1b06 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -4,6 +4,13 @@ #include #include +#ifdef CONFIG_HUGETLB_PAGE +struct pud_huge_patch_entry { + unsigned int addr; + unsigned int insn; +}; +extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end; +#endif void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h index 5961b2d8398a..8ee1f97589a1 100644 --- a/arch/sparc/include/asm/page_64.h +++ b/arch/sparc/include/asm/page_64.h @@ -17,6 +17,7 @@ #define HPAGE_SHIFT 23 #define REAL_HPAGE_SHIFT 22 +#define HPAGE_16GB_SHIFT 34 #define HPAGE_2GB_SHIFT 31 #define HPAGE_256MB_SHIFT 28 #define HPAGE_64K_SHIFT 16 @@ -28,7 +29,7 @@ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) -#define HUGE_MAX_HSTATE 4 +#define HUGE_MAX_HSTATE 5 #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 2579f5a22f46..4fefe3762083 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -414,6 +414,11 @@ static inline bool is_hugetlb_pmd(pmd_t pmd) return !!(pmd_val(pmd) & _PAGE_PMD_HUGE); } +static inline bool is_hugetlb_pud(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_PUD_HUGE); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline pmd_t pmd_mkhuge(pmd_t pmd) { diff --git a/arch/sparc/include/asm/tsb.h b/arch/sparc/include/asm/tsb.h index 32258e08da03..acf55063aa3d 100644 --- a/arch/sparc/include/asm/tsb.h +++ b/arch/sparc/include/asm/tsb.h @@ -195,6 +195,41 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; nop; \ 699: + /* PUD has been loaded into REG1, interpret the value, seeing + * if it is a HUGE PUD or a normal one. If it is not valid + * then jump to FAIL_LABEL. If it is a HUGE PUD, and it + * translates to a valid PTE, branch to PTE_LABEL. + * + * We have to propagate bits [32:22] from the virtual address + * to resolve at 4M granularity. 
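That way each 4M region of a 16GB mapping gets its own TSB entry with the correct offset folded into the TTE.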
+ */ +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) +#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ +700: ba 700f; \ + nop; \ + .section .pud_huge_patch, "ax"; \ + .word 700b; \ + nop; \ + .previous; \ + brz,pn REG1, FAIL_LABEL; \ + sethi %uhi(_PAGE_PUD_HUGE), REG2; \ + sllx REG2, 32, REG2; \ + andcc REG1, REG2, %g0; \ + be,pt %xcc, 700f; \ + sethi %hi(0x1ffc0000), REG2; \ + sllx REG2, 1, REG2; \ + brgez,pn REG1, FAIL_LABEL; \ + andn REG1, REG2, REG1; \ + and VADDR, REG2, REG2; \ + brlz,pt REG1, PTE_LABEL; \ + or REG1, REG2, REG1; \ +700: +#else +#define USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, PTE_LABEL) \ + brz,pn REG1, FAIL_LABEL; \ + nop; +#endif + /* PMD has been loaded into REG1, interpret the value, seeing * if it is a HUGE PMD or a normal one. If it is not valid * then jump to FAIL_LABEL. If it is a HUGE PMD, and it @@ -242,6 +277,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; srlx REG2, 64 - PAGE_SHIFT, REG2; \ andn REG2, 0x7, REG2; \ ldxa [REG1 + REG2] ASI_PHYS_USE_EC, REG1; \ + USER_PGTABLE_CHECK_PUD_HUGE(VADDR, REG1, REG2, FAIL_LABEL, 800f) \ brz,pn REG1, FAIL_LABEL; \ sllx VADDR, 64 - (PMD_SHIFT + PMD_BITS), REG2; \ srlx REG2, 64 - PAGE_SHIFT, REG2; \ diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index bf9a5acc8432..4de9fbd1a177 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -893,7 +893,6 @@ sparc64_boot_end: #include "misctrap.S" #include "syscalls.S" #include "helpers.S" -#include "hvcalls.S" #include "sun4v_tlb_miss.S" #include "sun4v_ivec.S" #include "ktlb.S" @@ -938,6 +937,7 @@ swapper_4m_tsb: ! 0x0000000000428000 +#include "hvcalls.S" #include "systbls_64.S" .data diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index db872dbfafe9..f74115364b1e 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -117,7 +117,7 @@ tsb_miss_page_table_walk_sun4v_fastpath: /* Valid PTE is now in %g5. 
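The huge-page test below must now match PUD-sized as well as PMD-sized TTEs, hence the widened bit mask.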
*/ #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - sethi %uhi(_PAGE_PMD_HUGE), %g7 + sethi %uhi(_PAGE_PMD_HUGE | _PAGE_PUD_HUGE), %g7 sllx %g7, 32, %g7 andcc %g5, %g7, %g0 diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 03b3d65d1266..34d37e6c2d06 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -154,6 +154,11 @@ SECTIONS *(.get_tick_patch) __get_tick_patch_end = .; } + .pud_huge_patch : { + __pud_huge_patch = .; + *(.pud_huge_patch) + __pud_huge_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) #ifdef CONFIG_JUMP_LABEL diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 28ee8d8ffa07..7acb84db952c 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift) pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V; switch (shift) { + case HPAGE_16GB_SHIFT: + hugepage_size = _PAGE_SZ16GB_4V; + pte_val(entry) |= _PAGE_PUD_HUGE; + break; case HPAGE_2GB_SHIFT: hugepage_size = _PAGE_SZ2GB_4V; pte_val(entry) |= _PAGE_PMD_HUGE; @@ -187,6 +191,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry) unsigned int shift; switch (tte_szbits) { + case _PAGE_SZ16GB_4V: + shift = HPAGE_16GB_SHIFT; + break; case _PAGE_SZ2GB_4V: shift = HPAGE_2GB_SHIFT; break; @@ -263,7 +270,12 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); - if (pud) { + if (!pud) + return NULL; + + if (sz >= PUD_SIZE) + pte = (pte_t *)pud; + else { pmd = pmd_alloc(mm, pud, addr); if (!pmd) return NULL; @@ -289,12 +301,16 @@ pte_t *huge_pte_offset(struct mm_struct *mm, if (!pgd_none(*pgd)) { pud = pud_offset(pgd, addr); if (!pud_none(*pud)) { - pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) { - if (is_hugetlb_pmd(*pmd)) - pte = (pte_t *)pmd; - else - pte = pte_offset_map(pmd, addr); + if (is_hugetlb_pud(*pud)) + pte = (pte_t *)pud; + else { + pmd = pmd_offset(pud, addr); + if (!pmd_none(*pmd)) { + if (is_hugetlb_pmd(*pmd)) + pte = (pte_t *)pmd; + else + pte = pte_offset_map(pmd, addr); + } } } } @@ -305,12 +321,20 @@ pte_t *huge_pte_offset(struct mm_struct *mm, void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t entry) { - unsigned int i, nptes, orig_shift, shift; - unsigned long size; + unsigned int nptes, orig_shift, shift; + unsigned long i, size; pte_t orig; size = huge_tte_to_size(entry); - shift = size >= HPAGE_SIZE ? PMD_SHIFT : PAGE_SHIFT; + + shift = PAGE_SHIFT; + if (size >= PUD_SIZE) + shift = PUD_SHIFT; + else if (size >= PMD_SIZE) + shift = PMD_SHIFT; + else + shift = PAGE_SHIFT; + nptes = size >> shift; if (!pte_present(*ptep) && pte_present(entry)) @@ -333,19 +357,23 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - unsigned int i, nptes, hugepage_shift; + unsigned int i, nptes, orig_shift, shift; unsigned long size; pte_t entry; entry = *ptep; size = huge_tte_to_size(entry); - if (size >= HPAGE_SIZE) - nptes = size >> PMD_SHIFT; + + shift = PAGE_SHIFT; + if (size >= PUD_SIZE) + shift = PUD_SHIFT; + else if (size >= PMD_SIZE) + shift = PMD_SHIFT; else - nptes = size >> PAGE_SHIFT; + shift = PAGE_SHIFT; - hugepage_shift = pte_none(entry) ? PAGE_SHIFT : - huge_tte_to_shift(entry); + nptes = size >> shift; + orig_shift = pte_none(entry) ? 
PAGE_SHIFT : huge_tte_to_shift(entry); if (pte_present(entry)) mm->context.hugetlb_pte_count -= nptes; @@ -354,11 +382,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, for (i = 0; i < nptes; i++) ptep[i] = __pte(0UL); - maybe_tlb_batch_add(mm, addr, ptep, entry, 0, hugepage_shift); + maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift); /* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */ if (size == HPAGE_SIZE) maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0, - hugepage_shift); + orig_shift); return entry; } @@ -371,7 +399,8 @@ int pmd_huge(pmd_t pmd) int pud_huge(pud_t pud) { - return 0; + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID; } static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, @@ -435,8 +464,11 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; - hugetlb_free_pmd_range(tlb, pud, addr, next, floor, - ceiling); + if (is_hugetlb_pud(*pud)) + pud_clear(pud); + else + hugetlb_free_pmd_range(tlb, pud, addr, next, floor, + ceiling); } while (pud++, addr = next, addr != end); start &= PGDIR_MASK; diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index afa0099f3748..b2ba410b26f4 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -348,6 +348,18 @@ static int __init hugetlbpage_init(void) arch_initcall(hugetlbpage_init); +static void __init pud_huge_patch(void) +{ + struct pud_huge_patch_entry *p; + unsigned long addr; + + p = &__pud_huge_patch; + addr = p->addr; + *(unsigned int *)addr = p->insn; + + __asm__ __volatile__("flush %0" : : "r" (addr)); +} + static int __init setup_hugepagesz(char *string) { unsigned long long hugepage_size; @@ -360,6 +372,11 @@ static int __init setup_hugepagesz(char *string) hugepage_shift = ilog2(hugepage_size); switch (hugepage_shift) { + case HPAGE_16GB_SHIFT: + hv_pgsz_mask = HV_PGSZ_MASK_16GB; + hv_pgsz_idx = HV_PGSZ_IDX_16GB; + pud_huge_patch(); + break; case HPAGE_2GB_SHIFT: hv_pgsz_mask = HV_PGSZ_MASK_2GB; hv_pgsz_idx = HV_PGSZ_IDX_2GB; @@ -400,6 +417,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * { struct mm_struct *mm; unsigned long flags; + bool is_huge_tsb; pte_t pte = *ptep; if (tlb_type != hypervisor) { @@ -417,15 +435,37 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t * spin_lock_irqsave(&mm->context.lock, flags); + is_huge_tsb = false; #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) - if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) && - is_hugetlb_pmd(__pmd(pte_val(pte)))) { - /* We are fabricating 8MB pages using 4MB real hw pages. */ - pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); - __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT, - address, pte_val(pte)); - } else + if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) { + unsigned long hugepage_size = PAGE_SIZE; + + if (is_vm_hugetlb_page(vma)) + hugepage_size = huge_page_size(hstate_vma(vma)); + + if (hugepage_size >= PUD_SIZE) { + unsigned long mask = 0x1ffc00000UL; + + /* Transfer bits [32:22] from address to resolve + * at 4M granularity. + */ + pte_val(pte) &= ~mask; + pte_val(pte) |= (address & mask); + } else if (hugepage_size >= PMD_SIZE) { + /* We are fabricating 8MB pages using 4MB + * real hw pages. 
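Bit 22 (REAL_HPAGE_SHIFT) of the faulting address selects which 4MB half of the 8MB page this TSB entry covers.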
+ */ + pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT)); + } + + if (hugepage_size >= PMD_SIZE) { + __update_mmu_tsb_insert(mm, MM_TSB_HUGE, + REAL_HPAGE_SHIFT, address, pte_val(pte)); + is_huge_tsb = true; + } + } #endif + if (!is_huge_tsb) __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT, address, pte_val(pte)); -- cgit v1.2.3 From 4dbe87d5a70215af714b8ad44303ae7b333a2d20 Mon Sep 17 00:00:00 2001 From: Nitin Gupta Date: Fri, 11 Aug 2017 16:46:51 -0700 Subject: sparc64: Cleanup hugepage table walk functions Flatten out nested code structure in huge_pte_offset() and huge_pte_alloc(). Signed-off-by: Nitin Gupta Signed-off-by: David S. Miller --- arch/sparc/mm/hugetlbpage.c | 54 +++++++++++++++++---------------------------- 1 file changed, 20 insertions(+), 34 deletions(-) diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 7acb84db952c..bcd8cdbc377f 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -266,27 +266,19 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; - pte_t *pte = NULL; pgd = pgd_offset(mm, addr); pud = pud_alloc(mm, pgd, addr); if (!pud) return NULL; - if (sz >= PUD_SIZE) - pte = (pte_t *)pud; - else { - pmd = pmd_alloc(mm, pud, addr); - if (!pmd) - return NULL; - - if (sz >= PMD_SIZE) - pte = (pte_t *)pmd; - else - pte = pte_alloc_map(mm, pmd, addr); - } - - return pte; + return (pte_t *)pud; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return NULL; + if (sz >= PMD_SIZE) + return (pte_t *)pmd; + return pte_alloc_map(mm, pmd, addr); } pte_t *huge_pte_offset(struct mm_struct *mm, @@ -295,27 +287,21 @@ pte_t *huge_pte_offset(struct mm_struct *mm, pgd_t *pgd; pud_t *pud; pmd_t *pmd; - pte_t *pte = NULL; pgd = pgd_offset(mm, addr); - if (!pgd_none(*pgd)) { - pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) { - if (is_hugetlb_pud(*pud)) - pte = (pte_t *)pud; - else { - pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) { - if (is_hugetlb_pmd(*pmd)) - pte = (pte_t *)pmd; - else - pte = pte_offset_map(pmd, addr); - } - } - } - } - - return pte; + if (pgd_none(*pgd)) + return NULL; + pud = pud_offset(pgd, addr); + if (pud_none(*pud)) + return NULL; + if (is_hugetlb_pud(*pud)) + return (pte_t *)pud; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + return NULL; + if (is_hugetlb_pmd(*pmd)) + return (pte_t *)pmd; + return pte_offset_map(pmd, addr); } void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, -- cgit v1.2.3 From c6b4ee9eba97b64750d5e8be31c04b70592592b7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 26 Aug 2017 09:12:05 +0300 Subject: sparc64: vcc: Check for IS_ERR() instead of NULL The tty_alloc_driver() function never returns NULL, it returns error pointers on error. Fixes: ce808b746325 ("sparc64: vcc: TTY driver initialization and cleanup") Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/tty/vcc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 5e608173b091..716729129e01 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -1086,9 +1086,9 @@ static int vcc_tty_init(void) pr_info("VCC: %s\n", version); vcc_tty_driver = tty_alloc_driver(VCC_MAX_PORTS, VCC_TTY_FLAGS); - if (!vcc_tty_driver) { + if (IS_ERR(vcc_tty_driver)) { pr_err("VCC: TTY driver alloc failed\n"); - return -ENOMEM; + return PTR_ERR(vcc_tty_driver); } vcc_tty_driver->driver_name = vcc_driver_name; -- cgit v1.2.3 From 03949b1cb9f1aa34379ad34d388668e40e79fb9b Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 26 Aug 2017 16:58:34 +0530 Subject: sparc: leon: grpci2: constify of_device_id of_device_id structures are not supposed to change at runtime, and all functions that work with them take const of_device_ids. So mark the match table const and __initconst. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- arch/sparc/kernel/leon_pci_grpci2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index f727c4de1316..ff0e5c90310f 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -886,7 +886,7 @@ err1: return err; } -static struct of_device_id grpci2_of_match[] = { +static const struct of_device_id grpci2_of_match[] __initconst = { { .name = "GAISLER_GRPCI2", }, -- cgit v1.2.3 From 4154bb821f0bc63eab4de67f59058c5c2a718862 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 26 Aug 2017 16:58:48 +0530 Subject: sparc: leon: grpci1: constify of_device_id of_device_id structures are not supposed to change at runtime, and all functions that work with them take const of_device_ids. So mark the match table const and __initconst. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- arch/sparc/kernel/leon_pci_grpci1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index 1e77128a8f88..83ba5005d44c 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -695,7 +695,7 @@ err1: return err; } -static struct of_device_id grpci1_of_match[] = { +static const struct of_device_id grpci1_of_match[] __initconst = { { .name = "GAISLER_PCIFBRG", }, -- cgit v1.2.3 From 5bd0ea9107dca975dc4ba4d9de39b4938d2cb36d Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Mon, 28 Aug 2017 23:58:44 +0530 Subject: sparc64: vcc: make ktermios const Make this const as it is not modified anywhere. Signed-off-by: Bhumika Goyal Signed-off-by: David S. Miller --- drivers/tty/vcc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c index 716729129e01..ef01d24858cd 100644 --- a/drivers/tty/vcc.c +++ b/drivers/tty/vcc.c @@ -102,7 +102,7 @@ module_param(vcc_dbg_vio, uint, 0664); * add anything that will cause echoing or we'll go into recursive * loop echoing chars back and forth with the console drivers.
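Hence the deliberately spartan defaults below: breaks and parity errors are ignored on input, and no echo or line-editing flags are set.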
*/ -static struct ktermios vcc_tty_termios = { +static const struct ktermios vcc_tty_termios = { .c_iflag = IGNBRK | IGNPAR, .c_oflag = OPOST, .c_cflag = B38400 | CS8 | CREAD | HUPCL, -- cgit v1.2.3 From a7159a87a3836f61a97882e671d2d66bbb96c62e Mon Sep 17 00:00:00 2001 From: Anthony Yznaga Date: Fri, 18 Aug 2017 12:40:36 -0700 Subject: sparc64: speed up etrap/rtrap on NG2 and later processors For many sun4v processor types, reading or writing a privileged register has a latency of 40 to 70 cycles. Use a combination of the low-latency allclean, otherw, normalw, and nop instructions in etrap and rtrap to replace 2 rdpr and 5 wrpr instructions and improve etrap/rtrap performance. allclean, otherw, and normalw are available on NG2 and later processors. The average ticks to execute the flush windows trap ("ta 0x3") with and without this patch on select platforms: CPU Not patched Patched % Latency Reduction NG2 1762 1558 -11.58 NG4 3619 3204 -11.47 M7 3015 2624 -12.97 SPARC64-X 829 770 -7.12 Signed-off-by: Anthony Yznaga Signed-off-by: David S. Miller --- arch/sparc/include/asm/trap_block.h | 2 ++ arch/sparc/kernel/etrap_64.S | 26 ++++++++++++++++++++++---- arch/sparc/kernel/rtrap_64.S | 13 +++++++++++-- arch/sparc/kernel/setup_64.c | 5 +++++ arch/sparc/kernel/vmlinux.lds.S | 5 +++++ 5 files changed, 45 insertions(+), 6 deletions(-) diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h index ff05992dae7a..dfc538609eb2 100644 --- a/arch/sparc/include/asm/trap_block.h +++ b/arch/sparc/include/asm/trap_block.h @@ -73,6 +73,8 @@ struct sun4v_1insn_patch_entry { }; extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch, __sun4v_1insn_patch_end; +extern struct sun4v_1insn_patch_entry __fast_win_ctrl_1insn_patch, + __fast_win_ctrl_1insn_patch_end; struct sun4v_2insn_patch_entry { unsigned int addr; diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S index 1276ca2567ba..5c237467d156 100644 --- a/arch/sparc/kernel/etrap_64.S +++ b/arch/sparc/kernel/etrap_64.S @@ -38,7 +38,11 @@ etrap_syscall: TRAP_LOAD_THREAD_REG(%g6, %g1) or %g1, %g3, %g1 bne,pn %xcc, 1f sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2 - wrpr %g0, 7, %cleanwin +661: wrpr %g0, 7, %cleanwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x85880000 ! allclean + .previous sethi %hi(TASK_REGOFF), %g2 sethi %hi(TSTATE_PEF), %g3 @@ -88,16 +92,30 @@ etrap_save: save %g2, -STACK_BIAS, %sp bne,pn %xcc, 3f mov PRIMARY_CONTEXT, %l4 - rdpr %canrestore, %g3 +661: rdpr %canrestore, %g3 + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous + rdpr %wstate, %g2 - wrpr %g0, 0, %canrestore +661: wrpr %g0, 0, %canrestore + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous sll %g2, 3, %g2 /* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */ mov 1, %l5 sth %l5, [%l6 + TI_SYS_NOERROR] - wrpr %g3, 0, %otherwin +661: wrpr %g3, 0, %otherwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x87880000 ! 
otherw + .previous + wrpr %g2, 0, %wstate sethi %hi(sparc64_kern_pri_context), %g2 ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3 diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 709a82ebd294..dff86fad0a1f 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -224,10 +224,19 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 rdpr %otherwin, %l2 srl %l1, 3, %l1 - wrpr %l2, %g0, %canrestore +661: wrpr %l2, %g0, %canrestore + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + .word 0x89880000 ! normalw + .previous + wrpr %l1, %g0, %wstate brnz,pt %l2, user_rtt_restore - wrpr %g0, %g0, %otherwin +661: wrpr %g0, %g0, %otherwin + .section .fast_win_ctrl_1insn_patch, "ax" + .word 661b + nop + .previous ldx [%g6 + TI_FLAGS], %g3 wr %g0, ASI_AIUP, %asi diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index c4088a3b1051..db4c4d7e28a0 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -300,6 +300,11 @@ static void __init sun4v_patch(void) break; } + if (sun4v_chip_type != SUN4V_CHIP_NIAGARA1) { + sun4v_patch_1insn_range(&__fast_win_ctrl_1insn_patch, + &__fast_win_ctrl_1insn_patch_end); + } + sun4v_hvapi_init(); } diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 34d37e6c2d06..d78847d56a4b 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -159,6 +159,11 @@ SECTIONS *(.pud_huge_patch) __pud_huge_patch_end = .; } + .fast_win_ctrl_1insn_patch : { + __fast_win_ctrl_1insn_patch = .; + *(.fast_win_ctrl_1insn_patch) + __fast_win_ctrl_1insn_patch_end = .; + } PERCPU_SECTION(SMP_CACHE_BYTES) #ifdef CONFIG_JUMP_LABEL -- cgit v1.2.3 From b6fe1089667a7afcc2cf92cdaec590c7b8381715 Mon Sep 17 00:00:00 2001 From: Rob Gardner Date: Fri, 8 Sep 2017 16:34:21 -0600 Subject: sparc64: Handle additional cases of no fault loads Load instructions using ASI_PNF or other no-fault ASIs should not cause a SIGSEGV or SIGBUS. A garden variety unmapped address follows the TSB miss path, and when no valid mapping is found in the process page tables, the miss handler checks to see if the access was via a no-fault ASI. It then fixes up the target register with a zero, and skips the no-fault load instruction. But different paths are taken for data access exceptions and alignment traps, and these do not respect the no-fault ASI. We add checks in these paths for the no-fault ASI, and fix up the target register and TPC just like in the TSB miss case. Signed-off-by: Rob Gardner Acked-by: Sam Ravnborg Signed-off-by: David S. Miller --- arch/sparc/kernel/traps_64.c | 51 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index ad31af1dd726..c74f2dffcc13 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -265,6 +265,45 @@ void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, u sun4v_insn_access_exception(regs, addr, type_ctx); } +bool is_no_fault_exception(struct pt_regs *regs) +{ + unsigned char asi; + u32 insn; + + if (get_user(insn, (u32 __user *)regs->tpc) == -EFAULT) + return false; + + /* + * Must do a little instruction decoding here in order to + * decide on a course of action. 
The bits of interest are: + * insn[31:30] = op, where 3 indicates the load/store group + * insn[24:19] = op3, which identifies individual opcodes + * insn[13] indicates an immediate offset + * op3[4]=1 identifies alternate space instructions + * op3[5:4]=3 identifies floating point instructions + * op3[2]=1 identifies stores + * See "Opcode Maps" in the appendix of any Sparc V9 + * architecture spec for full details. + */ + if ((insn & 0xc0800000) == 0xc0800000) { /* op=3, op3[4]=1 */ + if (insn & 0x2000) /* immediate offset */ + asi = (regs->tstate >> 24); /* saved %asi */ + else + asi = (insn >> 5); /* immediate asi */ + if ((asi & 0xf2) == ASI_PNF) { + if (insn & 0x1000000) { /* op3[5:4]=3 */ + handle_ldf_stq(insn, regs); + return true; + } else if (insn & 0x200000) { /* op3[2], stores */ + return false; + } + handle_ld_nf(insn, regs); + return true; + } + } + return false; +} + void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { enum ctx_state prev_state = exception_enter(); @@ -296,6 +335,9 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un die_if_kernel("Dax", regs); } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; @@ -352,6 +394,9 @@ void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsig regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = SEGV_MAPERR; @@ -2575,6 +2620,9 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); goto out; } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; @@ -2597,6 +2645,9 @@ void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_c kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); return; } + if (is_no_fault_exception(regs)) + return; + info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; -- cgit v1.2.3
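The decode logic in is_no_fault_exception() is compact, so here is a minimal standalone C sketch of the same ASI classification, assuming a raw 32-bit instruction word and a caller-supplied saved %asi value in place of pt_regs; uses_no_fault_asi() and the hand-assembled test word are hypothetical, and the dispatch to handle_ldf_stq()/handle_ld_nf() is omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASI_PNF 0x82	/* SPARC V9 primary no-fault ASI */

/* Mirror the classification in is_no_fault_exception():
 * op[31:30]=3 selects the load/store group and op3[4]=1 the
 * alternate-space forms; the effective ASI is then either the
 * saved %asi (i=1, immediate offset) or insn[12:5] (i=0).
 * Masking with 0xf2 folds the secondary and little-endian
 * no-fault variants onto ASI_PNF.
 */
static bool uses_no_fault_asi(uint32_t insn, uint8_t saved_asi)
{
	uint8_t asi;

	if ((insn & 0xc0800000) != 0xc0800000)	/* op=3, op3[4]=1 */
		return false;

	if (insn & 0x2000)	/* i=1: reg+imm13 form, implicit %asi */
		asi = saved_asi;
	else			/* i=0: ASI field encoded in the instruction */
		asi = (insn >> 5) & 0xff;

	return (asi & 0xf2) == ASI_PNF;
}

int main(void)
{
	/* hand-assembled "lduwa [%g1 + %g0] ASI_PNF, %g2" */
	uint32_t insn = 0xc4805040;

	printf("no-fault load: %s\n",
	       uses_no_fault_asi(insn, 0x80) ? "yes" : "no");
	return 0;
}

The kernel path additionally sends floating-point loads (op3[5:4]=3) to handle_ldf_stq() and lets stores (op3[2]=1) fault normally; the sketch above only answers the ASI question.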