Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--  arch/parisc/kernel/cache.c                    | 419
-rw-r--r--  arch/parisc/kernel/drivers.c                  |   4
-rw-r--r--  arch/parisc/kernel/entry.S                    |   6
-rw-r--r--  arch/parisc/kernel/ftrace.c                   |   7
-rw-r--r--  arch/parisc/kernel/jump_label.c               |   2
-rw-r--r--  arch/parisc/kernel/kgdb.c                     |   2
-rw-r--r--  arch/parisc/kernel/kprobes.c                  |   2
-rw-r--r--  arch/parisc/kernel/module.c                   |  12
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c             |   5
-rw-r--r--  arch/parisc/kernel/patch.c                    |   2
-rw-r--r--  arch/parisc/kernel/pdt.c                      |   2
-rw-r--r--  arch/parisc/kernel/perf.c                     |   7
-rw-r--r--  arch/parisc/kernel/smp.c                      |   4
-rw-r--r--  arch/parisc/kernel/sys_parisc.c               |  11
-rw-r--r--  arch/parisc/kernel/sys_parisc32.c             |   9
-rw-r--r--  arch/parisc/kernel/syscall.S                  |  14
-rw-r--r--  arch/parisc/kernel/syscalls/syscall.tbl       |  12
-rw-r--r--  arch/parisc/kernel/time.c                     | 261
-rw-r--r--  arch/parisc/kernel/traps.c                    |   6
-rw-r--r--  arch/parisc/kernel/unaligned.c                |   5
-rw-r--r--  arch/parisc/kernel/unaligned.h                |   3
-rw-r--r--  arch/parisc/kernel/vdso32/Makefile            |  27
-rw-r--r--  arch/parisc/kernel/vdso32/vdso32.lds.S        |   3
-rw-r--r--  arch/parisc/kernel/vdso32/vdso32_generic.c    |  32
-rw-r--r--  arch/parisc/kernel/vdso64/Makefile            |  29
-rw-r--r--  arch/parisc/kernel/vdso64/vdso64.lds.S        |   2
-rw-r--r--  arch/parisc/kernel/vdso64/vdso64_generic.c    |  24
27 files changed, 525 insertions(+), 387 deletions(-)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 422f3e1e6d9c..db531e58d70e 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
+#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@@ -31,20 +32,31 @@
#include <asm/mmu_context.h>
#include <asm/cachectl.h>
+#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
+
+/*
+ * When nonzero, use the _PAGE_ACCESSED bit to try to reduce the
+ * number of page flushes done by flush_cache_page_if_present().
+ * There are some pros and cons to using this option. It may
+ * increase the risk of
+ * random segmentation faults.
+ */
+#define CONFIG_FLUSH_PAGE_ACCESSED 0
+
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
+/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
-/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
+static void flush_kernel_dcache_page_addr(const void *addr);
+
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure
@@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
{
if (!static_branch_likely(&parisc_has_cache))
return;
+
+ /*
+ * The TLB is the engine of coherence on parisc. The CPU is
+ * entitled to speculate any page with a TLB mapping, so here
+ * we kill the mapping then flush the page along a special
+ * flush-only alias mapping. This guarantees that the page is no
+ * longer in the cache for any process, nor may it be speculatively
+ * read in (until the user or kernel specifically accesses it,
+ * of course).
+ */
+ flush_tlb_page(vma, vmaddr);
+
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
@@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
-static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+static void flush_kernel_dcache_page_addr(const void *addr)
{
- unsigned long flags, space, pgd, prot;
-#ifdef CONFIG_TLB_PTLOCK
- unsigned long pgd_lock;
-#endif
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- vmaddr &= PAGE_MASK;
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent data cache move-in */
preempt_disable();
+ flush_dcache_page_asm(__pa(vaddr), vaddr);
+ preempt_enable();
+}
- /* Set context for flush */
- local_irq_save(flags);
- prot = mfctl(8);
- space = mfsp(SR_USER);
- pgd = mfctl(25);
-#ifdef CONFIG_TLB_PTLOCK
- pgd_lock = mfctl(28);
-#endif
- switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
- local_irq_restore(flags);
-
- flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- flush_tlb_page(vma, vmaddr);
+static void flush_kernel_icache_page_addr(const void *addr)
+{
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- /* Restore previous context */
- local_irq_save(flags);
-#ifdef CONFIG_TLB_PTLOCK
- mtctl(pgd_lock, 28);
-#endif
- mtctl(pgd, 25);
- mtsp(space, SR_USER);
- mtctl(prot, 8);
- local_irq_restore(flags);
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent instruction cache move-in */
+ preempt_disable();
+ flush_icache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
+void kunmap_flush_on_unmap(const void *addr)
+{
+ flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_flush_on_unmap);
+
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr)
{
@@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
for (;;) {
flush_kernel_dcache_page_addr(kaddr);
- flush_kernel_icache_page(kaddr);
+ flush_kernel_icache_page_addr(kaddr);
if (--nr == 0)
break;
kaddr += PAGE_SIZE;
}
}
+/*
+ * Walk page directory for MM to find PTEP pointer for address ADDR.
+ */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
@@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte)
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
+/*
+ * Return the user physical address, or 0 if the page is not present.
+ */
+static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long flags, space, pgd, prot, pa;
+#ifdef CONFIG_TLB_PTLOCK
+ unsigned long pgd_lock;
+#endif
+
+ /* Save context */
+ local_irq_save(flags);
+ prot = mfctl(8);
+ space = mfsp(SR_USER);
+ pgd = mfctl(25);
+#ifdef CONFIG_TLB_PTLOCK
+ pgd_lock = mfctl(28);
+#endif
+
+ /* Set context for lpa_user */
+ switch_mm_irqs_off(NULL, mm, NULL);
+ pa = lpa_user(addr);
+
+ /* Restore previous context */
+#ifdef CONFIG_TLB_PTLOCK
+ mtctl(pgd_lock, 28);
+#endif
+ mtctl(pgd, 25);
+ mtsp(space, SR_USER);
+ mtctl(prot, 8);
+ local_irq_restore(flags);
+
+ return pa;
+}
+
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping = folio_flush_mapping(folio);
@@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio)
if (addr + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - addr) / PAGE_SIZE;
- if (parisc_requires_coherency()) {
- for (i = 0; i < nr; i++) {
- pte_t *ptep = get_ptep(vma->vm_mm,
- addr + i * PAGE_SIZE);
- if (!ptep)
- continue;
- if (pte_needs_flush(*ptep))
- flush_user_cache_page(vma,
- addr + i * PAGE_SIZE);
- /* Optimise accesses to the same table? */
- pte_unmap(ptep);
- }
- } else {
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
+ for (i = 0; i < nr; i++)
+ __flush_cache_page(vma,
+ addr + i * PAGE_SIZE,
+ (pfn + i) * PAGE_SIZE);
/*
- * The TLB is the engine of coherence on parisc:
- * The CPU is entitled to speculate any page
- * with a TLB mapping, so here we kill the
- * mapping then flush the page along a special
- * flush only alias mapping. This guarantees that
- * the page is no-longer in the cache for any
- * process and nor may it be speculatively read
- * in (until the user or kernel specifically
- * accesses it, of course)
+ * Software is allowed to have any number
+ * of private mappings to a page.
*/
- for (i = 0; i < nr; i++)
- flush_tlb_page(vma, addr + i * PAGE_SIZE);
- if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
- != (addr & (SHM_COLOUR - 1))) {
- for (i = 0; i < nr; i++)
- __flush_cache_page(vma,
- addr + i * PAGE_SIZE,
- (pfn + i) * PAGE_SIZE);
- /*
- * Software is allowed to have any number
- * of private mappings to a page.
- */
- if (!(vma->vm_flags & VM_SHARED))
- continue;
- if (old_addr)
- pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
- old_addr, addr, vma->vm_file);
- if (nr == folio_nr_pages(folio))
- old_addr = addr;
- }
+ if (!(vma->vm_flags & VM_SHARED))
+ continue;
+ if (old_addr)
+ pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+ old_addr, addr, vma->vm_file);
+ if (nr == folio_nr_pages(folio))
+ old_addr = addr;
}
WARN_ON(++count == 4096);
}
@@ -578,11 +611,7 @@ void __init parisc_setup_cache_timing(void)
threshold/1024);
set_tlb_threshold:
- if (threshold > FLUSH_TLB_THRESHOLD)
- parisc_tlb_flush_threshold = threshold;
- else
- parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
-
+ parisc_tlb_flush_threshold = max(threshold, FLUSH_TLB_THRESHOLD);
printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
parisc_tlb_flush_threshold/1024);
}
@@ -591,35 +620,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
-void flush_kernel_dcache_page_addr(const void *addr)
-{
- unsigned long flags;
-
- flush_kernel_dcache_page_asm(addr);
- purge_tlb_start(flags);
- pdtlb(SR_KERNEL, addr);
- purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-
static void flush_cache_page_if_present(struct vm_area_struct *vma,
- unsigned long vmaddr, unsigned long pfn)
+ unsigned long vmaddr)
{
+#if CONFIG_FLUSH_PAGE_ACCESSED
bool needs_flush = false;
- pte_t *ptep;
+ pte_t *ptep, pte;
- /*
- * The pte check is racy and sometimes the flush will trigger
- * a non-access TLB miss. Hopefully, the page has already been
- * flushed.
- */
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
- needs_flush = pte_needs_flush(*ptep);
+ pte = ptep_get(ptep);
+ needs_flush = pte_needs_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
- flush_cache_page(vma, vmaddr, pfn);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
+#else
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long physaddr = get_upa(mm, vmaddr);
+
+ if (physaddr)
+ __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
+#endif
}
void copy_user_highpage(struct page *to, struct page *from,
@@ -629,7 +651,7 @@ void copy_user_highpage(struct page *to, struct page *from,
kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
- flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
+ __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
@@ -638,16 +660,17 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
- flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}
/* __flush_tlb_range()
@@ -681,32 +704,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- unsigned long addr, pfn;
- pte_t *ptep;
-
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- bool needs_flush = false;
- /*
- * The vma can contain pages that aren't present. Although
- * the pte search is expensive, we need the pte to find the
- * page pfn and to check whether the page should be flushed.
- */
- ptep = get_ptep(vma->vm_mm, addr);
- if (ptep) {
- needs_flush = pte_needs_flush(*ptep);
- pfn = pte_pfn(*ptep);
- pte_unmap(ptep);
- }
- if (needs_flush) {
- if (parisc_requires_coherency()) {
- flush_user_cache_page(vma, addr);
- } else {
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
- }
- }
- }
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE)
+ flush_cache_page_if_present(vma, addr);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
@@ -757,21 +758,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
- flush_cache_all();
+ if (vma->vm_flags & VM_EXEC)
+ flush_cache_all();
+ else
+ flush_data_cache();
return;
}
- flush_cache_pages(vma, start, end);
+ flush_cache_pages(vma, start & PAGE_MASK, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- if (parisc_requires_coherency())
- flush_user_cache_page(vma, vmaddr);
- else
- __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -779,34 +778,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
if (!PageAnon(page))
return;
- if (parisc_requires_coherency()) {
- if (vma->vm_flags & VM_SHARED)
- flush_data_cache();
- else
- flush_user_cache_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
+}
+
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = ptep_get(ptep);
+
+ if (!pte_young(pte))
+ return 0;
+ set_pte(ptep, pte_mkold(pte));
+#if CONFIG_FLUSH_PAGE_ACCESSED
+ __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
+#endif
+ return 1;
+}
+
+/*
+ * After a PTE is cleared, we have no way to flush the cache for
+ * the physical page. On PA8800 and PA8900 processors, these lines
+ * can cause random cache corruption. Thus, we must flush the cache
+ * as well as the TLB when clearing a PTE that's valid.
+ */
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ else if (pte_accessible(mm, pte))
+ flush_tlb_page(vma, addr);
+
+ return pte;
+}
+
+/*
+ * The physical address for pages in the ioremap case can be obtained
+ * from the vm_struct. I wasn't able to successfully handle the
+ * vmalloc and vmap cases. We have an array of struct page pointers in
+ * the uninitialized vmalloc case but the flush failed using page_to_pfn.
+ */
+void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ unsigned long addr, physaddr;
+ struct vm_struct *vm;
+
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+
+ if (end - start >= parisc_cache_flush_threshold) {
+ flush_cache_all();
return;
}
- flush_tlb_page(vma, vmaddr);
- preempt_disable();
- flush_dcache_page_asm(page_to_phys(page), vmaddr);
- preempt_enable();
+ if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
+ flush_cache_all();
+ return;
+ }
+
+ vm = find_vm_area((void *)start);
+ if (WARN_ON_ONCE(!vm)) {
+ flush_cache_all();
+ return;
+ }
+
+ /* The physical addresses of IOREMAP regions are contiguous */
+ if (vm->flags & VM_IOREMAP) {
+ physaddr = vm->phys_addr;
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ preempt_disable();
+ flush_dcache_page_asm(physaddr, addr);
+ flush_icache_page_asm(physaddr, addr);
+ preempt_enable();
+ physaddr += PAGE_SIZE;
+ }
+ return;
+ }
+
+ flush_cache_all();
}
+EXPORT_SYMBOL(flush_cache_vmap);
+/*
+ * The vm_struct has been retired and the page table is set up. The
+ * last page in the range is a guard page. Its physical address can't
+ * be determined using lpa, so there is no way to flush the range
+ * using flush_dcache_page_asm.
+ */
+void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+ flush_data_cache();
+}
+EXPORT_SYMBOL(flush_cache_vunmap);
+
+/*
+ * On systems with PA8800/PA8900 processors, there is no way to flush
+ * a vmap range other than using the architected loop to flush the
+ * entire cache. The page directory is not set up, so we can't use
+ * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
+ * L2 is physically indexed but FDCE/FICE instructions in virtual
+ * mode output their virtual address on the core bus, not their
+ * real address. As a result, the L2 cache index formed from the
+ * virtual address will most likely not be the same as the L2 index
+ * formed from the real address.
+ */
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- flush_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
@@ -818,15 +916,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- purge_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
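
Every flush path in cache.c now follows the sequence the __flush_cache_page() comment describes: purge the TLB translation first so no CPU can speculatively move the page back into its cache, then flush through a temporary congruent ("tmpalias") mapping. A minimal sketch of that sequence, reusing the parisc-internal helpers named in this patch (purge_tlb_start/purge_tlb_end, pdtlb, flush_dcache_page_asm); it is illustrative, not a copy of the upstream function:

static void flush_phys_page_example(const void *addr)
{
	unsigned long vaddr = (unsigned long)addr;
	unsigned long flags;

	/* Step 1: remove the translation on all CPUs so the page
	 * cannot be speculatively pulled back into any cache. */
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);

	/* Step 2: flush via a tmpalias mapping congruent to vaddr.
	 * Preemption stays off so the flush runs on one CPU. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vaddr), vaddr);
	preempt_enable();
}
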
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index ac19d685e4a5..1e793f770f71 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -97,7 +97,7 @@ static int for_each_padev(int (*fn)(struct device *, void *), void * data)
* @driver: the PA-RISC driver to try
* @dev: the PA-RISC device to try
*/
-static int match_device(struct parisc_driver *driver, struct parisc_device *dev)
+static int match_device(const struct parisc_driver *driver, struct parisc_device *dev)
{
const struct parisc_device_id *ids;
@@ -548,7 +548,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
return dev;
}
-static int parisc_generic_match(struct device *dev, struct device_driver *drv)
+static int parisc_generic_match(struct device *dev, const struct device_driver *drv)
{
return match_device(to_parisc_driver(drv), to_parisc_device(dev));
}
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index ab23e61a6f01..ea57bcc21dc5 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -1051,8 +1051,7 @@ ENTRY_CFI(intr_save) /* for os_hpmc */
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
-#if 0 && defined(CONFIG_64BIT)
- /* Revisit when we have 64-bit code above 4Gb */
+#if defined(CONFIG_64BIT)
b,n intr_save2
skip_save_ior:
@@ -1060,8 +1059,7 @@ skip_save_ior:
* need to adjust iasq/iaoq here in the same way we adjusted isr/ior
* above.
*/
- extrd,u,* %r8,PSW_W_BIT,1,%r1
- cmpib,COND(=),n 1,%r1,intr_save2
+ bb,COND(>=),n %r8,PSW_W_BIT,intr_save2
LDREG PT_IASQ0(%r29), %r16
LDREG PT_IAOQ0(%r29), %r17
/* adjust iasq/iaoq */
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 621a4b386ae4..10fd5b3e63e7 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -20,7 +20,7 @@
#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
#define __hot __section(".text.hot")
@@ -87,7 +87,7 @@ int ftrace_enable_ftrace_graph_caller(void)
int ftrace_disable_ftrace_graph_caller(void)
{
- static_key_enable(&ftrace_graph_enable.key);
+ static_key_disable(&ftrace_graph_enable.key);
return 0;
}
#endif
@@ -206,6 +206,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/parisc/kernel/jump_label.c b/arch/parisc/kernel/jump_label.c
index e253b134500d..ea51f15bf0e6 100644
--- a/arch/parisc/kernel/jump_label.c
+++ b/arch/parisc/kernel/jump_label.c
@@ -8,7 +8,7 @@
#include <linux/jump_label.h>
#include <linux/bug.h>
#include <asm/alternative.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
static inline int reassemble_17(int as17)
{
diff --git a/arch/parisc/kernel/kgdb.c b/arch/parisc/kernel/kgdb.c
index b16fa9bac5f4..fee81f877525 100644
--- a/arch/parisc/kernel/kgdb.c
+++ b/arch/parisc/kernel/kgdb.c
@@ -16,7 +16,7 @@
#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/processor.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
#include <asm/cacheflush.h>
const struct kgdb_arch arch_kgdb_ops = {
diff --git a/arch/parisc/kernel/kprobes.c b/arch/parisc/kernel/kprobes.c
index 6e0b86652f30..9255adba67a3 100644
--- a/arch/parisc/kernel/kprobes.c
+++ b/arch/parisc/kernel/kprobes.c
@@ -12,7 +12,7 @@
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index d214bbe3c2af..4e5d991b2b65 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -41,7 +41,6 @@
#include <linux/moduleloader.h>
#include <linux/elf.h>
-#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/string.h>
@@ -173,17 +172,6 @@ static inline int reassemble_22(int as22)
((as22 & 0x0003ff) << 3));
}
-void *module_alloc(unsigned long size)
-{
- /* using RWX means less protection for modules, but it's
- * easier than trying to map the text, data, init_text and
- * init_data correctly */
- return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
- GFP_KERNEL,
- PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-
#ifndef CONFIG_64BIT
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 6f0c92e8149d..509146a52725 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -22,6 +22,8 @@ EXPORT_SYMBOL(memset);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
@@ -41,9 +43,6 @@ EXPORT_SYMBOL($global$);
#endif
#include <asm/io.h>
-EXPORT_SYMBOL(memcpy_toio);
-EXPORT_SYMBOL(memcpy_fromio);
-EXPORT_SYMBOL(memset_io);
extern void $$divI(void);
extern void $$divU(void);
diff --git a/arch/parisc/kernel/patch.c b/arch/parisc/kernel/patch.c
index e59574f65e64..35dd764b871e 100644
--- a/arch/parisc/kernel/patch.c
+++ b/arch/parisc/kernel/patch.c
@@ -13,7 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
struct patch {
void *addr;
diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c
index 0f9b3b5914cf..b70b67adb855 100644
--- a/arch/parisc/kernel/pdt.c
+++ b/arch/parisc/kernel/pdt.c
@@ -63,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
#define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL)
#define PDT_ADDR_SINGLE_ERR 1UL
+#ifdef CONFIG_PROC_FS
/* report PDT entries via /proc/meminfo */
void arch_report_meminfo(struct seq_file *m)
{
@@ -74,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m)
seq_printf(m, "PDT_cur_entries: %7lu\n",
pdt_status.pdt_entries);
}
+#endif
static int get_info_pat_new(void)
{
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
index b0f0816879df..5e10f98ce7b5 100644
--- a/arch/parisc/kernel/perf.c
+++ b/arch/parisc/kernel/perf.c
@@ -466,7 +466,6 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
static const struct file_operations perf_fops = {
- .llseek = no_llseek,
.read = perf_read,
.write = perf_write,
.unlocked_ioctl = perf_ioctl,
@@ -476,9 +475,9 @@ static const struct file_operations perf_fops = {
};
static struct miscdevice perf_dev = {
- MISC_DYNAMIC_MINOR,
- PA_PERF_DEV,
- &perf_fops
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = PA_PERF_DEV,
+ .fops = &perf_fops,
};
/*
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 444154271f23..b2d12ab728b1 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -297,7 +297,7 @@ smp_cpu_init(int cpunum)
enter_lazy_tlb(&init_mm, current);
init_IRQ(); /* make sure no IRQs are enabled or pending */
- start_cpu_itimer();
+ parisc_clockevent_init();
}
@@ -344,7 +344,7 @@ static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
struct irq_desc *desc = irq_to_desc(i);
if (desc && desc->kstat_irqs)
- *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
+ *per_cpu_ptr(desc->kstat_irqs, cpuid) = (struct irqstat) { };
}
#endif
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 98af719d5f85..f852fe274abe 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -104,7 +104,9 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
struct vm_area_struct *vma, *prev;
unsigned long filp_pgoff;
int do_color_align;
- struct vm_unmapped_area_info info;
+ struct vm_unmapped_area_info info = {
+ .length = len
+ };
if (unlikely(len > TASK_SIZE))
return -ENOMEM;
@@ -139,7 +141,6 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
return addr;
}
- info.length = len;
info.align_mask = do_color_align ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(filp_pgoff, pgoff);
@@ -160,14 +161,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
*/
}
- info.flags = 0;
info.low_limit = mm->mmap_base;
info.high_limit = mmap_upper_limit(NULL);
return vm_unmapped_area(&info);
}
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
+ unsigned long len, unsigned long pgoff, unsigned long flags,
+ vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr, len, pgoff, flags, UP);
@@ -175,7 +176,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr, unsigned long len, unsigned long pgoff,
- unsigned long flags)
+ unsigned long flags, vm_flags_t vm_flags)
{
return arch_get_unmapped_area_common(filp,
addr, len, pgoff, flags, DOWN);
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index 2a12a547b447..826c8e51b585 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -23,12 +23,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
current->comm, current->pid, r20);
return -ENOSYS;
}
-
-asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
- compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
- const char __user * pathname)
-{
- return sys_fanotify_mark(fanotify_fd, flags,
- ((__u64)mask1 << 32) | mask0,
- dfd, pathname);
-}
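
The deleted wrapper existed only to reassemble the 64-bit fanotify mask from the two 32-bit registers a compat caller passes; the generic compat_sys_fanotify_mark (wired up in syscall.tbl below) performs the same merge. A standalone sketch of that merge, grounded in the removed code:

#include <stdint.h>

/* mask0 carries the low 32 bits, mask1 the high 32 bits,
 * exactly as in the removed sys32_fanotify_mark(). */
static uint64_t fanotify_merge_mask(uint32_t mask0, uint32_t mask1)
{
	return ((uint64_t)mask1 << 32) | mask0;
}
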
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 1f51aa9c8230..0fa81bf1466b 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -243,10 +243,10 @@ linux_gateway_entry:
#ifdef CONFIG_64BIT
ldil L%sys_call_table, %r1
- or,= %r2,%r2,%r2
- addil L%(sys_call_table64-sys_call_table), %r1
+ or,ev %r2,%r2,%r2
+ ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
- or,= %r2,%r2,%r2
+ or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@@ -379,10 +379,10 @@ tracesys_next:
extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */
ldil L%sys_call_table, %r1
- or,= %r2,%r2,%r2
- addil L%(sys_call_table64-sys_call_table), %r1
+ or,ev %r2,%r2,%r2
+ ldil L%sys_call_table64, %r1
ldo R%sys_call_table(%r1), %r19
- or,= %r2,%r2,%r2
+ or,ev %r2,%r2,%r2
ldo R%sys_call_table64(%r1), %r19
#else
load32 sys_call_table, %r19
@@ -1327,6 +1327,8 @@ ENTRY(sys_call_table)
END(sys_call_table)
#ifdef CONFIG_64BIT
+#undef __SYSCALL_WITH_COMPAT
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
.align 8
ENTRY(sys_call_table64)
#include <asm/syscall_table_64.h> /* 64-bit syscalls */
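
Redefining __SYSCALL_WITH_COMPAT before including the generated 64-bit header makes the same table description expand differently for the two tables: sys_call_table keeps the compat entry points, while sys_call_table64 always takes the native ones. An illustrative expansion, assuming the usual convention that __SYSCALL(nr, entry) emits one table slot and that the compat table's default definition selects the compat entry:

/* One line of the generated header emits one table slot: */
__SYSCALL_WITH_COMPAT(123, sys_recvfrom, compat_sys_recvfrom)

/* Compat table (assumed default): the slot gets compat_sys_recvfrom */
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)

/* 64-bit table (redefined in this patch): the slot gets sys_recvfrom */
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
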
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index b236a84c4e12..94df3cb957e9 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -108,7 +108,7 @@
95 common fchown sys_fchown
96 common getpriority sys_getpriority
97 common setpriority sys_setpriority
-98 common recv sys_recv
+98 common recv sys_recv compat_sys_recv
99 common statfs sys_statfs compat_sys_statfs
100 common fstatfs sys_fstatfs compat_sys_fstatfs
101 common stat64 sys_stat64
@@ -135,7 +135,7 @@
120 common clone sys_clone_wrapper
121 common setdomainname sys_setdomainname
122 common sendfile sys_sendfile compat_sys_sendfile
-123 common recvfrom sys_recvfrom
+123 common recvfrom sys_recvfrom compat_sys_recvfrom
124 32 adjtimex sys_adjtimex_time32
124 64 adjtimex sys_adjtimex
125 common mprotect sys_mprotect
@@ -364,7 +364,7 @@
320 common accept4 sys_accept4
321 common prlimit64 sys_prlimit64
322 common fanotify_init sys_fanotify_init
-323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
+323 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
324 32 clock_adjtime sys_clock_adjtime32
324 64 clock_adjtime sys_clock_adjtime
325 common name_to_handle_at sys_name_to_handle_at
@@ -460,3 +460,9 @@
459 common lsm_get_self_attr sys_lsm_get_self_attr
460 common lsm_set_self_attr sys_lsm_set_self_attr
461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 9714fbd7c42d..c17e2249115f 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -1,166 +1,119 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/arch/parisc/kernel/time.c
+ * Common time service routines for parisc machines.
+ * based on arch/loongarch/kernel/time.c
*
- * Copyright (C) 1991, 1992, 1995 Linus Torvalds
- * Modifications for ARM (C) 1994, 1995, 1996,1997 Russell King
- * Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
- *
- * 1994-07-02 Alan Modra
- * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * Copyright (C) 2024 Helge Deller <deller@gmx.de>
*/
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/rtc.h>
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <linux/sched_clock.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/profile.h>
-#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched_clock.h>
+#include <linux/spinlock.h>
+#include <linux/rtc.h>
#include <linux/platform_device.h>
-#include <linux/ftrace.h>
+#include <asm/processor.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/param.h>
-#include <asm/pdc.h>
-#include <asm/led.h>
+static u64 cr16_clock_freq;
+static unsigned long clocktick;
-#include <linux/timex.h>
+int time_keeper_id; /* CPU used for timekeeping */
-int time_keeper_id __read_mostly; /* CPU used for timekeeping. */
+static DEFINE_PER_CPU(struct clock_event_device, parisc_clockevent_device);
-static unsigned long clocktick __ro_after_init; /* timer cycles per tick */
+static void parisc_event_handler(struct clock_event_device *dev)
+{
+}
-/*
- * We keep time on PA-RISC Linux by using the Interval Timer which is
- * a pair of registers; one is read-only and one is write-only; both
- * accessed through CR16. The read-only register is 32 or 64 bits wide,
- * and increments by 1 every CPU clock tick. The architecture only
- * guarantees us a rate between 0.5 and 2, but all implementations use a
- * rate of 1. The write-only register is 32-bits wide. When the lowest
- * 32 bits of the read-only register compare equal to the write-only
- * register, it raises a maskable external interrupt. Each processor has
- * an Interval Timer of its own and they are not synchronised.
- *
- * We want to generate an interrupt every 1/HZ seconds. So we program
- * CR16 to interrupt every @clocktick cycles. The it_value in cpu_data
- * is programmed with the intended time of the next tick. We can be
- * held off for an arbitrarily long period of time by interrupts being
- * disabled, so we may miss one or more ticks.
- */
-irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
+static int parisc_timer_next_event(unsigned long delta, struct clock_event_device *evt)
{
- unsigned long now;
- unsigned long next_tick;
- unsigned long ticks_elapsed = 0;
- unsigned int cpu = smp_processor_id();
- struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);
-
- /* gcc can optimize for "read-only" case with a local clocktick */
- unsigned long cpt = clocktick;
-
- /* Initialize next_tick to the old expected tick time. */
- next_tick = cpuinfo->it_value;
-
- /* Calculate how many ticks have elapsed. */
- now = mfctl(16);
- do {
- ++ticks_elapsed;
- next_tick += cpt;
- } while (next_tick - now > cpt);
-
- /* Store (in CR16 cycles) up to when we are accounting right now. */
- cpuinfo->it_value = next_tick;
-
- /* Go do system house keeping. */
- if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id))
- ticks_elapsed = 0;
- legacy_timer_tick(ticks_elapsed);
-
- /* Skip clockticks on purpose if we know we would miss those.
- * The new CR16 must be "later" than current CR16 otherwise
- * itimer would not fire until CR16 wrapped - e.g 4 seconds
- * later on a 1Ghz processor. We'll account for the missed
- * ticks on the next timer interrupt.
- * We want IT to fire modulo clocktick even if we miss/skip some.
- * But those interrupts don't in fact get delivered that regularly.
- *
- * "next_tick - now" will always give the difference regardless
- * if one or the other wrapped. If "now" is "bigger" we'll end up
- * with a very large unsigned number.
- */
- now = mfctl(16);
- while (next_tick - now > cpt)
- next_tick += cpt;
-
- /* Program the IT when to deliver the next interrupt.
- * Only bottom 32-bits of next_tick are writable in CR16!
- * Timer interrupt will be delivered at least a few hundred cycles
- * after the IT fires, so if we are too close (<= 8000 cycles) to the
- * next cycle, simply skip it.
- */
- if (next_tick - now <= 8000)
- next_tick += cpt;
- mtctl(next_tick, 16);
+ unsigned long new_cr16;
- return IRQ_HANDLED;
-}
+ new_cr16 = mfctl(16) + delta;
+ mtctl(new_cr16, 16);
+ return 0;
+}
-unsigned long profile_pc(struct pt_regs *regs)
+irqreturn_t timer_interrupt(int irq, void *data)
{
- unsigned long pc = instruction_pointer(regs);
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
- if (regs->gr[0] & PSW_N)
- pc -= 4;
+ cd = &per_cpu(parisc_clockevent_device, cpu);
-#ifdef CONFIG_SMP
- if (in_lock_functions(pc))
- pc = regs->gr[2];
-#endif
+ if (clockevent_state_periodic(cd))
+ parisc_timer_next_event(clocktick, cd);
- return pc;
+ if (clockevent_state_periodic(cd) || clockevent_state_oneshot(cd))
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
}
-EXPORT_SYMBOL(profile_pc);
+static int parisc_set_state_oneshot(struct clock_event_device *evt)
+{
+ parisc_timer_next_event(clocktick, evt);
-/* clock source code */
+ return 0;
+}
-static u64 notrace read_cr16(struct clocksource *cs)
+static int parisc_set_state_periodic(struct clock_event_device *evt)
{
- return get_cycles();
+ parisc_timer_next_event(clocktick, evt);
+
+ return 0;
}
-static struct clocksource clocksource_cr16 = {
- .name = "cr16",
- .rating = 300,
- .read = read_cr16,
- .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
+static int parisc_set_state_shutdown(struct clock_event_device *evt)
+{
+ return 0;
+}
-void start_cpu_itimer(void)
+void parisc_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
- unsigned long next_tick = mfctl(16) + clocktick;
+ unsigned long min_delta = 0x600; /* XXX */
+ unsigned long max_delta = (1UL << (BITS_PER_LONG - 1));
+ struct clock_event_device *cd;
+
+ cd = &per_cpu(parisc_clockevent_device, cpu);
+
+ cd->name = "cr16_clockevent";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_PERCPU;
+
+ cd->irq = TIMER_IRQ;
+ cd->rating = 320;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_state_oneshot = parisc_set_state_oneshot;
+ cd->set_state_oneshot_stopped = parisc_set_state_shutdown;
+ cd->set_state_periodic = parisc_set_state_periodic;
+ cd->set_state_shutdown = parisc_set_state_shutdown;
+ cd->set_next_event = parisc_timer_next_event;
+ cd->event_handler = parisc_event_handler;
+
+ clockevents_config_and_register(cd, cr16_clock_freq, min_delta, max_delta);
+}
+
+unsigned long notrace profile_pc(struct pt_regs *regs)
+{
+ unsigned long pc = instruction_pointer(regs);
- mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
+ if (regs->gr[0] & PSW_N)
+ pc -= 4;
+
+#ifdef CONFIG_SMP
+ if (in_lock_functions(pc))
+ pc = regs->gr[2];
+#endif
- per_cpu(cpu_data, cpu).it_value = next_tick;
+ return pc;
}
+EXPORT_SYMBOL(profile_pc);
#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
@@ -224,12 +177,27 @@ void read_persistent_clock64(struct timespec64 *ts)
}
}
-
static u64 notrace read_cr16_sched_clock(void)
{
return get_cycles();
}
+static u64 notrace read_cr16(struct clocksource *cs)
+{
+ return get_cycles();
+}
+
+static struct clocksource clocksource_cr16 = {
+ .name = "cr16",
+ .rating = 300,
+ .read = read_cr16,
+ .mask = CLOCKSOURCE_MASK(BITS_PER_LONG),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS |
+ CLOCK_SOURCE_VALID_FOR_HRES |
+ CLOCK_SOURCE_MUST_VERIFY |
+ CLOCK_SOURCE_VERIFY_PERCPU,
+};
+
/*
* timer interrupt and sched_clock() initialization
@@ -237,33 +205,14 @@ static u64 notrace read_cr16_sched_clock(void)
void __init time_init(void)
{
- unsigned long cr16_hz;
-
- clocktick = (100 * PAGE0->mem_10msec) / HZ;
- start_cpu_itimer(); /* get CPU 0 started */
-
- cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */
+ cr16_clock_freq = 100 * PAGE0->mem_10msec; /* Hz */
+ clocktick = cr16_clock_freq / HZ;
/* register as sched_clock source */
- sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
-}
+ sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_clock_freq);
-static int __init init_cr16_clocksource(void)
-{
- /*
- * The cr16 interval timers are not synchronized across CPUs.
- */
- if (num_online_cpus() > 1 && !running_on_qemu) {
- clocksource_cr16.name = "cr16_unstable";
- clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
- clocksource_cr16.rating = 0;
- }
+ parisc_clockevent_init();
/* register at clocksource framework */
- clocksource_register_hz(&clocksource_cr16,
- 100 * PAGE0->mem_10msec);
-
- return 0;
+ clocksource_register_hz(&clocksource_cr16, cr16_clock_freq);
}
-
-device_initcall(init_cr16_clocksource);
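
parisc_timer_next_event() programs an absolute match value: the current CR16 count plus a delta in CPU cycles, so the periodic delta is simply the CR16 frequency divided by HZ. A worked example with assumed firmware values (PAGE0->mem_10msec = 4,400,000, i.e. a 440 MHz CR16, and HZ = 250; both numbers are hypothetical):

#include <stdio.h>

int main(void)
{
	/* Assumed example inputs, not real firmware values: */
	unsigned long mem_10msec = 4400000;	/* CR16 cycles per 10 ms */
	unsigned long hz = 250;			/* kernel tick rate */

	unsigned long cr16_clock_freq = 100 * mem_10msec; /* 440000000 Hz */
	unsigned long clocktick = cr16_clock_freq / hz;	  /* 1760000 cycles */

	printf("cr16 freq: %lu Hz, periodic delta: %lu cycles\n",
	       cr16_clock_freq, clocktick);
	return 0;
}
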
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 1107ca819ac8..b9b3d527bc90 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -36,7 +36,7 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
@@ -47,6 +47,8 @@
#include <linux/kgdb.h>
#include <linux/kprobes.h>
+#include "unaligned.h"
+
#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
#include <asm/spinlock.h>
#endif
@@ -504,7 +506,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
if (((unsigned long)regs->iaoq[0] & 3) &&
((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
/* Kill the user process later */
- regs->iaoq[0] = 0 | 3;
+ regs->iaoq[0] = 0 | PRIV_USER;
regs->iaoq[1] = regs->iaoq[0] + 4;
regs->iasq[0] = regs->iasq[1] = regs->sr[7];
regs->gr[0] &= ~PSW_B;
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 71e596ca5a86..f4626943633a 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -12,9 +12,10 @@
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
-#include <asm/unaligned.h>
+#include <linux/unaligned.h>
#include <asm/hardirq.h>
#include <asm/traps.h>
+#include "unaligned.h"
/* #define DEBUG_UNALIGNED 1 */
@@ -104,6 +105,7 @@
#define ERR_NOTHANDLED -1
int unaligned_enabled __read_mostly = 1;
+int no_unaligned_warning __read_mostly;
static int emulate_ldh(struct pt_regs *regs, int toreg)
{
@@ -399,6 +401,7 @@ void handle_unaligned(struct pt_regs *regs)
} else {
static DEFINE_RATELIMIT_STATE(kernel_ratelimit, 5 * HZ, 5);
if (!(current->thread.flags & PARISC_UAC_NOPRINT) &&
+ !no_unaligned_warning &&
__ratelimit(&kernel_ratelimit))
pr_warn("Kernel: unaligned access to " RFMT " in %pS "
"(iir " RFMT ")\n",
diff --git a/arch/parisc/kernel/unaligned.h b/arch/parisc/kernel/unaligned.h
new file mode 100644
index 000000000000..c1aa4b12e284
--- /dev/null
+++ b/arch/parisc/kernel/unaligned.h
@@ -0,0 +1,3 @@
+struct pt_regs;
+void handle_unaligned(struct pt_regs *regs);
+int check_unaligned(struct pt_regs *regs);
diff --git a/arch/parisc/kernel/vdso32/Makefile b/arch/parisc/kernel/vdso32/Makefile
index 4459a48d2303..4ee8d17da229 100644
--- a/arch/parisc/kernel/vdso32/Makefile
+++ b/arch/parisc/kernel/vdso32/Makefile
@@ -1,11 +1,25 @@
-# List of files in the vdso, has to be asm only for now
+# Include the generic Makefile to check the built vdso.
+include $(srctree)/lib/vdso/Makefile.include
+
+KCOV_INSTRUMENT := n
+
+# Disable gcov profiling, ubsan and kasan for VDSO code
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
obj-vdso32 = note.o sigtramp.o restart_syscall.o
+obj-cvdso32 = vdso32_generic.o
# Build rules
-targets := $(obj-vdso32) vdso32.so
+targets := $(obj-vdso32) $(obj-cvdso32) vdso32.so
obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
+obj-cvdso32 := $(addprefix $(obj)/, $(obj-cvdso32))
+
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_vdso32_generic.o = $(VDSO_CFLAGS_REMOVE)
ccflags-y := -shared -fno-common -fbuiltin -mno-fast-indirect-calls -O2 -mno-long-calls
# -march=1.1 -mschedule=7100LC
@@ -19,20 +33,19 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
VDSO_LIBGCC := $(shell $(CROSS32CC) -print-libgcc-file-name)
obj-y += vdso32_wrapper.o
-extra-y += vdso32.lds
+targets += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C # -U$(ARCH)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so: $(src)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE
+$(obj)/vdso32.so: $(obj)/vdso32.lds $(obj-vdso32) $(obj-cvdso32) $(VDSO_LIBGCC) FORCE
$(call if_changed,vdso32ld)
# assembly rules for the .S files
$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
-
$(obj-cvdso32): %.o: %.c FORCE
$(call if_changed_dep,vdso32cc)
@@ -42,10 +55,10 @@ quiet_cmd_vdso32ld = VDSO32L $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CROSS32CC) $(a_flags) -c -o $@ $<
quiet_cmd_vdso32cc = VDSO32C $@
- cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -fPIC -mno-fast-indirect-calls -o $@ $<
+ cmd_vdso32cc = $(CROSS32CC) $(c_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/parisc/kernel/vdso32/vdso32.lds.S b/arch/parisc/kernel/vdso32/vdso32.lds.S
index d4aff3af5262..4273baa26b65 100644
--- a/arch/parisc/kernel/vdso32/vdso32.lds.S
+++ b/arch/parisc/kernel/vdso32/vdso32.lds.S
@@ -106,6 +106,9 @@ VERSION
global:
__kernel_sigtramp_rt32;
__kernel_restart_syscall32;
+ __vdso_gettimeofday;
+ __vdso_clock_gettime;
+ __vdso_clock_gettime64;
local: *;
};
}
diff --git a/arch/parisc/kernel/vdso32/vdso32_generic.c b/arch/parisc/kernel/vdso32/vdso32_generic.c
new file mode 100644
index 000000000000..8d5bd59e8646
--- /dev/null
+++ b/arch/parisc/kernel/vdso32/vdso32_generic.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "asm/unistd.h"
+#include <linux/types.h>
+#include <uapi/asm/unistd_32.h>
+
+struct timezone;
+struct old_timespec32;
+struct __kernel_timespec;
+struct __kernel_old_timeval;
+
+/* forward declarations */
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts);
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
+}
+
+int __vdso_clock_gettime(clockid_t clock, struct old_timespec32 *ts)
+{
+ return syscall2(__NR_clock_gettime, (long)clock, (long)ts);
+}
+
+int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return syscall2(__NR_clock_gettime64, (long)clock, (long)ts);
+}
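
These vDSO entry points are plain syscall fallbacks for now (syscall2() traps into the kernel), but userspace still reaches them through the vDSO image the kernel maps into every process, advertised via the AT_SYSINFO_EHDR auxv entry. A minimal, portable userspace check (standard glibc API; nothing parisc-specific assumed):

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* Base of the kernel-mapped vDSO; a libc resolves symbols
	 * such as __vdso_clock_gettime64 from this ELF image. */
	unsigned long base = getauxval(AT_SYSINFO_EHDR);

	if (base)
		printf("vDSO mapped at %#lx\n", base);
	else
		puts("no vDSO advertised by the kernel");
	return 0;
}
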
diff --git a/arch/parisc/kernel/vdso64/Makefile b/arch/parisc/kernel/vdso64/Makefile
index f3d6045793f4..c63f4069170f 100644
--- a/arch/parisc/kernel/vdso64/Makefile
+++ b/arch/parisc/kernel/vdso64/Makefile
@@ -1,12 +1,25 @@
-# List of files in the vdso, has to be asm only for now
+# Include the generic Makefile to check the built vdso.
+include $(srctree)/lib/vdso/Makefile.include
+
+KCOV_INSTRUMENT := n
+
+# Disable gcov profiling, ubsan and kasan for VDSO code
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
obj-vdso64 = note.o sigtramp.o restart_syscall.o
+obj-cvdso64 = vdso64_generic.o
# Build rules
-targets := $(obj-vdso64) vdso64.so
-obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
+targets := $(obj-vdso64) $(obj-cvdso64) vdso64.so
+obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
+obj-cvdso64 := $(addprefix $(obj)/, $(obj-cvdso64))
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE)
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
@@ -19,28 +32,32 @@ KBUILD_CFLAGS += -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING
VDSO_LIBGCC := $(shell $(CC) -print-libgcc-file-name)
obj-y += vdso64_wrapper.o
-extra-y += vdso64.lds
+targets += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so FORCE
# Force dependency (incbin is bad)
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so: $(src)/vdso64.lds $(obj-vdso64) $(VDSO_LIBGCC) FORCE
+$(obj)/vdso64.so: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) $(VDSO_LIBGCC) FORCE
$(call if_changed,vdso64ld)
# assembly rules for the .S files
$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
+$(obj-cvdso64): %.o: %.c FORCE
+ $(call if_changed_dep,vdso64cc)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter-out FORCE, $^) -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
+quiet_cmd_vdso64cc = VDSO64C $@
+ cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $<
# Generate VDSO offsets using helper script
-gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+gen-vdsosym := $(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
diff --git a/arch/parisc/kernel/vdso64/vdso64.lds.S b/arch/parisc/kernel/vdso64/vdso64.lds.S
index de1fb4b19286..10f25e4e1554 100644
--- a/arch/parisc/kernel/vdso64/vdso64.lds.S
+++ b/arch/parisc/kernel/vdso64/vdso64.lds.S
@@ -104,6 +104,8 @@ VERSION
global:
__kernel_sigtramp_rt64;
__kernel_restart_syscall64;
+ __vdso_gettimeofday;
+ __vdso_clock_gettime;
local: *;
};
}
diff --git a/arch/parisc/kernel/vdso64/vdso64_generic.c b/arch/parisc/kernel/vdso64/vdso64_generic.c
new file mode 100644
index 000000000000..fc6836a0075b
--- /dev/null
+++ b/arch/parisc/kernel/vdso64/vdso64_generic.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "asm/unistd.h"
+#include <linux/types.h>
+
+struct timezone;
+struct __kernel_timespec;
+struct __kernel_old_timeval;
+
+/* forward declarations */
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+
+
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+{
+ return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
+}
+
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return syscall2(__NR_clock_gettime, (long)clock, (long)ts);
+}