Diffstat (limited to 'arch/arc/include/asm')
 arch/arc/include/asm/arcregs.h            |  99
 arch/arc/include/asm/atomic-llsc.h        |   6
 arch/arc/include/asm/atomic64-arcv2.h     |   6
 arch/arc/include/asm/cacheflush.h         |  14
 arch/arc/include/asm/current.h            |   2
 arch/arc/include/asm/dwarf.h              |  32
 arch/arc/include/asm/entry-arcv2.h        |  66
 arch/arc/include/asm/entry-compact.h      |  50
 arch/arc/include/asm/entry.h              | 128
 arch/arc/include/asm/hugepage.h           |   2
 arch/arc/include/asm/io.h                 |   7
 arch/arc/include/asm/irq.h                |   1
 arch/arc/include/asm/mmu.h                |   2
 arch/arc/include/asm/page.h               |   2
 arch/arc/include/asm/pgtable-bits-arcv2.h |  14
 arch/arc/include/asm/pgtable-levels.h     |   1
 arch/arc/include/asm/processor.h          |   7
 arch/arc/include/asm/ptrace.h             |  65
 arch/arc/include/asm/setup.h              |   8
 arch/arc/include/asm/smp.h                |   2
 arch/arc/include/asm/thread_info.h        |  10
 arch/arc/include/asm/uaccess.h            |  21
 22 files changed, 261 insertions(+), 284 deletions(-)
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 2162023195c5..4b13f60fe7ca 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -23,7 +23,7 @@ #define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */ #define ARC_REG_XY_MEM_BCR 0x79 #define ARC_REG_MAC_BCR 0x7a -#define ARC_REG_MUL_BCR 0x7b +#define ARC_REG_MPY_BCR 0x7b #define ARC_REG_SWAP_BCR 0x7c #define ARC_REG_NORM_BCR 0x7d #define ARC_REG_MIXMAX_BCR 0x7e @@ -177,7 +177,7 @@ struct bcr_isa_arcv2 { #endif }; -struct bcr_uarch_build_arcv2 { +struct bcr_uarch_build { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, prod:8, maj:8, min:8; #else @@ -185,6 +185,59 @@ struct bcr_uarch_build_arcv2 { #endif }; +struct bcr_mmu_3 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4, + u_itlb:4, u_dtlb:4; +#else + unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4, + ways:4, ver:8; +#endif +}; + +struct bcr_mmu_4 { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1, + n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3; +#else + /* DTLB ITLB JES JE JA */ + unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2, + pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8; +#endif +}; + +struct bcr_cache { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:12, line_len:4, sz:4, config:4, ver:8; +#else + unsigned int ver:8, config:4, sz:4, line_len:4, pad:12; +#endif +}; + +struct bcr_slc_cfg { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:24, way:2, lsz:2, sz:4; +#else + unsigned int sz:4, lsz:2, way:2, pad:24; +#endif +}; + +struct bcr_clust_cfg { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8; +#else + unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7; +#endif +}; + +struct bcr_volatile { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int start:4, limit:4, pad:22, order:1, disable:1; +#else + unsigned int disable:1, order:1, pad:22, limit:4, start:4; +#endif +}; + struct bcr_mpy { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; @@ -302,48 +355,6 @@ struct bcr_generic { #endif }; -/* - ******************************************************************* - * Generic structures to hold build configuration used at runtime - */ - -struct cpuinfo_arc_mmu { - unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1; - unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8; -}; - -struct cpuinfo_arc_cache { - unsigned int sz_k:14, line_len:8, assoc:4, alias:1, vipt:1, pad:4; -}; - -struct cpuinfo_arc_bpu { - unsigned int ver, full, num_cache, num_pred, ret_stk; -}; - -struct cpuinfo_arc_ccm { - unsigned int base_addr, sz; -}; - -struct cpuinfo_arc { - struct cpuinfo_arc_cache icache, dcache, slc; - struct cpuinfo_arc_mmu mmu; - struct cpuinfo_arc_bpu bpu; - struct bcr_identity core; - struct bcr_isa_arcv2 isa; - const char *release, *name; - unsigned int vec_base; - struct cpuinfo_arc_ccm iccm, dccm; - struct { - unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, - fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4, - ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1, - timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; - } extn; - struct bcr_mpy extn_mpy; -}; - -extern struct cpuinfo_arc cpuinfo_arc700[]; - static inline int is_isa_arcv2(void) { return IS_ENABLED(CONFIG_ISA_ARCV2); diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h index 1b0ffaeee16d..5258cb81a16b 100644 --- 
a/arch/arc/include/asm/atomic-llsc.h +++ b/arch/arc/include/asm/atomic-llsc.h @@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \ : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ [i] "ir" (i) \ - : "cc"); \ + : "cc", "memory"); \ } \ #define ATOMIC_OP_RETURN(op, asm_op) \ @@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ : [val] "=&r" (val) \ : [ctr] "r" (&v->counter), \ [i] "ir" (i) \ - : "cc"); \ + : "cc", "memory"); \ \ return val; \ } @@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ [orig] "=&r" (orig) \ : [ctr] "r" (&v->counter), \ [i] "ir" (i) \ - : "cc"); \ + : "cc", "memory"); \ \ return orig; \ } diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h index 6b6db981967a..9b5791b85471 100644 --- a/arch/arc/include/asm/atomic64-arcv2.h +++ b/arch/arc/include/asm/atomic64-arcv2.h @@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \ " bnz 1b \n" \ : "=&r"(val) \ : "r"(&v->counter), "ir"(a) \ - : "cc"); \ + : "cc", "memory"); \ } \ #define ATOMIC64_OP_RETURN(op, op1, op2) \ @@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \ " bnz 1b \n" \ : [val] "=&r"(val) \ : "r"(&v->counter), "ir"(a) \ - : "cc"); /* memory clobber comes from smp_mb() */ \ + : "cc", "memory"); \ \ return val; \ } @@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \ " bnz 1b \n" \ : "=&r"(orig), "=&r"(val) \ : "r"(&v->counter), "ir"(a) \ - : "cc"); /* memory clobber comes from smp_mb() */ \ + : "cc", "memory"); \ \ return orig; \ } diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h index e201b4b1655a..bd5b1a9a0544 100644 --- a/arch/arc/include/asm/cacheflush.h +++ b/arch/arc/include/asm/cacheflush.h @@ -18,24 +18,18 @@ #include <linux/mm.h> #include <asm/shmparam.h> -/* - * Semantically we need this because icache doesn't snoop dcache/dma. - * However ARC Cache flush requires paddr as well as vaddr, latter not available - * in the flush_icache_page() API. 
So we no-op it but do the equivalent work - * in update_mmu_cache() - */ -#define flush_icache_page(vma, page) - void flush_cache_all(void); void flush_icache_range(unsigned long kstart, unsigned long kend); void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len); -void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr); -void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr); +void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr); +void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr); #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 void flush_dcache_page(struct page *page); +void flush_dcache_folio(struct folio *folio); +#define flush_dcache_folio flush_dcache_folio void dma_cache_wback_inv(phys_addr_t start, unsigned long sz); void dma_cache_inv(phys_addr_t start, unsigned long sz); diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h index 9b9bdd3e6538..06be89f6f2f0 100644 --- a/arch/arc/include/asm/current.h +++ b/arch/arc/include/asm/current.h @@ -13,7 +13,7 @@ #ifdef CONFIG_ARC_CURR_IN_REG -register struct task_struct *curr_arc asm("r25"); +register struct task_struct *curr_arc asm("gp"); #define current (curr_arc) #else diff --git a/arch/arc/include/asm/dwarf.h b/arch/arc/include/asm/dwarf.h index 5f4de05bd4ee..a0d5ebe1bc3f 100644 --- a/arch/arc/include/asm/dwarf.h +++ b/arch/arc/include/asm/dwarf.h @@ -10,23 +10,31 @@ #ifdef ARC_DW2_UNWIND_AS_CFI -#define CFI_STARTPROC .cfi_startproc -#define CFI_ENDPROC .cfi_endproc -#define CFI_DEF_CFA .cfi_def_cfa -#define CFI_REGISTER .cfi_register -#define CFI_REL_OFFSET .cfi_rel_offset -#define CFI_UNDEFINED .cfi_undefined +#define CFI_STARTPROC .cfi_startproc +#define CFI_ENDPROC .cfi_endproc +#define CFI_DEF_CFA .cfi_def_cfa +#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset +#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register +#define CFI_OFFSET .cfi_offset +#define CFI_REL_OFFSET .cfi_rel_offset +#define CFI_REGISTER .cfi_register +#define CFI_RESTORE .cfi_restore +#define CFI_UNDEFINED .cfi_undefined #else #define CFI_IGNORE # -#define CFI_STARTPROC CFI_IGNORE -#define CFI_ENDPROC CFI_IGNORE -#define CFI_DEF_CFA CFI_IGNORE -#define CFI_REGISTER CFI_IGNORE -#define CFI_REL_OFFSET CFI_IGNORE -#define CFI_UNDEFINED CFI_IGNORE +#define CFI_STARTPROC CFI_IGNORE +#define CFI_ENDPROC CFI_IGNORE +#define CFI_DEF_CFA CFI_IGNORE +#define CFI_DEF_CFA_OFFSET CFI_IGNORE +#define CFI_DEF_CFA_REGISTER CFI_IGNORE +#define CFI_OFFSET CFI_IGNORE +#define CFI_REL_OFFSET CFI_IGNORE +#define CFI_REGISTER CFI_IGNORE +#define CFI_RESTORE CFI_IGNORE +#define CFI_UNDEFINED CFI_IGNORE #endif /* !ARC_DW2_UNWIND_AS_CFI */ diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 0ff4c0610561..4d13320e0c1b 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h @@ -18,7 +18,6 @@ * | orig_r0 | * | event/ECR | * | bta | - * | user_r25 | * | gp | * | fp | * | sp | @@ -49,14 +48,18 @@ /*------------------------------------------------------------------------*/ .macro INTERRUPT_PROLOGUE - ; (A) Before jumping to Interrupt Vector, hardware micro-ops did following: + ; Before jumping to Interrupt Vector, hardware micro-ops did following: ; 1. SP auto-switched to kernel mode stack ; 2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0) ; 3. Auto save: (mandatory) Push PC and STAT32 on stack ; hardware does even if CONFIG_ARC_IRQ_NO_AUTOSAVE - ; 4. 
Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI + ; 4a. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI ; - ; (B) Manually saved some regs: r12,r25,r30, sp,fp,gp, ACCL pair + ; Now + ; 4b. If Auto-save (optional) not enabled in hw, manually save them + ; 5. Manually save: r12,r30, sp,fp,gp, ACCL pair + ; + ; At the end, SP points to pt_regs #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE ; carve pt_regs on stack (case #3), PC/STAT32 already on stack @@ -72,15 +75,16 @@ .endm /*------------------------------------------------------------------------*/ -.macro EXCEPTION_PROLOGUE +.macro EXCEPTION_PROLOGUE_KEEP_AE - ; (A) Before jumping to Exception Vector, hardware micro-ops did following: + ; Before jumping to Exception Vector, hardware micro-ops did following: ; 1. SP auto-switched to kernel mode stack ; 2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0) ; - ; (B) Manually save the complete reg file below + ; Now manually save rest of reg file + ; At the end, SP points to pt_regs - sub sp, sp, SZ_PT_REGS ; carve pt_regs + sub sp, sp, SZ_PT_REGS ; carve space for pt_regs ; _HARD saves r10 clobbered by _SOFT as scratch hence comes first @@ -100,6 +104,16 @@ ; OUTPUT: r10 has ECR expected by EV_Trap .endm +.macro EXCEPTION_PROLOGUE + + EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10 + + lr r0, [efa] + mov r1, sp + + FAKE_RET_FROM_EXCPN ; clobbers r9 +.endm + /*------------------------------------------------------------------------ * This macro saves the registers manually which would normally be autosaved * by hardware on taken interrupts. It is used by @@ -135,10 +149,10 @@ */ .macro __SAVE_REGFILE_SOFT - ST2 gp, fp, PT_r26 ; gp (r26), fp (r27) - - st r12, [sp, PT_sp + 4] - st r30, [sp, PT_sp + 8] + st fp, [sp, PT_fp] ; r27 + st r30, [sp, PT_r30] + st r12, [sp, PT_r12] + st r26, [sp, PT_r26] ; gp ; Saving pt_regs->sp correctly requires some extra work due to the way ; Auto stack switch works @@ -153,30 +167,30 @@ ; ISA requires ADD.nz to have same dest and src reg operands mov.nz r10, sp - add.nz r10, r10, SZ_PT_REGS ; K mode SP + add2.nz r10, r10, SZ_PT_REGS/4 ; K mode SP st r10, [sp, PT_sp] ; SP (pt_regs->sp) -#ifdef CONFIG_ARC_CURR_IN_REG - st r25, [sp, PT_user_r25] - GET_CURR_TASK_ON_CPU r25 -#endif - #ifdef CONFIG_ARC_HAS_ACCL_REGS ST2 r58, r59, PT_r58 #endif /* clobbers r10, r11 registers pair */ DSP_SAVE_REGFILE_IRQ + +#ifdef CONFIG_ARC_CURR_IN_REG + GET_CURR_TASK_ON_CPU gp +#endif + .endm /*------------------------------------------------------------------------*/ .macro __RESTORE_REGFILE_SOFT - LD2 gp, fp, PT_r26 ; gp (r26), fp (r27) - - ld r12, [sp, PT_r12] + ld fp, [sp, PT_fp] ld r30, [sp, PT_r30] + ld r12, [sp, PT_r12] + ld r26, [sp, PT_r26] ; Restore SP (into AUX_USER_SP) only if returning to U mode ; - for K mode, it will be implicitly restored as stack is unwound @@ -188,10 +202,6 @@ sr r10, [AUX_USER_SP] 1: -#ifdef CONFIG_ARC_CURR_IN_REG - ld r25, [sp, PT_user_r25] -#endif - /* clobbers r10, r11 registers pair */ DSP_RESTORE_REGFILE_IRQ @@ -249,7 +259,7 @@ btst r0, STATUS_U_BIT ; Z flag set if K, used in restoring SP - ld r10, [sp, PT_event + 4] + ld r10, [sp, PT_bta] sr r10, [erbta] LD2 r10, r11, PT_ret @@ -264,8 +274,8 @@ .macro FAKE_RET_FROM_EXCPN lr r9, [status32] - bic r9, r9, STATUS_AE_MASK - or r9, r9, STATUS_IE_MASK + bclr r9, r9, STATUS_AE_BIT + bset r9, r9, STATUS_IE_BIT kflag r9 .endm diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h index 67ff06e15cea..a0e760eb35a8 100644 --- 
a/arch/arc/include/asm/entry-compact.h +++ b/arch/arc/include/asm/entry-compact.h @@ -140,7 +140,7 @@ * * After this it is safe to call the "C" handlers *-------------------------------------------------------------*/ -.macro EXCEPTION_PROLOGUE +.macro EXCEPTION_PROLOGUE_KEEP_AE /* Need at least 1 reg to code the early exception prologue */ PROLOG_FREEUP_REG r9, @ex_saved_reg1 @@ -151,14 +151,6 @@ /* ARC700 doesn't provide auto-stack switching */ SWITCH_TO_KERNEL_STK -#ifdef CONFIG_ARC_CURR_IN_REG - /* Treat r25 as scratch reg (save on stack) and load with "current" */ - PUSH r25 - GET_CURR_TASK_ON_CPU r25 -#else - sub sp, sp, 4 -#endif - st.a r0, [sp, -8] /* orig_r0 needed for syscall (skip ECR slot) */ sub sp, sp, 4 /* skip pt_regs->sp, already saved above */ @@ -178,7 +170,23 @@ PUSHAX erbta lr r10, [ecr] - st r10, [sp, PT_event] /* EV_Trap expects r10 to have ECR */ + st r10, [sp, PT_event] + +#ifdef CONFIG_ARC_CURR_IN_REG + /* gp already saved on stack: now load with "current" */ + GET_CURR_TASK_ON_CPU gp +#endif + ; OUTPUT: r10 has ECR expected by EV_Trap +.endm + +.macro EXCEPTION_PROLOGUE + + EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10 + + lr r0, [efa] + mov r1, sp + + FAKE_RET_FROM_EXCPN ; clobbers r9 .endm /*-------------------------------------------------------------- @@ -208,11 +216,8 @@ POP gp RESTORE_R12_TO_R0 -#ifdef CONFIG_ARC_CURR_IN_REG - ld r25, [sp, 12] -#endif ld sp, [sp] /* restore original sp */ - /* orig_r0, ECR, user_r25 skipped automatically */ + /* orig_r0, ECR skipped automatically */ .endm /* Dummy ECR values for Interrupts */ @@ -229,13 +234,6 @@ SWITCH_TO_KERNEL_STK -#ifdef CONFIG_ARC_CURR_IN_REG - /* Treat r25 as scratch reg (save on stack) and load with "current" */ - PUSH r25 - GET_CURR_TASK_ON_CPU r25 -#else - sub sp, sp, 4 -#endif PUSH 0x003\LVL\()abcd /* Dummy ECR */ sub sp, sp, 8 /* skip orig_r0 (not needed) @@ -255,6 +253,10 @@ PUSHAX lp_start PUSHAX bta_l\LVL\() +#ifdef CONFIG_ARC_CURR_IN_REG + /* gp already saved on stack: now load with "current" */ + GET_CURR_TASK_ON_CPU gp +#endif .endm /*-------------------------------------------------------------- @@ -282,11 +284,7 @@ POP gp RESTORE_R12_TO_R0 -#ifdef CONFIG_ARC_CURR_IN_REG - ld r25, [sp, 12] -#endif - ld sp, [sp] /* restore original sp */ - /* orig_r0, ECR, user_r25 skipped automatically */ + ld sp, [sp] /* restore original sp; orig_r0, ECR skipped implicitly */ .endm /* Get thread_info of "current" tsk */ diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h index fcdd59d77f42..49c2e090cb5c 100644 --- a/arch/arc/include/asm/entry.h +++ b/arch/arc/include/asm/entry.h @@ -13,6 +13,8 @@ #include <asm/processor.h> /* For VMALLOC_START */ #include <asm/mmu.h> +#ifdef __ASSEMBLY__ + #ifdef CONFIG_ISA_ARCOMPACT #include <asm/entry-compact.h> /* ISA specific bits */ #else @@ -89,7 +91,7 @@ * Helpers to save/restore callee-saved regs: * used by several macros below *-------------------------------------------------------------*/ -.macro SAVE_R13_TO_R24 +.macro SAVE_R13_TO_R25 PUSH r13 PUSH r14 PUSH r15 @@ -102,9 +104,11 @@ PUSH r22 PUSH r23 PUSH r24 + PUSH r25 .endm -.macro RESTORE_R24_TO_R13 +.macro RESTORE_R25_TO_R13 + POP r25 POP r24 POP r23 POP r22 @@ -119,81 +123,31 @@ POP r13 .endm -/*-------------------------------------------------------------- - * Collect User Mode callee regs as struct callee_regs - needed by - * fork/do_signal/unaligned-access-emulation. 
- * (By default only scratch regs are saved on entry to kernel) - * - * Special handling for r25 if used for caching Task Pointer. - * It would have been saved in task->thread.user_r25 already, but to keep - * the interface same it is copied into regular r25 placeholder in - * struct callee_regs. - *-------------------------------------------------------------*/ +/* + * save user mode callee regs as struct callee_regs + * - needed by fork/do_signal/unaligned-access-emulation. + */ .macro SAVE_CALLEE_SAVED_USER + SAVE_R13_TO_R25 +.endm - mov r12, sp ; save SP as ref to pt_regs - SAVE_R13_TO_R24 - -#ifdef CONFIG_ARC_CURR_IN_REG - ; Retrieve orig r25 and save it with rest of callee_regs - ld r12, [r12, PT_user_r25] - PUSH r12 -#else - PUSH r25 -#endif - +/* + * restore user mode callee regs as struct callee_regs + * - could have been changed by ptrace tracer or unaligned-access fixup + */ +.macro RESTORE_CALLEE_SAVED_USER + RESTORE_R25_TO_R13 .endm -/*-------------------------------------------------------------- - * Save kernel Mode callee regs at the time of Contect Switch. - * - * Special handling for r25 if used for caching Task Pointer. - * Kernel simply skips saving it since it will be loaded with - * incoming task pointer anyways - *-------------------------------------------------------------*/ +/* + * save/restore kernel mode callee regs at the time of context switch + */ .macro SAVE_CALLEE_SAVED_KERNEL - - SAVE_R13_TO_R24 - -#ifdef CONFIG_ARC_CURR_IN_REG - sub sp, sp, 4 -#else - PUSH r25 -#endif + SAVE_R13_TO_R25 .endm -/*-------------------------------------------------------------- - * Opposite of SAVE_CALLEE_SAVED_KERNEL - *-------------------------------------------------------------*/ .macro RESTORE_CALLEE_SAVED_KERNEL - -#ifdef CONFIG_ARC_CURR_IN_REG - add sp, sp, 4 /* skip usual r25 placeholder */ -#else - POP r25 -#endif - RESTORE_R24_TO_R13 -.endm - -/*-------------------------------------------------------------- - * Opposite of SAVE_CALLEE_SAVED_USER - * - * ptrace tracer or unaligned-access fixup might have changed a user mode - * callee reg which is saved back to usual r25 storage location - *-------------------------------------------------------------*/ -.macro RESTORE_CALLEE_SAVED_USER - -#ifdef CONFIG_ARC_CURR_IN_REG - POP r12 -#else - POP r25 -#endif - RESTORE_R24_TO_R13 - - ; SP is back to start of pt_regs -#ifdef CONFIG_ARC_CURR_IN_REG - st r12, [sp, PT_user_r25] -#endif + RESTORE_R25_TO_R13 .endm /*-------------------------------------------------------------- @@ -229,10 +183,10 @@ #ifdef CONFIG_SMP -/*------------------------------------------------- +/* * Retrieve the current running task on this CPU - * 1. Determine curr CPU id. - * 2. 
Use it to index into _current_task[ ] + * - loads it from backing _current_task[] (and can't use the + * caching reg for current task */ .macro GET_CURR_TASK_ON_CPU reg GET_CPU_ID \reg @@ -254,7 +208,7 @@ add2 \tmp, @_current_task, \tmp st \tsk, [\tmp] #ifdef CONFIG_ARC_CURR_IN_REG - mov r25, \tsk + mov gp, \tsk #endif .endm @@ -269,21 +223,20 @@ .macro SET_CURR_TASK_ON_CPU tsk, tmp st \tsk, [@_current_task] #ifdef CONFIG_ARC_CURR_IN_REG - mov r25, \tsk + mov gp, \tsk #endif .endm #endif /* SMP / UNI */ -/* ------------------------------------------------------------------ +/* * Get the ptr to some field of Current Task at @off in task struct - * -Uses r25 for Current task ptr if that is enabled + * - Uses current task cached in reg if enabled */ - #ifdef CONFIG_ARC_CURR_IN_REG .macro GET_CURR_TASK_FIELD_PTR off, reg - add \reg, r25, \off + add \reg, gp, \off .endm #else @@ -295,4 +248,23 @@ #endif /* CONFIG_ARC_CURR_IN_REG */ +#else /* !__ASSEMBLY__ */ + +extern void do_signal(struct pt_regs *); +extern void do_notify_resume(struct pt_regs *); +extern int do_privilege_fault(unsigned long, struct pt_regs *); +extern int do_extension_fault(unsigned long, struct pt_regs *); +extern int insterror_is_error(unsigned long, struct pt_regs *); +extern int do_memory_error(unsigned long, struct pt_regs *); +extern int trap_is_brkpt(unsigned long, struct pt_regs *); +extern int do_misaligned_error(unsigned long, struct pt_regs *); +extern int do_trap5_error(unsigned long, struct pt_regs *); +extern int do_misaligned_access(unsigned long, struct pt_regs *, struct callee_regs *); +extern void do_machine_check_fault(unsigned long, struct pt_regs *); +extern void do_non_swi_trap(unsigned long, struct pt_regs *); +extern void do_insterror_or_kprobe(unsigned long, struct pt_regs *); +extern void do_page_fault(unsigned long, struct pt_regs *); + +#endif + #endif /* __ASM_ARC_ENTRY_H */ diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h index 5001b796fb8d..ef8d4166370c 100644 --- a/arch/arc/include/asm/hugepage.h +++ b/arch/arc/include/asm/hugepage.h @@ -21,7 +21,7 @@ static inline pmd_t pte_pmd(pte_t pte) } #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) -#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) +#define pmd_mkwrite_novma(pmd) pte_pmd(pte_mkwrite_novma(pmd_pte(pmd))) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h index 80347382a380..4fdb7350636c 100644 --- a/arch/arc/include/asm/io.h +++ b/arch/arc/include/asm/io.h @@ -21,8 +21,9 @@ #endif extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size); -extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, - unsigned long flags); +#define ioremap ioremap +#define ioremap_prot ioremap_prot +#define iounmap iounmap static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { return (void __iomem *)port; @@ -32,8 +33,6 @@ static inline void ioport_unmap(void __iomem *addr) { } -extern void iounmap(const volatile void __iomem *addr); - /* * io{read,write}{16,32}be() macros */ diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h index 0309cb405cfb..c574712ad865 100644 --- a/arch/arc/include/asm/irq.h +++ b/arch/arc/include/asm/irq.h @@ -25,5 +25,6 @@ #include <asm-generic/irq.h> extern void arc_init_IRQ(void); +extern void arch_do_IRQ(unsigned int, 
struct pt_regs *); #endif diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h index ca427c30f70e..9febf5bc3de6 100644 --- a/arch/arc/include/asm/mmu.h +++ b/arch/arc/include/asm/mmu.h @@ -14,6 +14,8 @@ typedef struct { unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */ } mm_context_t; +extern void do_tlb_overlap_fault(unsigned long, unsigned long, struct pt_regs *); + #endif #include <asm/mmu-arcv2.h> diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h index e43fe27ec54d..02b53ad811fb 100644 --- a/arch/arc/include/asm/page.h +++ b/arch/arc/include/asm/page.h @@ -108,7 +108,7 @@ extern int pfn_valid(unsigned long pfn); #else /* CONFIG_HIGHMEM */ -#define ARCH_PFN_OFFSET virt_to_pfn(CONFIG_LINUX_RAM_BASE) +#define ARCH_PFN_OFFSET virt_to_pfn((void *)CONFIG_LINUX_RAM_BASE) #endif /* CONFIG_HIGHMEM */ diff --git a/arch/arc/include/asm/pgtable-bits-arcv2.h b/arch/arc/include/asm/pgtable-bits-arcv2.h index 6e9f8ca6d6a1..f3eea3f30b2e 100644 --- a/arch/arc/include/asm/pgtable-bits-arcv2.h +++ b/arch/arc/include/asm/pgtable-bits-arcv2.h @@ -87,7 +87,7 @@ PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT)); PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)); -PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE)); +PTE_BIT_FUNC(mkwrite_novma, |= (_PAGE_WRITE)); PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY)); PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)); PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED)); @@ -100,14 +100,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); } -static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pteval) -{ - set_pte(ptep, pteval); -} +struct vm_fault; +void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, unsigned int nr); -void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, - pte_t *ptep); +#define update_mmu_cache(vma, addr, ptep) \ + update_mmu_cache_range(NULL, vma, addr, ptep, 1) /* * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that diff --git a/arch/arc/include/asm/pgtable-levels.h b/arch/arc/include/asm/pgtable-levels.h index ef68758b69f7..fc417c75c24d 100644 --- a/arch/arc/include/asm/pgtable-levels.h +++ b/arch/arc/include/asm/pgtable-levels.h @@ -169,6 +169,7 @@ #define pte_ERROR(e) \ pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#define PFN_PTE_SHIFT PAGE_SHIFT #define pte_none(x) (!pte_val(x)) #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) #define pte_clear(mm,addr,ptep) set_pte_at(mm, addr, ptep, __pte(0)) diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index fb844fce1ab6..d606658e2fe7 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h @@ -22,7 +22,6 @@ * struct thread_info */ struct thread_struct { - unsigned long ksp; /* kernel mode stack pointer */ unsigned long callee_reg; /* pointer to callee regs */ unsigned long fault_address; /* dbls as brkpt holder as well */ #ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS @@ -33,9 +32,7 @@ struct thread_struct { #endif }; -#define INIT_THREAD { \ - .ksp = sizeof(init_stack) + (unsigned long) init_stack, \ -} +#define INIT_THREAD { } /* Forward declaration, a strange C thing */ struct task_struct; @@ -56,7 +53,7 @@ struct task_struct; * Where about of Task's sp, fp, blink when it was last seen in kernel mode. 
* Look in process.c for details of kernel stack layout */ -#define TSK_K_ESP(tsk) (tsk->thread.ksp) +#define TSK_K_ESP(tsk) (task_thread_info(tsk)->ksp) #define TSK_K_REG(tsk, off) (*((unsigned long *)(TSK_K_ESP(tsk) + \ sizeof(struct callee_regs) + off))) diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 5869a74c0db2..4a2b30fb5a98 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h @@ -12,6 +12,17 @@ #ifndef __ASSEMBLY__ +typedef union { + struct { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned long state:8, vec:8, cause:8, param:8; +#else + unsigned long param:8, cause:8, vec:8, state:8; +#endif + }; + unsigned long full; +} ecr_reg; + /* THE pt_regs: Defines how regs are saved during entry into kernel */ #ifdef CONFIG_ISA_ARCOMPACT @@ -40,23 +51,10 @@ struct pt_regs { * Last word used by Linux for extra state mgmt (syscall-restart) * For interrupts, use artificial ECR values to note current prio-level */ - union { - struct { -#ifdef CONFIG_CPU_BIG_ENDIAN - unsigned long state:8, ecr_vec:8, - ecr_cause:8, ecr_param:8; -#else - unsigned long ecr_param:8, ecr_cause:8, - ecr_vec:8, state:8; -#endif - }; - unsigned long event; - }; - - unsigned long user_r25; + ecr_reg ecr; }; -#define MAX_REG_OFFSET offsetof(struct pt_regs, user_r25) +#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr) #else @@ -64,28 +62,14 @@ struct pt_regs { unsigned long orig_r0; - union { - struct { -#ifdef CONFIG_CPU_BIG_ENDIAN - unsigned long state:8, ecr_vec:8, - ecr_cause:8, ecr_param:8; -#else - unsigned long ecr_param:8, ecr_cause:8, - ecr_vec:8, state:8; -#endif - }; - unsigned long event; - }; - - unsigned long bta; /* bta_l1, bta_l2, erbta */ + ecr_reg ecr; /* Exception Cause Reg */ - unsigned long user_r25; + unsigned long bta; /* erbta */ - unsigned long r26; /* gp */ unsigned long fp; - unsigned long sp; /* user/kernel sp depending on where we came from */ - - unsigned long r12, r30; + unsigned long r30; + unsigned long r12; + unsigned long r26; /* gp */ #ifdef CONFIG_ARC_HAS_ACCL_REGS unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */ @@ -94,6 +78,8 @@ struct pt_regs { unsigned long DSP_CTRL; #endif + unsigned long sp; /* user/kernel sp depending on entry */ + /*------- Below list auto saved by h/w -----------*/ unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; @@ -134,13 +120,13 @@ struct callee_regs { /* return 1 if PC in delay slot */ #define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK) -#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param) -#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param) +#define in_syscall(regs) ((regs->ecr.vec == ECR_V_TRAP) && !regs->ecr.param) +#define in_brkpt_trap(regs) ((regs->ecr.vec == ECR_V_TRAP) && regs->ecr.param) #define STATE_SCALL_RESTARTED 0x01 -#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED) -#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED) +#define syscall_wont_restart(regs) (regs->ecr.state |= STATE_SCALL_RESTARTED) +#define syscall_restartable(regs) !(regs->ecr.state & STATE_SCALL_RESTARTED) #define current_pt_regs() \ ({ \ @@ -181,6 +167,9 @@ static inline unsigned long regs_get_register(struct pt_regs *regs, return *(unsigned long *)((unsigned long)regs + offset); } +extern int syscall_trace_entry(struct pt_regs *); +extern void syscall_trace_exit(struct pt_regs *); + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_PTRACE_H */ diff --git a/arch/arc/include/asm/setup.h 
b/arch/arc/include/asm/setup.h index 028a8cf76206..1c6db599e1fc 100644 --- a/arch/arc/include/asm/setup.h +++ b/arch/arc/include/asm/setup.h @@ -35,11 +35,11 @@ long __init arc_get_mem_sz(void); #define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2)) extern void arc_mmu_init(void); -extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); -extern void read_decode_mmu_bcr(void); +extern int arc_mmu_mumbojumbo(int cpu_id, char *buf, int len); extern void arc_cache_init(void); -extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); -extern void read_decode_cache_bcr(void); +extern int arc_cache_mumbojumbo(int cpu_id, char *buf, int len); + +extern void __init handle_uboot_args(void); #endif /* __ASMARC_SETUP_H */ diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h index d856491606ac..e0913f52c2cd 100644 --- a/arch/arc/include/asm/smp.h +++ b/arch/arc/include/asm/smp.h @@ -29,6 +29,8 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); extern void __init smp_init_cpus(void); extern void first_lines_of_secondary(void); extern const char *arc_platform_smp_cpuinfo(void); +extern void arc_platform_smp_wait_to_boot(int); +extern void start_kernel_secondary(void); /* * API expected BY platform smp code (FROM arch smp code) diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index 6ba7fe417095..4c530cf131f3 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h @@ -37,16 +37,16 @@ */ struct thread_info { unsigned long flags; /* low level flags */ + unsigned long ksp; /* kernel mode stack top in __switch_to */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - struct task_struct *task; /* main task structure */ - __u32 cpu; /* current CPU */ + int cpu; /* current CPU */ unsigned long thr_ptr; /* TLS ptr */ + struct task_struct *task; /* main task structure */ }; /* - * macros/functions for gaining access to the thread information structure - * - * preempt_count needs to be 1 initially, until the scheduler is functional. 
+ * initilaize thread_info for any @tsk + * - this is not related to init_task per se */ #define INIT_THREAD_INFO(tsk) \ { \ diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 99712471c96a..1e8809ea000a 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -146,8 +146,9 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) if (n == 0) return 0; - /* unaligned */ - if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) { + /* fallback for unaligned access when hardware doesn't support */ + if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) && + (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) { unsigned char tmp; @@ -373,8 +374,9 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) if (n == 0) return 0; - /* unaligned */ - if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) { + /* fallback for unaligned access when hardware doesn't support */ + if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) && + (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) { unsigned char tmp; @@ -584,7 +586,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) return res; } -static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) +static inline unsigned long __clear_user(void __user *to, unsigned long n) { long res = n; unsigned char *d_char = to; @@ -626,17 +628,10 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) return res; } -#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE - #define INLINE_COPY_TO_USER #define INLINE_COPY_FROM_USER -#define __clear_user(d, n) __arc_clear_user(d, n) -#else -extern unsigned long arc_clear_user_noinline(void __user *to, - unsigned long n); -#define __clear_user(d, n) arc_clear_user_noinline(d, n) -#endif +#define __clear_user __clear_user #include <asm-generic/uaccess.h> |