From db6f41063cbdb58b14846e600e6bc3f4e4c2e888 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Fri, 19 Jul 2013 15:37:12 +0100
Subject: arm64: mm: don't treat user cache maintenance faults as writes

On arm64, cache maintenance faults appear as data aborts with the CM
bit set in the ESR. The WnR bit, usually used to distinguish between
faulting loads and stores, always reads as 1 and (slightly confusingly)
the instructions are treated as reads by the architecture.

This patch fixes our fault handling code to treat cache maintenance
faults in the same way as loads.

Signed-off-by: Will Deacon
Cc:
Signed-off-by: Catalin Marinas
---
 arch/arm64/mm/fault.c | 46 ++++++++++++++++++++--------------------------
 1 file changed, 20 insertions(+), 26 deletions(-)

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0ecac8980aae..6c8ba25bf6bb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 #define ESR_CM			(1 << 8)
 #define ESR_LNX_EXEC		(1 << 24)
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
-	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
-	if (esr & ESR_WRITE)
-		mask = VM_WRITE;
-	if (esr & ESR_LNX_EXEC)
-		mask = VM_EXEC;
-
-	return vma->vm_flags & mask ? false : true;
-}
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
-			   unsigned int esr, unsigned int flags,
+			   unsigned int mm_flags, unsigned long vm_flags,
 			   struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
 	 * it.
 	 */
 good_area:
-	if (access_error(esr, vma)) {
+	/*
+	 * Check that the permissions on the VMA allow for the fault which
+	 * occurred. If we encountered a write or exec fault, we must have
+	 * appropriate permissions, otherwise we allow any permission.
+	 */
+	if (!(vma->vm_flags & vm_flags)) {
 		fault = VM_FAULT_BADACCESS;
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	int fault, sig, code;
-	bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-		(write ? FAULT_FLAG_WRITE : 0);
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	if (esr & ESR_LNX_EXEC) {
+		vm_flags = VM_EXEC;
+	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+		vm_flags = VM_WRITE;
+		mm_flags |= FAULT_FLAG_WRITE;
+	}
 
 	tsk = current;
 	mm  = tsk->mm;
@@ -248,7 +242,7 @@ retry:
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, esr, flags, tsk);
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
 	 */
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
 		 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
 		 * starvation.
 		 */
-		flags &= ~FAULT_FLAG_ALLOW_RETRY;
+		mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
 }
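
Not part of the patch itself: the sketch below is a minimal, standalone C
illustration of the decode logic the patch introduces in do_page_fault(). It
shows why a user cache maintenance fault (WnR reads as 1, but CM is also set)
falls through to the read path instead of being handled as a write. The
ESR_CM and ESR_LNX_EXEC values are taken from the patch context; ESR_WRITE as
bit 6 is the architectural WnR position; the VM_* and FAULT_FLAG_* values are
placeholder constants for illustration, not the kernel's definitions.

/*
 * Standalone sketch of the ESR decode: an exec fault checks VM_EXEC, a
 * genuine write (WnR set, CM clear) checks VM_WRITE and requests
 * FAULT_FLAG_WRITE, and everything else, including cache maintenance
 * (CM set), is treated like a load: any permission is acceptable.
 */
#include <stdio.h>

#define ESR_WRITE		(1 << 6)	/* WnR bit */
#define ESR_CM			(1 << 8)	/* cache maintenance */
#define ESR_LNX_EXEC		(1 << 24)	/* software bit marking exec faults */

/* Placeholder flag values, not the kernel's. */
#define VM_READ			0x1
#define VM_WRITE		0x2
#define VM_EXEC			0x4
#define FAULT_FLAG_WRITE	0x1

static void decode(unsigned int esr, unsigned long *vm_flags,
		   unsigned int *mm_flags)
{
	*vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	*mm_flags = 0;	/* base retry/killable flags omitted for brevity */

	if (esr & ESR_LNX_EXEC) {
		*vm_flags = VM_EXEC;
	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
		*vm_flags = VM_WRITE;
		*mm_flags |= FAULT_FLAG_WRITE;
	}
}

int main(void)
{
	unsigned long vm_flags;
	unsigned int mm_flags;

	/* Cache maintenance fault: WnR reads as 1, but CM is also set... */
	decode(ESR_WRITE | ESR_CM, &vm_flags, &mm_flags);
	/* ...so it decodes like a load: any permission, no FAULT_FLAG_WRITE. */
	printf("vm_flags=%#lx mm_flags=%#x\n", vm_flags, mm_flags);
	return 0;
}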