From b332828c39326b1dca617f387dd15d12e81cd5f0 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:43:10 +0530 Subject: hw-breakpoints: prepare the code for Hardware Breakpoint interfaces The generic hardware breakpoint interface provides an abstraction of hardware breakpoints in front of specific arch implementations for both kernel and user side breakpoints. This includes execution breakpoints and read/write breakpoints, also known as "watchpoints". This patch introduces header files containing constants, structure definitions and declaration of functions used by the hardware breakpoint core and x86 specific code. It also introduces an array based storage for the debug-register values in 'struct thread_struct', while modifying all users of debugreg member in the structure. [ Impact: add headers for new hardware breakpoint interface ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/include/asm/a.out-core.h | 8 +++--- arch/x86/include/asm/debugreg.h | 29 +++++++++++++++++++ arch/x86/include/asm/hw_breakpoint.h | 55 ++++++++++++++++++++++++++++++++++++ arch/x86/include/asm/processor.h | 8 +++--- arch/x86/kernel/process.c | 16 +++++------ arch/x86/kernel/ptrace.c | 16 +++++------ arch/x86/power/cpu_32.c | 8 +++--- arch/x86/power/cpu_64.c | 8 +++--- 8 files changed, 116 insertions(+), 32 deletions(-) create mode 100644 arch/x86/include/asm/hw_breakpoint.h (limited to 'arch') diff --git a/arch/x86/include/asm/a.out-core.h b/arch/x86/include/asm/a.out-core.h index bb70e397aa84..fc4685dd6e4d 100644 --- a/arch/x86/include/asm/a.out-core.h +++ b/arch/x86/include/asm/a.out-core.h @@ -32,10 +32,10 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump) >> PAGE_SHIFT; dump->u_dsize -= dump->u_tsize; dump->u_ssize = 0; - dump->u_debugreg[0] = current->thread.debugreg0; - dump->u_debugreg[1] = current->thread.debugreg1; - dump->u_debugreg[2] = current->thread.debugreg2; - dump->u_debugreg[3] = current->thread.debugreg3; + dump->u_debugreg[0] = current->thread.debugreg[0]; + dump->u_debugreg[1] = current->thread.debugreg[1]; + dump->u_debugreg[2] = current->thread.debugreg[2]; + dump->u_debugreg[3] = current->thread.debugreg[3]; dump->u_debugreg[4] = 0; dump->u_debugreg[5] = 0; dump->u_debugreg[6] = current->thread.debugreg6; diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h index 3ea6f37be9e2..23439fbb1d0e 100644 --- a/arch/x86/include/asm/debugreg.h +++ b/arch/x86/include/asm/debugreg.h @@ -18,6 +18,7 @@ #define DR_TRAP1 (0x2) /* db1 */ #define DR_TRAP2 (0x4) /* db2 */ #define DR_TRAP3 (0x8) /* db3 */ +#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3) #define DR_STEP (0x4000) /* single-step */ #define DR_SWITCH (0x8000) /* task switch */ @@ -49,6 +50,8 @@ #define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */ #define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */ +#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */ +#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */ #define DR_ENABLE_SIZE 2 /* 2 enable bits per register */ #define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */ @@ -67,4 +70,30 @@ #define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */ #define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */ +/* + * HW breakpoint additions + */ +#ifdef __KERNEL__ + +/* For process management */ +extern void flush_thread_hw_breakpoint(struct task_struct *tsk); 
+extern int copy_thread_hw_breakpoint(struct task_struct *tsk, + struct task_struct *child, unsigned long clone_flags); + +/* For CPU management */ +extern void load_debug_registers(void); +static inline void hw_breakpoint_disable(void) +{ + /* Zero the control register for HW Breakpoint */ + set_debugreg(0UL, 7); + + /* Zero-out the individual HW breakpoint address registers */ + set_debugreg(0UL, 0); + set_debugreg(0UL, 1); + set_debugreg(0UL, 2); + set_debugreg(0UL, 3); +} + +#endif /* __KERNEL__ */ + #endif /* _ASM_X86_DEBUGREG_H */ diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h new file mode 100644 index 000000000000..1acb4d45de70 --- /dev/null +++ b/arch/x86/include/asm/hw_breakpoint.h @@ -0,0 +1,55 @@ +#ifndef _I386_HW_BREAKPOINT_H +#define _I386_HW_BREAKPOINT_H + +#ifdef __KERNEL__ +#define __ARCH_HW_BREAKPOINT_H + +struct arch_hw_breakpoint { + char *name; /* Contains name of the symbol to set bkpt */ + unsigned long address; + u8 len; + u8 type; +}; + +#include +#include + +/* Available HW breakpoint length encodings */ +#define HW_BREAKPOINT_LEN_1 0x40 +#define HW_BREAKPOINT_LEN_2 0x44 +#define HW_BREAKPOINT_LEN_4 0x4c +#define HW_BREAKPOINT_LEN_EXECUTE 0x40 + +#ifdef CONFIG_X86_64 +#define HW_BREAKPOINT_LEN_8 0x48 +#endif + +/* Available HW breakpoint type encodings */ + +/* trigger on instruction execute */ +#define HW_BREAKPOINT_EXECUTE 0x80 +/* trigger on memory write */ +#define HW_BREAKPOINT_WRITE 0x81 +/* trigger on memory read or write */ +#define HW_BREAKPOINT_RW 0x83 + +/* Total number of available HW breakpoint registers */ +#define HBP_NUM 4 + +extern struct hw_breakpoint *hbp_kernel[HBP_NUM]; +DECLARE_PER_CPU(struct hw_breakpoint*, this_hbp_kernel[HBP_NUM]); +extern unsigned int hbp_user_refcount[HBP_NUM]; + +extern void arch_install_thread_hw_breakpoint(struct task_struct *tsk); +extern void arch_uninstall_thread_hw_breakpoint(void); +extern int arch_check_va_in_userspace(unsigned long va, u8 hbp_len); +extern int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp, + struct task_struct *tsk); +extern void arch_update_user_hw_breakpoint(int pos, struct task_struct *tsk); +extern void arch_flush_thread_hw_breakpoint(struct task_struct *tsk); +extern void arch_update_kernel_hw_breakpoint(void *); +extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data); +#endif /* __KERNEL__ */ +#endif /* _I386_HW_BREAKPOINT_H */ + diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 0b2fab0051e0..448b34a8e393 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -29,6 +29,7 @@ struct mm_struct; #include #include +#define HBP_NUM 4 /* * Default implementation of macro that returns current * instruction pointer ("program counter"). 
@@ -431,12 +432,11 @@ struct thread_struct { unsigned long fs; unsigned long gs; /* Hardware debugging registers: */ - unsigned long debugreg0; - unsigned long debugreg1; - unsigned long debugreg2; - unsigned long debugreg3; + unsigned long debugreg[HBP_NUM]; unsigned long debugreg6; unsigned long debugreg7; + /* Hardware breakpoint info */ + struct hw_breakpoint *hbp[HBP_NUM]; /* Fault info: */ unsigned long cr2; unsigned long trap_no; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index fb5dfb891f0f..291527cb438a 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -106,10 +106,10 @@ void flush_thread(void) clear_tsk_thread_flag(tsk, TIF_DEBUG); - tsk->thread.debugreg0 = 0; - tsk->thread.debugreg1 = 0; - tsk->thread.debugreg2 = 0; - tsk->thread.debugreg3 = 0; + tsk->thread.debugreg[0] = 0; + tsk->thread.debugreg[1] = 0; + tsk->thread.debugreg[2] = 0; + tsk->thread.debugreg[3] = 0; tsk->thread.debugreg6 = 0; tsk->thread.debugreg7 = 0; memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); @@ -194,10 +194,10 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, update_debugctlmsr(next->debugctlmsr); if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { - set_debugreg(next->debugreg0, 0); - set_debugreg(next->debugreg1, 1); - set_debugreg(next->debugreg2, 2); - set_debugreg(next->debugreg3, 3); + set_debugreg(next->debugreg[0], 0); + set_debugreg(next->debugreg[1], 1); + set_debugreg(next->debugreg[2], 2); + set_debugreg(next->debugreg[3], 3); /* no 4 and 5 */ set_debugreg(next->debugreg6, 6); set_debugreg(next->debugreg7, 7); diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 09ecbde91c13..313be40be55a 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -471,10 +471,10 @@ static int genregs_set(struct task_struct *target, static unsigned long ptrace_get_debugreg(struct task_struct *child, int n) { switch (n) { - case 0: return child->thread.debugreg0; - case 1: return child->thread.debugreg1; - case 2: return child->thread.debugreg2; - case 3: return child->thread.debugreg3; + case 0: return child->thread.debugreg[0]; + case 1: return child->thread.debugreg[1]; + case 2: return child->thread.debugreg[2]; + case 3: return child->thread.debugreg[3]; case 6: return child->thread.debugreg6; case 7: return child->thread.debugreg7; } @@ -493,10 +493,10 @@ static int ptrace_set_debugreg(struct task_struct *child, return -EIO; switch (n) { - case 0: child->thread.debugreg0 = data; break; - case 1: child->thread.debugreg1 = data; break; - case 2: child->thread.debugreg2 = data; break; - case 3: child->thread.debugreg3 = data; break; + case 0: child->thread.debugreg[0] = data; break; + case 1: child->thread.debugreg[1] = data; break; + case 2: child->thread.debugreg[2] = data; break; + case 3: child->thread.debugreg[3] = data; break; case 6: if ((data & ~0xffffffffUL) != 0) diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index ce702c5b3a2c..519913948003 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c @@ -84,10 +84,10 @@ static void fix_processor_context(void) * Now maybe reload the debug registers */ if (current->thread.debugreg7) { - set_debugreg(current->thread.debugreg0, 0); - set_debugreg(current->thread.debugreg1, 1); - set_debugreg(current->thread.debugreg2, 2); - set_debugreg(current->thread.debugreg3, 3); + set_debugreg(current->thread.debugreg[0], 0); + set_debugreg(current->thread.debugreg[1], 1); + set_debugreg(current->thread.debugreg[2], 2); + 
set_debugreg(current->thread.debugreg[3], 3); /* no 4 and 5 */ set_debugreg(current->thread.debugreg6, 6); set_debugreg(current->thread.debugreg7, 7); diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c index 5343540f2607..1e3bdcc959ff 100644 --- a/arch/x86/power/cpu_64.c +++ b/arch/x86/power/cpu_64.c @@ -163,10 +163,10 @@ static void fix_processor_context(void) * Now maybe reload the debug registers */ if (current->thread.debugreg7){ - loaddebug(¤t->thread, 0); - loaddebug(¤t->thread, 1); - loaddebug(¤t->thread, 2); - loaddebug(¤t->thread, 3); + set_debugreg(current->thread.debugreg[0], 0); + set_debugreg(current->thread.debugreg[1], 1); + set_debugreg(current->thread.debugreg[2], 2); + set_debugreg(current->thread.debugreg[3], 3); /* no 4 and 5 */ loaddebug(¤t->thread, 6); loaddebug(¤t->thread, 7); -- cgit v1.2.3 From 62a038d34db26771756cf3689e36de638bedd2c4 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:43:33 +0530 Subject: hw-breakpoints: introducing generic hardware breakpoint handler interfaces This patch introduces the generic Hardware Breakpoint interfaces for both user and kernel space requests. This core Api handles the hardware breakpoints through new helpers. It handles the user-space breakpoints and kernel breakpoints in front of arch implementation. One can choose kernel wide breakpoints using the following helpers and passing them a generic struct hw_breakpoint: - register_kernel_hw_breakpoint() - unregister_kernel_hw_breakpoint() - modify_kernel_hw_breakpoint() On the other side, you can choose per task breakpoints. - register_user_hw_breakpoint() - unregister_user_hw_breakpoint() - modify_user_hw_breakpoint() [ fweisbec@gmail.com: fix conflict against perfcounter ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/Kconfig b/arch/Kconfig index 78a35e9dc104..1adf2d0e6356 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -112,3 +112,7 @@ config HAVE_DMA_API_DEBUG config HAVE_DEFAULT_NO_SPIN_MUTEXES bool + +config HAVE_HW_BREAKPOINT + bool + -- cgit v1.2.3 From 0067f1297241ea567f2b22a455519752d70fcca9 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:43:57 +0530 Subject: hw-breakpoints: x86 architecture implementation of Hardware Breakpoint interfaces This patch introduces the arch-specific implementation of the generic hardware breakpoints in kernel/hw_breakpoint.c inside x86 specific directories. It contains functions which help to validate and serve requests using Hardware Breakpoint registers on x86 processors. 
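[ Editorial aside, not part of the series: the two commit messages above only name the new generic helpers, and the generic core itself is outside this 'arch'-limited view, so the exact prototypes are not shown here. The following is a minimal, illustrative sketch of how a kernel-wide write watchpoint could be set up with the structures visible in these patches. The register_kernel_hw_breakpoint()/unregister_kernel_hw_breakpoint() signatures are assumed (taking only the hw_breakpoint pointer and returning 0 on success), and the callback, variable names and watched symbol are made up for the example. ]

    /* Hypothetical usage sketch -- not code from the series itself. */
    static void sample_wp_triggered(struct hw_breakpoint *bp, struct pt_regs *regs)
    {
            /* Runs in debug-exception context: keep it short, do not sleep. */
            printk(KERN_INFO "write to %s (0x%lx) detected\n",
                   bp->info.name, bp->info.address);
    }

    static struct hw_breakpoint sample_wp = {
            .info.name = "pid_max",            /* resolved via kallsyms by arch_store_info() */
            .info.type = HW_BREAKPOINT_WRITE,  /* trigger on memory write */
            .info.len  = HW_BREAKPOINT_LEN_4,  /* watch a 4-byte object */
            .triggered = sample_wp_triggered,
    };

    /* ... in module init or boot code ... */
    if (register_kernel_hw_breakpoint(&sample_wp))     /* assumed prototype */
            printk(KERN_ERR "no free debug register\n");
    /* ... and on teardown ... */
    unregister_kernel_hw_breakpoint(&sample_wp);
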
[ fweisbec@gmail.com: fix conflict against kmemcheck ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/Kconfig | 1 + arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/hw_breakpoint.c | 382 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 384 insertions(+), 1 deletion(-) create mode 100644 arch/x86/kernel/hw_breakpoint.c (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index df9e885eee14..3033375ed6bc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -46,6 +46,7 @@ config X86 select HAVE_KERNEL_GZIP select HAVE_KERNEL_BZIP2 select HAVE_KERNEL_LZMA + select HAVE_HW_BREAKPOINT config ARCH_DEFCONFIG string diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 77df4d654ff9..cbc781829173 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -36,7 +36,7 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o obj-y += bootflag.o e820.o obj-y += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o -obj-y += alternative.o i8253.o pci-nommu.o +obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o obj-y += tsc.o io_delay.o rtc.o obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c new file mode 100644 index 000000000000..4867c9f3b5fb --- /dev/null +++ b/arch/x86/kernel/hw_breakpoint.c @@ -0,0 +1,382 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2007 Alan Stern + * Copyright (C) 2009 IBM Corporation + */ + +/* + * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, + * using the CPU's debug registers. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* Unmasked kernel DR7 value */ +static unsigned long kdr7; + +/* + * Masks for the bits corresponding to registers DR0 - DR3 in DR7 register. + * Used to clear and verify the status of bits corresponding to DR0 - DR3 + */ +static const unsigned long dr7_masks[HBP_NUM] = { + 0x000f0003, /* LEN0, R/W0, G0, L0 */ + 0x00f0000c, /* LEN1, R/W1, G1, L1 */ + 0x0f000030, /* LEN2, R/W2, G2, L2 */ + 0xf00000c0 /* LEN3, R/W3, G3, L3 */ +}; + + +/* + * Encode the length, type, Exact, and Enable bits for a particular breakpoint + * as stored in debug register 7. 
+ */ +static unsigned long encode_dr7(int drnum, unsigned int len, unsigned int type) +{ + unsigned long bp_info; + + bp_info = (len | type) & 0xf; + bp_info <<= (DR_CONTROL_SHIFT + drnum * DR_CONTROL_SIZE); + bp_info |= (DR_GLOBAL_ENABLE << (drnum * DR_ENABLE_SIZE)) | + DR_GLOBAL_SLOWDOWN; + return bp_info; +} + +void arch_update_kernel_hw_breakpoint(void *unused) +{ + struct hw_breakpoint *bp; + int i, cpu = get_cpu(); + unsigned long temp_kdr7 = 0; + + /* Don't allow debug exceptions while we update the registers */ + set_debugreg(0UL, 7); + + for (i = hbp_kernel_pos; i < HBP_NUM; i++) { + per_cpu(this_hbp_kernel[i], cpu) = bp = hbp_kernel[i]; + if (bp) { + temp_kdr7 |= encode_dr7(i, bp->info.len, bp->info.type); + set_debugreg(bp->info.address, i); + } + } + + /* No need to set DR6. Update the debug registers with kernel-space + * breakpoint values from kdr7 and user-space requests from the + * current process + */ + kdr7 = temp_kdr7; + set_debugreg(kdr7 | current->thread.debugreg7, 7); + put_cpu_no_resched(); +} + +/* + * Install the thread breakpoints in their debug registers. + */ +void arch_install_thread_hw_breakpoint(struct task_struct *tsk) +{ + struct thread_struct *thread = &(tsk->thread); + + switch (hbp_kernel_pos) { + case 4: + set_debugreg(thread->debugreg[3], 3); + case 3: + set_debugreg(thread->debugreg[2], 2); + case 2: + set_debugreg(thread->debugreg[1], 1); + case 1: + set_debugreg(thread->debugreg[0], 0); + default: + break; + } + + /* No need to set DR6 */ + set_debugreg((kdr7 | thread->debugreg7), 7); +} + +/* + * Install the debug register values for just the kernel, no thread. + */ +void arch_uninstall_thread_hw_breakpoint() +{ + /* Clear the user-space portion of debugreg7 by setting only kdr7 */ + set_debugreg(kdr7, 7); + +} + +static int get_hbp_len(u8 hbp_len) +{ + unsigned int len_in_bytes = 0; + + switch (hbp_len) { + case HW_BREAKPOINT_LEN_1: + len_in_bytes = 1; + break; + case HW_BREAKPOINT_LEN_2: + len_in_bytes = 2; + break; + case HW_BREAKPOINT_LEN_4: + len_in_bytes = 4; + break; +#ifdef CONFIG_X86_64 + case HW_BREAKPOINT_LEN_8: + len_in_bytes = 8; + break; +#endif + } + return len_in_bytes; +} + +/* + * Check for virtual address in user space. + */ +int arch_check_va_in_userspace(unsigned long va, u8 hbp_len) +{ + unsigned int len; + + len = get_hbp_len(hbp_len); + + return (va <= TASK_SIZE - len); +} + +/* + * Check for virtual address in kernel space. + */ +int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) +{ + unsigned int len; + + len = get_hbp_len(hbp_len); + + return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); +} + +/* + * Store a breakpoint's encoded address, length, and type. + */ +static int arch_store_info(struct hw_breakpoint *bp, struct task_struct *tsk) +{ + /* + * User-space requests will always have the address field populated + * Symbol names from user-space are rejected + */ + if (tsk && bp->info.name) + return -EINVAL; + /* + * For kernel-addresses, either the address or symbol name can be + * specified. 
+ */ + if (bp->info.name) + bp->info.address = (unsigned long) + kallsyms_lookup_name(bp->info.name); + if (bp->info.address) + return 0; + return -EINVAL; +} + +/* + * Validate the arch-specific HW Breakpoint register settings + */ +int arch_validate_hwbkpt_settings(struct hw_breakpoint *bp, + struct task_struct *tsk) +{ + unsigned int align; + int ret = -EINVAL; + + switch (bp->info.type) { + /* + * Ptrace-refactoring code + * For now, we'll allow instruction breakpoint only for user-space + * addresses + */ + case HW_BREAKPOINT_EXECUTE: + if ((!arch_check_va_in_userspace(bp->info.address, + bp->info.len)) && + bp->info.len != HW_BREAKPOINT_LEN_EXECUTE) + return ret; + break; + case HW_BREAKPOINT_WRITE: + break; + case HW_BREAKPOINT_RW: + break; + default: + return ret; + } + + switch (bp->info.len) { + case HW_BREAKPOINT_LEN_1: + align = 0; + break; + case HW_BREAKPOINT_LEN_2: + align = 1; + break; + case HW_BREAKPOINT_LEN_4: + align = 3; + break; +#ifdef CONFIG_X86_64 + case HW_BREAKPOINT_LEN_8: + align = 7; + break; +#endif + default: + return ret; + } + + if (bp->triggered) + ret = arch_store_info(bp, tsk); + + if (ret < 0) + return ret; + /* + * Check that the low-order bits of the address are appropriate + * for the alignment implied by len. + */ + if (bp->info.address & align) + return -EINVAL; + + /* Check that the virtual address is in the proper range */ + if (tsk) { + if (!arch_check_va_in_userspace(bp->info.address, bp->info.len)) + return -EFAULT; + } else { + if (!arch_check_va_in_kernelspace(bp->info.address, + bp->info.len)) + return -EFAULT; + } + return 0; +} + +void arch_update_user_hw_breakpoint(int pos, struct task_struct *tsk) +{ + struct thread_struct *thread = &(tsk->thread); + struct hw_breakpoint *bp = thread->hbp[pos]; + + thread->debugreg7 &= ~dr7_masks[pos]; + if (bp) { + thread->debugreg[pos] = bp->info.address; + thread->debugreg7 |= encode_dr7(pos, bp->info.len, + bp->info.type); + } else + thread->debugreg[pos] = 0; +} + +void arch_flush_thread_hw_breakpoint(struct task_struct *tsk) +{ + int i; + struct thread_struct *thread = &(tsk->thread); + + thread->debugreg7 = 0; + for (i = 0; i < HBP_NUM; i++) + thread->debugreg[i] = 0; +} + +/* + * Handle debug exception notifications. + * + * Return value is either NOTIFY_STOP or NOTIFY_DONE as explained below. + * + * NOTIFY_DONE returned if one of the following conditions is true. + * i) When the causative address is from user-space and the exception + * is a valid one, i.e. not triggered as a result of lazy debug register + * switching + * ii) When there are more bits than trap set in DR6 register (such + * as BD, BS or BT) indicating that more than one debug condition is + * met and requires some more action in do_debug(). + * + * NOTIFY_STOP returned for all other cases + * + */ +int __kprobes hw_breakpoint_handler(struct die_args *args) +{ + int i, cpu, rc = NOTIFY_STOP; + struct hw_breakpoint *bp; + /* The DR6 value is stored in args->err */ + unsigned long dr7, dr6 = args->err; + + /* Do an early return if no trap bits are set in DR6 */ + if ((dr6 & DR_TRAP_BITS) == 0) + return NOTIFY_DONE; + + /* Lazy debug register switching */ + if (!test_tsk_thread_flag(current, TIF_DEBUG)) + arch_uninstall_thread_hw_breakpoint(); + + get_debugreg(dr7, 7); + /* Disable breakpoints during exception handling */ + set_debugreg(0UL, 7); + /* + * Assert that local interrupts are disabled + * Reset the DRn bits in the virtualized register value. + * The ptrace trigger routine will add in whatever is needed. 
+ */ + current->thread.debugreg6 &= ~DR_TRAP_BITS; + cpu = get_cpu(); + + /* Handle all the breakpoints that were triggered */ + for (i = 0; i < HBP_NUM; ++i) { + if (likely(!(dr6 & (DR_TRAP0 << i)))) + continue; + /* + * Find the corresponding hw_breakpoint structure and + * invoke its triggered callback. + */ + if (i >= hbp_kernel_pos) + bp = per_cpu(this_hbp_kernel[i], cpu); + else { + bp = current->thread.hbp[i]; + if (bp) + rc = NOTIFY_DONE; + } + /* + * bp can be NULL due to lazy debug register switching + * or due to the delay between updates of hbp_kernel_pos + * and this_hbp_kernel. + */ + if (!bp) + continue; + + (bp->triggered)(bp, args->regs); + } + if (dr6 & (~DR_TRAP_BITS)) + rc = NOTIFY_DONE; + + set_debugreg(dr7, 7); + put_cpu_no_resched(); + return rc; +} + +/* + * Handle debug exception notifications. + */ +int __kprobes hw_breakpoint_exceptions_notify( + struct notifier_block *unused, unsigned long val, void *data) +{ + if (val != DIE_DEBUG) + return NOTIFY_DONE; + + return hw_breakpoint_handler(data); +} -- cgit v1.2.3 From 08d68323d1f0c34452e614263b212ca556dae47f Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:44:08 +0530 Subject: hw-breakpoints: modifying generic debug exception to use thread-specific debug registers This patch modifies the breakpoint exception handler code to use the new abstract debug register names. [ fweisbec@gmail.com: fix conflict against kmemcheck ] [ Impact: refactor and cleanup x86 debug exception handler ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/traps.c | 69 +++++++++++++++++-------------------------------- 1 file changed, 24 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index a1d288327ff0..de9913247dd0 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -529,73 +529,52 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; - unsigned long condition; + unsigned long dr6; int si_code; - get_debugreg(condition, 6); + get_debugreg(dr6, 6); + /* DR6 may or may not be cleared by the CPU */ + set_debugreg(0, 6); /* * The processor cleared BTF, so don't mark that we need it set. */ clear_tsk_thread_flag(tsk, TIF_DEBUGCTLMSR); tsk->thread.debugctlmsr = 0; - if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code, + /* Store the virtualized DR6 value */ + tsk->thread.debugreg6 = dr6; + + if (notify_die(DIE_DEBUG, "debug", regs, dr6, error_code, SIGTRAP) == NOTIFY_STOP) return; /* It's safe to allow irq's after DR6 has been saved */ preempt_conditional_sti(regs); - /* Mask out spurious debug traps due to lazy DR7 setting */ - if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) { - if (!tsk->thread.debugreg7) - goto clear_dr7; + if (regs->flags & X86_VM_MASK) { + handle_vm86_trap((struct kernel_vm86_regs *) regs, + error_code, 1); + return; } -#ifdef CONFIG_X86_32 - if (regs->flags & X86_VM_MASK) - goto debug_vm86; -#endif - - /* Save debug status register where ptrace can see it */ - tsk->thread.debugreg6 = condition; - /* - * Single-stepping through TF: make sure we ignore any events in - * kernel space (but re-enable TF when returning to user mode). + * Single-stepping through system calls: ignore any exceptions in + * kernel space, but re-enable TF when returning to user mode. 
+ * + * We already checked v86 mode above, so we can check for kernel mode + * by just checking the CPL of CS. */ - if (condition & DR_STEP) { - if (!user_mode(regs)) - goto clear_TF_reenable; + if ((dr6 & DR_STEP) && !user_mode(regs)) { + tsk->thread.debugreg6 &= ~DR_STEP; + set_tsk_thread_flag(tsk, TIF_SINGLESTEP); + regs->flags &= ~X86_EFLAGS_TF; } - - si_code = get_si_code(condition); - /* Ok, finally something we can handle */ - send_sigtrap(tsk, regs, error_code, si_code); - - /* - * Disable additional traps. They'll be re-enabled when - * the signal is delivered. - */ -clear_dr7: - set_debugreg(0, 7); + si_code = get_si_code(tsk->thread.debugreg6); + if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS)) + send_sigtrap(tsk, regs, error_code, si_code); preempt_conditional_cli(regs); - return; -#ifdef CONFIG_X86_32 -debug_vm86: - /* reenable preemption: handle_vm86_trap() might sleep */ - dec_preempt_count(); - handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); - conditional_cli(regs); - return; -#endif - -clear_TF_reenable: - set_tsk_thread_flag(tsk, TIF_SINGLESTEP); - regs->flags &= ~X86_EFLAGS_TF; - preempt_conditional_cli(regs); return; } -- cgit v1.2.3 From 1e3500666f7c5daaadadb8431a2927cdbbdb7dd4 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:44:26 +0530 Subject: hw-breakpoints: use wrapper routines around debug registers in processor related functions This patch enables the use of wrapper routines to access the debug/breakpoint registers on cpu management. The hardcoded debug registers save and restore operations for threads breakpoints are replaced by wrappers. And now that we handle the kernel breakpoints too, we also need to handle them on cpu hotplug operations. [ Impact: adapt new hardware breakpoint api to cpu hotplug ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/smpboot.c | 3 +++ arch/x86/power/cpu_32.c | 13 +++---------- arch/x86/power/cpu_64.c | 12 +++--------- 3 files changed, 9 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 58d24ef917d8..2b2652d205c0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include @@ -326,6 +327,7 @@ notrace static void __cpuinit start_secondary(void *unused) setup_secondary_clock(); wmb(); + load_debug_registers(); cpu_idle(); } @@ -1250,6 +1252,7 @@ void cpu_disable_common(void) remove_cpu_from_maps(cpu); unlock_vector_lock(); fixup_irqs(); + hw_breakpoint_disable(); } int native_cpu_disable(void) diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c index 519913948003..2bc3b016de90 100644 --- a/arch/x86/power/cpu_32.c +++ b/arch/x86/power/cpu_32.c @@ -13,6 +13,7 @@ #include #include #include +#include static struct saved_context saved_context; @@ -48,6 +49,7 @@ static void __save_processor_state(struct saved_context *ctxt) ctxt->cr2 = read_cr2(); ctxt->cr3 = read_cr3(); ctxt->cr4 = read_cr4_safe(); + hw_breakpoint_disable(); } /* Needed by apm.c */ @@ -83,16 +85,7 @@ static void fix_processor_context(void) /* * Now maybe reload the debug registers */ - if (current->thread.debugreg7) { - set_debugreg(current->thread.debugreg[0], 0); - set_debugreg(current->thread.debugreg[1], 1); - set_debugreg(current->thread.debugreg[2], 2); - set_debugreg(current->thread.debugreg[3], 3); - /* no 4 and 5 */ - set_debugreg(current->thread.debugreg6, 6); - 
set_debugreg(current->thread.debugreg7, 7); - } - + load_debug_registers(); } static void __restore_processor_state(struct saved_context *ctxt) diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu_64.c index 1e3bdcc959ff..46866a13a93a 100644 --- a/arch/x86/power/cpu_64.c +++ b/arch/x86/power/cpu_64.c @@ -16,6 +16,7 @@ #include #include #include +#include static void fix_processor_context(void); @@ -71,6 +72,7 @@ static void __save_processor_state(struct saved_context *ctxt) ctxt->cr3 = read_cr3(); ctxt->cr4 = read_cr4(); ctxt->cr8 = read_cr8(); + hw_breakpoint_disable(); } void save_processor_state(void) @@ -162,13 +164,5 @@ static void fix_processor_context(void) /* * Now maybe reload the debug registers */ - if (current->thread.debugreg7){ - set_debugreg(current->thread.debugreg[0], 0); - set_debugreg(current->thread.debugreg[1], 1); - set_debugreg(current->thread.debugreg[2], 2); - set_debugreg(current->thread.debugreg[3], 3); - /* no 4 and 5 */ - loaddebug(¤t->thread, 6); - loaddebug(¤t->thread, 7); - } + load_debug_registers(); } -- cgit v1.2.3 From 66cb5917295958652ff6ba36d83f98f2379c46b4 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:44:55 +0530 Subject: hw-breakpoints: use the new wrapper routines to access debug registers in process/thread code This patch enables the use of abstract debug registers in process-handling routines, according to the new hardware breakpoint Api. [ Impact: adapt thread breakpoints handling code to the new breakpoint Api ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/process.c | 22 ++++++---------------- arch/x86/kernel/process_32.c | 28 ++++++++++++++++++++++++++++ arch/x86/kernel/process_64.c | 31 +++++++++++++++++++++++++++++++ 3 files changed, 65 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 291527cb438a..19a686c401b5 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include unsigned long idle_halt; EXPORT_SYMBOL(idle_halt); @@ -46,6 +48,8 @@ void free_thread_xstate(struct task_struct *tsk) kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); tsk->thread.xstate = NULL; } + if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) + flush_thread_hw_breakpoint(tsk); WARN(tsk->thread.ds_ctx, "leaking DS context\n"); } @@ -106,12 +110,8 @@ void flush_thread(void) clear_tsk_thread_flag(tsk, TIF_DEBUG); - tsk->thread.debugreg[0] = 0; - tsk->thread.debugreg[1] = 0; - tsk->thread.debugreg[2] = 0; - tsk->thread.debugreg[3] = 0; - tsk->thread.debugreg6 = 0; - tsk->thread.debugreg7 = 0; + if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) + flush_thread_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); /* * Forget coprocessor state.. 
@@ -193,16 +193,6 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, else if (next->debugctlmsr != prev->debugctlmsr) update_debugctlmsr(next->debugctlmsr); - if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { - set_debugreg(next->debugreg[0], 0); - set_debugreg(next->debugreg[1], 1); - set_debugreg(next->debugreg[2], 2); - set_debugreg(next->debugreg[3], 3); - /* no 4 and 5 */ - set_debugreg(next->debugreg6, 6); - set_debugreg(next->debugreg7, 7); - } - if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ test_tsk_thread_flag(next_p, TIF_NOTSC)) { /* prev and next are different */ diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index b5e4bfef4472..297ffff2ffc2 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -61,6 +61,8 @@ #include #include #include +#include +#include asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); @@ -265,7 +267,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, task_user_gs(p) = get_user_gs(regs); + p->thread.io_bitmap_ptr = NULL; tsk = current; + err = -ENOMEM; + if (unlikely(test_tsk_thread_flag(tsk, TIF_DEBUG))) + if (copy_thread_hw_breakpoint(tsk, p, clone_flags)) + goto out; + if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, IO_BITMAP_BYTES, GFP_KERNEL); @@ -285,10 +293,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, err = do_set_thread_area(p, -1, (struct user_desc __user *)childregs->si, 0); +out: if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } + if (err) + flush_thread_hw_breakpoint(p); clear_tsk_thread_flag(p, TIF_DS_AREA_MSR); p->thread.ds_ctx = NULL; @@ -427,6 +438,23 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) lazy_load_gs(next->gs); percpu_write(current_task, next_p); + /* + * There's a problem with moving the arch_install_thread_hw_breakpoint() + * call before current is updated. Suppose a kernel breakpoint is + * triggered in between the two, the hw-breakpoint handler will see that + * the 'current' task does not have TIF_DEBUG flag set and will think it + * is leftover from an old task (lazy switching) and will erase it. Then + * until the next context switch, no user-breakpoints will be installed. + * + * The real problem is that it's impossible to update both current and + * physical debug registers at the same instant, so there will always be + * a window in which they disagree and a breakpoint might get triggered. + * Since we use lazy switching, we are forced to assume that a + * disagreement means that current is correct and the exception is due + * to lazy debug register switching. 
+ */ + if (unlikely(test_tsk_thread_flag(next_p, TIF_DEBUG))) + arch_install_thread_hw_breakpoint(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 5a1a1de292ec..f7b276d4b3fb 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -55,6 +55,8 @@ #include #include #include +#include +#include asmlinkage extern void ret_from_fork(void); @@ -248,6 +250,8 @@ void release_thread(struct task_struct *dead_task) BUG(); } } + if (unlikely(dead_task->thread.debugreg7)) + flush_thread_hw_breakpoint(dead_task); } static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr) @@ -303,12 +307,18 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, p->thread.fs = me->thread.fs; p->thread.gs = me->thread.gs; + p->thread.io_bitmap_ptr = NULL; savesegment(gs, p->thread.gsindex); savesegment(fs, p->thread.fsindex); savesegment(es, p->thread.es); savesegment(ds, p->thread.ds); + err = -ENOMEM; + if (unlikely(test_tsk_thread_flag(me, TIF_DEBUG))) + if (copy_thread_hw_breakpoint(me, p, clone_flags)) + goto out; + if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { @@ -347,6 +357,9 @@ out: kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } + if (err) + flush_thread_hw_breakpoint(p); + return err; } @@ -492,6 +505,24 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) */ if (tsk_used_math(next_p) && next_p->fpu_counter > 5) math_state_restore(); + /* + * There's a problem with moving the arch_install_thread_hw_breakpoint() + * call before current is updated. Suppose a kernel breakpoint is + * triggered in between the two, the hw-breakpoint handler will see that + * the 'current' task does not have TIF_DEBUG flag set and will think it + * is leftover from an old task (lazy switching) and will erase it. Then + * until the next context switch, no user-breakpoints will be installed. + * + * The real problem is that it's impossible to update both current and + * physical debug registers at the same instant, so there will always be + * a window in which they disagree and a breakpoint might get triggered. + * Since we use lazy switching, we are forced to assume that a + * disagreement means that current is correct and the exception is due + * to lazy debug register switching. + */ + if (unlikely(test_tsk_thread_flag(next_p, TIF_DEBUG))) + arch_install_thread_hw_breakpoint(next_p); + return prev_p; } -- cgit v1.2.3 From da0cdc14f5f7e0faee6b2393fefed056cdb17146 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:45:03 +0530 Subject: hw-breakpoints: modify signal handling code to refrain from re-enabling HW Breakpoints This patch disables re-enabling of Hardware Breakpoint registers through the signal handling code. This is now done during from hw_breakpoint_handler(). Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/signal.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 14425166b8e3..f33d2e0ef095 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -800,15 +800,6 @@ static void do_signal(struct pt_regs *regs) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { - /* - * Re-enable any watchpoints before delivering the - * signal to user space. 
The processor register will - * have been cleared if the watchpoint triggered - * inside the kernel. - */ - if (current->thread.debugreg7) - set_debugreg(current->thread.debugreg7, 7); - /* Whee! Actually deliver the signal. */ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { /* -- cgit v1.2.3 From 72f674d203cd230426437cdcf7dd6f681dad8b0d Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:45:48 +0530 Subject: hw-breakpoints: modify Ptrace routines to access breakpoint registers This patch modifies the ptrace code to use the new wrapper routines around the debug/breakpoint registers. [ Impact: adapt x86 ptrace to the new breakpoint Api ] Original-patch-by: Alan Stern Signed-off-by: K.Prasad Signed-off-by: Maneesh Soni Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/ptrace.c | 231 +++++++++++++++++++++++++++++------------------ 1 file changed, 141 insertions(+), 90 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 313be40be55a..b457f78b7dbf 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -34,6 +34,7 @@ #include #include #include +#include #include @@ -136,11 +137,6 @@ static int set_segment_reg(struct task_struct *task, return 0; } -static unsigned long debugreg_addr_limit(struct task_struct *task) -{ - return TASK_SIZE - 3; -} - #else /* CONFIG_X86_64 */ #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT) @@ -265,15 +261,6 @@ static int set_segment_reg(struct task_struct *task, return 0; } -static unsigned long debugreg_addr_limit(struct task_struct *task) -{ -#ifdef CONFIG_IA32_EMULATION - if (test_tsk_thread_flag(task, TIF_IA32)) - return IA32_PAGE_OFFSET - 3; -#endif - return TASK_SIZE_MAX - 7; -} - #endif /* CONFIG_X86_32 */ static unsigned long get_flags(struct task_struct *task) @@ -464,95 +451,159 @@ static int genregs_set(struct task_struct *target, } /* - * This function is trivial and will be inlined by the compiler. - * Having it separates the implementation details of debug - * registers from the interface details of ptrace. + * Decode the length and type bits for a particular breakpoint as + * stored in debug register 7. Return the "enabled" status. */ -static unsigned long ptrace_get_debugreg(struct task_struct *child, int n) +static int decode_dr7(unsigned long dr7, int bpnum, unsigned *len, + unsigned *type) { - switch (n) { - case 0: return child->thread.debugreg[0]; - case 1: return child->thread.debugreg[1]; - case 2: return child->thread.debugreg[2]; - case 3: return child->thread.debugreg[3]; - case 6: return child->thread.debugreg6; - case 7: return child->thread.debugreg7; - } - return 0; + int bp_info = dr7 >> (DR_CONTROL_SHIFT + bpnum * DR_CONTROL_SIZE); + + *len = (bp_info & 0xc) | 0x40; + *type = (bp_info & 0x3) | 0x80; + return (dr7 >> (bpnum * DR_ENABLE_SIZE)) & 0x3; } -static int ptrace_set_debugreg(struct task_struct *child, - int n, unsigned long data) +static void ptrace_triggered(struct hw_breakpoint *bp, struct pt_regs *regs) { + struct thread_struct *thread = &(current->thread); int i; - if (unlikely(n == 4 || n == 5)) - return -EIO; + /* + * Store in the virtual DR6 register the fact that the breakpoint + * was hit so the thread's debugger will see it. + */ + for (i = 0; i < hbp_kernel_pos; i++) + /* + * We will check bp->info.address against the address stored in + * thread's hbp structure and not debugreg[i]. 
This is to ensure + * that the corresponding bit for 'i' in DR7 register is enabled + */ + if (bp->info.address == thread->hbp[i]->info.address) + break; - if (n < 4 && unlikely(data >= debugreg_addr_limit(child))) - return -EIO; + thread->debugreg6 |= (DR_TRAP0 << i); +} - switch (n) { - case 0: child->thread.debugreg[0] = data; break; - case 1: child->thread.debugreg[1] = data; break; - case 2: child->thread.debugreg[2] = data; break; - case 3: child->thread.debugreg[3] = data; break; +/* + * Handle ptrace writes to debug register 7. + */ +static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) +{ + struct thread_struct *thread = &(tsk->thread); + unsigned long old_dr7 = thread->debugreg7; + int i, orig_ret = 0, rc = 0; + int enabled, second_pass = 0; + unsigned len, type; + struct hw_breakpoint *bp; + + data &= ~DR_CONTROL_RESERVED; +restore: + /* + * Loop through all the hardware breakpoints, making the + * appropriate changes to each. + */ + for (i = 0; i < HBP_NUM; i++) { + enabled = decode_dr7(data, i, &len, &type); + bp = thread->hbp[i]; + + if (!enabled) { + if (bp) { + /* Don't unregister the breakpoints right-away, + * unless all register_user_hw_breakpoint() + * requests have succeeded. This prevents + * any window of opportunity for debug + * register grabbing by other users. + */ + if (!second_pass) + continue; + unregister_user_hw_breakpoint(tsk, bp); + kfree(bp); + } + continue; + } + if (!bp) { + rc = -ENOMEM; + bp = kzalloc(sizeof(struct hw_breakpoint), GFP_KERNEL); + if (bp) { + bp->info.address = thread->debugreg[i]; + bp->triggered = ptrace_triggered; + bp->info.len = len; + bp->info.type = type; + rc = register_user_hw_breakpoint(tsk, bp); + if (rc) + kfree(bp); + } + } else + rc = modify_user_hw_breakpoint(tsk, bp); + if (rc) + break; + } + /* + * Make a second pass to free the remaining unused breakpoints + * or to restore the original breakpoints if an error occurred. + */ + if (!second_pass) { + second_pass = 1; + if (rc < 0) { + orig_ret = rc; + data = old_dr7; + } + goto restore; + } + return ((orig_ret < 0) ? orig_ret : rc); +} - case 6: - if ((data & ~0xffffffffUL) != 0) - return -EIO; - child->thread.debugreg6 = data; - break; +/* + * Handle PTRACE_PEEKUSR calls for the debug register area. + */ +unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) +{ + struct thread_struct *thread = &(tsk->thread); + unsigned long val = 0; + + if (n < HBP_NUM) + val = thread->debugreg[n]; + else if (n == 6) + val = thread->debugreg6; + else if (n == 7) + val = thread->debugreg7; + return val; +} - case 7: - /* - * Sanity-check data. Take one half-byte at once with - * check = (val >> (16 + 4*i)) & 0xf. It contains the - * R/Wi and LENi bits; bits 0 and 1 are R/Wi, and bits - * 2 and 3 are LENi. Given a list of invalid values, - * we do mask |= 1 << invalid_value, so that - * (mask >> check) & 1 is a correct test for invalid - * values. - * - * R/Wi contains the type of the breakpoint / - * watchpoint, LENi contains the length of the watched - * data in the watchpoint case. - * - * The invalid values are: - * - LENi == 0x10 (undefined), so mask |= 0x0f00. [32-bit] - * - R/Wi == 0x10 (break on I/O reads or writes), so - * mask |= 0x4444. - * - R/Wi == 0x00 && LENi != 0x00, so we have mask |= - * 0x1110. - * - * Finally, mask = 0x0f00 | 0x4444 | 0x1110 == 0x5f54. - * - * See the Intel Manual "System Programming Guide", - * 15.2.4 - * - * Note that LENi == 0x10 is defined on x86_64 in long - * mode (i.e. 
even for 32-bit userspace software, but - * 64-bit kernel), so the x86_64 mask value is 0x5454. - * See the AMD manual no. 24593 (AMD64 System Programming) - */ -#ifdef CONFIG_X86_32 -#define DR7_MASK 0x5f54 -#else -#define DR7_MASK 0x5554 -#endif - data &= ~DR_CONTROL_RESERVED; - for (i = 0; i < 4; i++) - if ((DR7_MASK >> ((data >> (16 + 4*i)) & 0xf)) & 1) - return -EIO; - child->thread.debugreg7 = data; - if (data) - set_tsk_thread_flag(child, TIF_DEBUG); - else - clear_tsk_thread_flag(child, TIF_DEBUG); - break; +/* + * Handle PTRACE_POKEUSR calls for the debug register area. + */ +int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val) +{ + struct thread_struct *thread = &(tsk->thread); + int rc = 0; + + /* There are no DR4 or DR5 registers */ + if (n == 4 || n == 5) + return -EIO; + + if (n == 6) { + tsk->thread.debugreg6 = val; + goto ret_path; } + if (n < HBP_NUM) { + if (thread->hbp[n]) { + if (arch_check_va_in_userspace(val, + thread->hbp[n]->info.len) == 0) { + rc = -EIO; + goto ret_path; + } + thread->hbp[n]->info.address = val; + } + thread->debugreg[n] = val; + } + /* All that's left is DR7 */ + if (n == 7) + rc = ptrace_write_dr7(tsk, val); - return 0; +ret_path: + return rc; } /* -- cgit v1.2.3 From 17f557e5b5d43a2af66c969f6560ac7105020672 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:46:03 +0530 Subject: hw-breakpoints: cleanup HW Breakpoint registers before kexec This patch disables Hardware breakpoints before doing a 'kexec' on the machine so that the cpu doesn't keep debug registers values which would be out of sync for the new image. Original-patch-by: Alan Stern Signed-off-by: K.Prasad Reviewed-by: Alan Stern Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/machine_kexec_32.c | 2 ++ arch/x86/kernel/machine_kexec_64.c | 2 ++ 2 files changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index c1c429d00130..c843f8406da2 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -25,6 +25,7 @@ #include #include #include +#include static void set_idt(void *newidt, __u16 limit) { @@ -202,6 +203,7 @@ void machine_kexec(struct kimage *image) /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); + hw_breakpoint_disable(); if (image->preserve_context) { #ifdef CONFIG_X86_IO_APIC diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 84c3bf209e98..4a8bb82248ae 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -18,6 +18,7 @@ #include #include #include +#include static int init_one_level2_page(struct kimage *image, pgd_t *pgd, unsigned long addr) @@ -282,6 +283,7 @@ void machine_kexec(struct kimage *image) /* Interrupts aren't acceptable while we reboot */ local_irq_disable(); + hw_breakpoint_disable(); if (image->preserve_context) { #ifdef CONFIG_X86_IO_APIC -- cgit v1.2.3 From 62edab9056a6cf0c9207339c8892c923a5217e45 Mon Sep 17 00:00:00 2001 From: "K.Prasad" Date: Mon, 1 Jun 2009 23:47:06 +0530 Subject: hw-breakpoints: reset bits in dr6 after the corresponding exception is handled This patch resets the bit in dr6 after the corresponding exception is handled in code, so that we keep a clean track of the current virtual debug status register. 
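[ Editorial aside: to make the mechanism concrete, after this change do_debug() forwards a pointer to the virtualized DR6 value through the die_args 'err' field, encoded as PTR_ERR(&dr6) (see the traps.c hunk below), and each DIE_DEBUG consumer clears the bits it has handled. A hypothetical notifier following that pattern, mirroring the kgdb/kprobes/kmmio changes in this patch, would look like the sketch below; it is illustrative only. ]

    static int sample_debug_notify(struct notifier_block *nb,
                                   unsigned long val, void *data)
    {
            struct die_args *args = data;
            unsigned long *dr6_p;

            if (val != DIE_DEBUG)
                    return NOTIFY_DONE;

            /* Recover the DR6 pointer smuggled through args->err */
            dr6_p = (unsigned long *)ERR_PTR(args->err);
            if (!(*dr6_p & DR_STEP))
                    return NOTIFY_DONE;

            /* ... handle the single-step condition ... */

            /* Reset the BS bit to denote completion of processing */
            *dr6_p &= ~DR_STEP;
            return NOTIFY_STOP;
    }
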
[ Impact: keep track of breakpoints triggering completion ] Signed-off-by: K.Prasad Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/hw_breakpoint.c | 13 +++++++++++-- arch/x86/kernel/kgdb.c | 6 ++++++ arch/x86/kernel/kprobes.c | 9 ++++++++- arch/x86/kernel/traps.c | 4 ++-- arch/x86/mm/kmmio.c | 8 +++++++- 5 files changed, 34 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 4867c9f3b5fb..69451473dbd2 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -314,8 +314,12 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) { int i, cpu, rc = NOTIFY_STOP; struct hw_breakpoint *bp; - /* The DR6 value is stored in args->err */ - unsigned long dr7, dr6 = args->err; + unsigned long dr7, dr6; + unsigned long *dr6_p; + + /* The DR6 value is pointed by args->err */ + dr6_p = (unsigned long *)ERR_PTR(args->err); + dr6 = *dr6_p; /* Do an early return if no trap bits are set in DR6 */ if ((dr6 & DR_TRAP_BITS) == 0) @@ -351,6 +355,11 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) if (bp) rc = NOTIFY_DONE; } + /* + * Reset the 'i'th TRAP bit in dr6 to denote completion of + * exception handling + */ + (*dr6_p) &= ~(DR_TRAP0 << i); /* * bp can be NULL due to lazy debug register switching * or due to the delay between updates of hbp_kernel_pos diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index b1f4dffb919e..f820b73c7f28 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -43,6 +43,7 @@ #include #include +#include #include #include @@ -434,6 +435,11 @@ single_step_cont(struct pt_regs *regs, struct die_args *args) "resuming...\n"); kgdb_arch_handle_exception(args->trapnr, args->signr, args->err, "c", "", regs); + /* + * Reset the BS bit in dr6 (pointed by args->err) to + * denote completion of processing + */ + (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; return NOTIFY_STOP; } diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7b5169d2b000..b5b1848c5336 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -54,6 +54,7 @@ #include #include #include +#include void jprobe_return_end(void); @@ -967,8 +968,14 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, ret = NOTIFY_STOP; break; case DIE_DEBUG: - if (post_kprobe_handler(args->regs)) + if (post_kprobe_handler(args->regs)) { + /* + * Reset the BS bit in dr6 (pointed by args->err) to + * denote completion of processing + */ + (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; ret = NOTIFY_STOP; + } break; case DIE_GPF: /* diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index de9913247dd0..124a4d5a95b2 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -545,8 +545,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; - if (notify_die(DIE_DEBUG, "debug", regs, dr6, error_code, - SIGTRAP) == NOTIFY_STOP) + if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, + SIGTRAP) == NOTIFY_STOP) return; /* It's safe to allow irq's after DR6 has been saved */ diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index 16ccbd77917f..11a4ad4d6253 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -540,8 +540,14 @@ kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args) struct die_args *arg = args; if (val == DIE_DEBUG && (arg->err & DR_STEP)) - if 
(post_kmmio_handler(arg->err, arg->regs) == 1) + if (post_kmmio_handler(arg->err, arg->regs) == 1) { + /* + * Reset the BS bit in dr6 (pointed by args->err) to + * denote completion of processing + */ + (*(unsigned long *)ERR_PTR(arg->err)) &= ~DR_STEP; return NOTIFY_STOP; + } return NOTIFY_DONE; } -- cgit v1.2.3 From 4555835b707d5c778ee1c9076670bc99b1eeaf61 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 17 Jun 2009 14:44:19 +0530 Subject: x86: hw_breakpoint.c arch_check_va_in_kernelspace and hw_breakpoint_handler should be static arch_check_va_in_kernelspace() and hw_breakpoint_handler() is used only by same file so it should be static. Also fixed non-ANSI function declaration of function 'arch_uninstall_thread_hw_breakpoint' Fixed following sparse warnings : arch/x86/kernel/hw_breakpoint.c:124:42: warning: non-ANSI function declaration of function 'arch_uninstall_thread_hw_breakpoint' arch/x86/kernel/hw_breakpoint.c:169:5: warning: symbol 'arch_check_va_in_kernelspace' was not declared. Should it be static? arch/x86/kernel/hw_breakpoint.c:313:15: warning: symbol 'hw_breakpoint_handler' was not declared. Should it be static? Signed-off-by: Jaswinder Singh Rajput Cc: Alan Stern Cc: "K.Prasad" Cc: Frederic Weisbecker LKML-Reference: <1245230059.2662.4.camel@ht.satnam> Signed-off-by: Ingo Molnar --- arch/x86/kernel/hw_breakpoint.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index 51d959528b1d..9316a9de4de3 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -121,7 +121,7 @@ void arch_install_thread_hw_breakpoint(struct task_struct *tsk) /* * Install the debug register values for just the kernel, no thread. */ -void arch_uninstall_thread_hw_breakpoint() +void arch_uninstall_thread_hw_breakpoint(void) { /* Clear the user-space portion of debugreg7 by setting only kdr7 */ set_debugreg(kdr7, 7); @@ -166,7 +166,7 @@ int arch_check_va_in_userspace(unsigned long va, u8 hbp_len) /* * Check for virtual address in kernel space. */ -int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) +static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len) { unsigned int len; @@ -310,7 +310,7 @@ void arch_flush_thread_hw_breakpoint(struct task_struct *tsk) * NOTIFY_STOP returned for all other cases * */ -int __kprobes hw_breakpoint_handler(struct die_args *args) +static int __kprobes hw_breakpoint_handler(struct die_args *args) { int i, cpu, rc = NOTIFY_STOP; struct hw_breakpoint *bp; -- cgit v1.2.3 From 9d22b536609abf0d64648f99518676ea58245e3b Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Wed, 1 Jul 2009 19:52:30 +0530 Subject: x86: Mark ptrace_get_debugreg() as static This sparse warning: arch/x86/kernel/ptrace.c:560:15: warning: symbol 'ptrace_get_debugreg' was not declared. Should it be static? triggers because ptrace_get_debugreg() is global but is only used in a single .c file. change ptrace_get_debugreg() to static to fix that - this also addresses the sparse warning. 
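[ Editorial aside: for readers tracing the DR7 bit manipulation in the hw_breakpoint and ptrace patches above, here is a small worked example of what encode_dr7()/decode_dr7() produce for one slot. It is illustrative only and assumes the usual x86 values DR_CONTROL_SHIFT = 16 and DR_CONTROL_SIZE = 4 from asm/debugreg.h (those two constants are not visible in this excerpt; DR_ENABLE_SIZE, DR_GLOBAL_ENABLE and DR_GLOBAL_SLOWDOWN are shown in the first patch). ]

    /*
     * Worked example: pack debug-register slot 0 as a 4-byte write
     * watchpoint, the way encode_dr7() does it.
     *
     *   len  = HW_BREAKPOINT_LEN_4 = 0x4c
     *   type = HW_BREAKPOINT_WRITE = 0x81
     *
     *   (len | type) & 0xf           = 0xd        -> LEN0 = 0b11, R/W0 = 0b01
     *   0xd << (16 + 0 * 4)          = 0x000d0000 -> bits 16-19 of DR7
     *   DR_GLOBAL_ENABLE << (0 * 2)  = 0x00000002 -> G0 (global enable)
     *   DR_GLOBAL_SLOWDOWN           = 0x00000200 -> GE bit
     *
     *   encode_dr7(0, 0x4c, 0x81)    = 0x000d0202
     *
     * decode_dr7(0x000d0202, 0, &len, &type) reverses this: it reads 0xd
     * back from bits 16-19, rebuilds len = 0x4c and type = 0x81, and
     * returns the two enable bits (0b10 here, i.e. globally enabled).
     */
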
Signed-off-by: Jaswinder Singh Rajput Cc: Steven Rostedt LKML-Reference: <1246458150.6940.19.camel@hpdv5.satnam> Signed-off-by: Ingo Molnar --- arch/x86/kernel/ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index b457f78b7dbf..cabdabce3cb2 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -557,7 +557,7 @@ restore: /* * Handle PTRACE_PEEKUSR calls for the debug register area. */ -unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) +static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) { struct thread_struct *thread = &(tsk->thread); unsigned long val = 0; -- cgit v1.2.3 From 39fe05e58c5e448601ce46e6b03900d5bf31c4b0 Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Wed, 12 Aug 2009 11:16:12 +0800 Subject: x86, hpet: Disable per-cpu hpet timer if ARAT is supported If CPU support always running local APIC timer, per-cpu hpet timer could be disabled, which is useless and wasteful in such case. Let's leave the timers to others. The effect is that we reserve less timers. Signed-off-by: Shaohua Li Cc: venkatesh.pallipadi@intel.com LKML-Reference: <20090812031612.GA10062@sli10-desk.sh.intel.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index dedc2bddf7a5..5969e1078fc2 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -584,6 +584,8 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) unsigned int num_timers_used = 0; int i; + if (boot_cpu_has(X86_FEATURE_ARAT)) + return; id = hpet_readl(HPET_ID); num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT); @@ -872,10 +874,8 @@ int __init hpet_enable(void) if (id & HPET_ID_LEGSUP) { hpet_legacy_clockevent_register(); - hpet_msi_capability_lookup(2); return 1; } - hpet_msi_capability_lookup(0); return 0; out_nohpet: @@ -908,9 +908,17 @@ static __init int hpet_late_init(void) if (!hpet_virt_address) return -ENODEV; + if (hpet_readl(HPET_ID) & HPET_ID_LEGSUP) + hpet_msi_capability_lookup(2); + else + hpet_msi_capability_lookup(0); + hpet_reserve_platform_timers(hpet_readl(HPET_ID)); hpet_print_config(); + if (boot_cpu_has(X86_FEATURE_ARAT)) + return 0; + for_each_online_cpu(cpu) { hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); } -- cgit v1.2.3 From 50dcfa0234753c32e1c838cc0e6d7952dda73201 Mon Sep 17 00:00:00 2001 From: Andrew Victor Date: Tue, 4 Aug 2009 19:55:56 +0100 Subject: ARM: 5637/1: [KS8695] Don't reference CLOCK_TICK_RATE in drivers Stop referencing CLOCK_TICK_RATE in the KS8695 drivers, rather refer to a KS8695_CLOCK_RATE. Issue pointed out by Russell King on arm-linux-kernel mailing list. Signed-off-by: Andrew Victor Signed-off-by: Russell King --- arch/arm/mach-ks8695/include/mach/hardware.h | 5 +++++ arch/arm/mach-ks8695/include/mach/timex.h | 5 +++-- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-ks8695/include/mach/hardware.h b/arch/arm/mach-ks8695/include/mach/hardware.h index 1d640d075b7e..e0f911d9e021 100644 --- a/arch/arm/mach-ks8695/include/mach/hardware.h +++ b/arch/arm/mach-ks8695/include/mach/hardware.h @@ -16,6 +16,11 @@ #include +/* + * Clocks are derived from MCLK, which is 25Mhz + */ +#define KS8695_CLOCK_RATE 25000000 + /* * Physical RAM address. 
*/ diff --git a/arch/arm/mach-ks8695/include/mach/timex.h b/arch/arm/mach-ks8695/include/mach/timex.h index 4682e350369b..10f716371bd3 100644 --- a/arch/arm/mach-ks8695/include/mach/timex.h +++ b/arch/arm/mach-ks8695/include/mach/timex.h @@ -14,7 +14,8 @@ #ifndef __ASM_ARCH_TIMEX_H #define __ASM_ARCH_TIMEX_H -/* timers are derived from MCLK, which is 25MHz */ -#define CLOCK_TICK_RATE 25000000 +#include + +#define CLOCK_TICK_RATE KS8695_CLOCK_RATE #endif -- cgit v1.2.3 From 9e2a7e6f5f1091965c78e01e87af05607bbf937f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 18 Aug 2009 13:47:30 -0700 Subject: iop33x: update defconfig (default atu to on) By default the iop3xx configurations are set to boot the platforms over an nfs root configuration. Since commit c34002c1 "iop: unconditionally initialize the ATU on platforms known to be 'hosts'" this configuration also requires iop3xx_init_atu=y to be specified on the kernel command line. Signed-off-by: Dan Williams --- arch/arm/configs/iop33x_defconfig | 554 +++++++++++++++++++++++++++++--------- 1 file changed, 429 insertions(+), 125 deletions(-) (limited to 'arch') diff --git a/arch/arm/configs/iop33x_defconfig b/arch/arm/configs/iop33x_defconfig index eec488298267..ed2d59d01829 100644 --- a/arch/arm/configs/iop33x_defconfig +++ b/arch/arm/configs/iop33x_defconfig @@ -1,29 +1,26 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.24-rc5 -# Wed Dec 12 16:11:27 2007 +# Linux kernel version: 2.6.31-rc6 +# Tue Aug 18 13:41:41 2009 # CONFIG_ARM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y -# CONFIG_GENERIC_GPIO is not set -# CONFIG_GENERIC_TIME is not set -# CONFIG_GENERIC_CLOCKEVENTS is not set +CONFIG_GENERIC_GPIO=y CONFIG_MMU=y -# CONFIG_NO_IOPORT is not set CONFIG_GENERIC_HARDIRQS=y CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y CONFIG_LOCKDEP_SUPPORT=y CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_HARDIRQS_SW_RESEND=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_RWSEM_GENERIC_SPINLOCK=y -# CONFIG_ARCH_HAS_ILOG2_U32 is not set -# CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ZONE_DMA=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_VECTORS_BASE=0xffff0000 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -40,21 +37,39 @@ CONFIG_SYSVIPC_SYSCTL=y CONFIG_BSD_PROCESS_ACCT=y # CONFIG_BSD_PROCESS_ACCT_V3 is not set # CONFIG_TASKSTATS is not set -# CONFIG_USER_NS is not set -# CONFIG_PID_NS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +# CONFIG_CLASSIC_RCU is not set +CONFIG_TREE_RCU=y +# CONFIG_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_GROUP_SCHED is not set # CONFIG_CGROUPS is not set -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_FAIR_USER_SCHED=y -# CONFIG_FAIR_CGROUP_SCHED is not set -CONFIG_SYSFS_DEPRECATED=y +# CONFIG_SYSFS_DEPRECATED_V2 is not set # CONFIG_RELAY is not set +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_IPC_NS is not set +# CONFIG_USER_NS is not set +# CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y # CONFIG_EMBEDDED is not set CONFIG_UID16=y CONFIG_SYSCTL_SYSCALL=y @@ -67,29 +82,48 @@ CONFIG_BUG=y 
CONFIG_ELF_CORE=y CONFIG_BASE_FULL=y CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Performance Counters +# CONFIG_VM_EVENT_COUNTERS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_STRIP_ASM_SYMS is not set +CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +# CONFIG_MARKERS is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y + +# +# GCOV-based kernel profiling +# +# CONFIG_SLOW_WORK is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +CONFIG_LBDAF=y # CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set # # IO Schedulers @@ -103,6 +137,7 @@ CONFIG_IOSCHED_CFQ=y CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_FREEZER is not set # # System Type @@ -112,15 +147,15 @@ CONFIG_DEFAULT_IOSCHED="cfq" # CONFIG_ARCH_REALVIEW is not set # CONFIG_ARCH_VERSATILE is not set # CONFIG_ARCH_AT91 is not set -# CONFIG_ARCH_CLPS7500 is not set # CONFIG_ARCH_CLPS711X is not set -# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_GEMINI is not set # CONFIG_ARCH_EBSA110 is not set # CONFIG_ARCH_EP93XX is not set # CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_STMP3XXX is not set # CONFIG_ARCH_NETX is not set # CONFIG_ARCH_H720X is not set -# CONFIG_ARCH_IMX is not set # CONFIG_ARCH_IOP13XX is not set # CONFIG_ARCH_IOP32X is not set CONFIG_ARCH_IOP33X=y @@ -128,19 +163,26 @@ CONFIG_ARCH_IOP33X=y # CONFIG_ARCH_IXP2000 is not set # CONFIG_ARCH_IXP4XX is not set # CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set # CONFIG_ARCH_KS8695 is not set # CONFIG_ARCH_NS9XXX is not set -# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_W90X900 is not set # CONFIG_ARCH_PNX4008 is not set # CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_MSM is not set # CONFIG_ARCH_RPC is not set # CONFIG_ARCH_SA1100 is not set # CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set # CONFIG_ARCH_SHARK is not set # CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set -CONFIG_IOP3XX_ATU=y # # IOP33x Implementation Options @@ -151,14 +193,6 @@ CONFIG_IOP3XX_ATU=y # CONFIG_ARCH_IQ80331=y CONFIG_MACH_IQ80332=y - -# -# Boot options -# - -# -# Power management -# CONFIG_PLAT_IOP=y # @@ -168,6 +202,7 @@ CONFIG_CPU_32=y CONFIG_CPU_XSCALE=y CONFIG_CPU_32v5=y CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_PABRT_NOIFAR=y CONFIG_CPU_CACHE_VIVT=y CONFIG_CPU_TLB_V4WBI=y CONFIG_CPU_CP15=y @@ -178,7 +213,6 @@ CONFIG_CPU_CP15_MMU=y # # CONFIG_ARM_THUMB is not set # CONFIG_CPU_DCACHE_DISABLE is not set -# CONFIG_OUTER_CACHE is not set # CONFIG_IWMMXT is not set CONFIG_XSCALE_PMU=y @@ -190,40 +224,54 @@ CONFIG_PCI_SYSCALL=y # CONFIG_ARCH_SUPPORTS_MSI is not set CONFIG_PCI_LEGACY=y # CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set # CONFIG_PCCARD 
is not set # # Kernel Features # -# CONFIG_TICK_ONESHOT is not set +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 # CONFIG_PREEMPT is not set CONFIG_HZ=100 # CONFIG_AEABI is not set -# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set CONFIG_SELECT_MEMORY_MODEL=y CONFIG_FLATMEM_MANUAL=y # CONFIG_DISCONTIGMEM_MANUAL is not set # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set +CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4096 -# CONFIG_RESOURCES_64BIT is not set -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 CONFIG_VIRT_TO_BUS=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set # # Boot options # CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp cachepolicy=writealloc" +CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp cachepolicy=writealloc iop3xx_init_atu=y" # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set +# +# CPU Power Management +# +# CONFIG_CPU_IDLE is not set + # # Floating point emulation # @@ -239,6 +287,8 @@ CONFIG_FPE_NWFPE=y # Userspace binary formats # CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_HAVE_AOUT=y CONFIG_BINFMT_AOUT=y # CONFIG_BINFMT_MISC is not set # CONFIG_ARTHUR is not set @@ -247,11 +297,7 @@ CONFIG_BINFMT_AOUT=y # Power management options # # CONFIG_PM is not set -CONFIG_SUSPEND_UP_POSSIBLE=y - -# -# Networking -# +CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y # @@ -264,6 +310,7 @@ CONFIG_XFRM=y # CONFIG_XFRM_USER is not set # CONFIG_XFRM_SUB_POLICY is not set # CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set CONFIG_INET=y CONFIG_IP_MULTICAST=y @@ -310,6 +357,7 @@ CONFIG_IPV6=y # CONFIG_IPV6_SIT is not set # CONFIG_IPV6_TUNNEL is not set # CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_IPV6_MROUTE is not set # CONFIG_NETWORK_SECMARK is not set # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set @@ -317,6 +365,7 @@ CONFIG_IPV6=y # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set # CONFIG_DECNET is not set # CONFIG_LLC2 is not set @@ -326,24 +375,31 @@ CONFIG_IPV6=y # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +# CONFIG_WIRELESS_OLD_REGULATORY is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_LIB80211 is not set # -# Wireless +# CFG80211 needs to be enabled for MAC80211 # -# CONFIG_CFG80211 is not set -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +CONFIG_MAC80211_DEFAULT_PS_VALUE=0 +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -357,7 +413,9 @@ CONFIG_IPV6=y 
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y -# CONFIG_FW_LOADER is not set +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" # CONFIG_DEBUG_DRIVER is not set # CONFIG_DEBUG_DEVRES is not set # CONFIG_SYS_HYPERVISOR is not set @@ -366,12 +424,14 @@ CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_TESTS is not set CONFIG_MTD_REDBOOT_PARTS=y CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y CONFIG_MTD_REDBOOT_PARTS_READONLY=y # CONFIG_MTD_CMDLINE_PARTS is not set # CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set # # User Modules And Translation Layers @@ -421,9 +481,7 @@ CONFIG_MTD_CFI_UTIL=y # # CONFIG_MTD_COMPLEX_MAPPINGS is not set CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_PHYSMAP_START=0x0 -CONFIG_MTD_PHYSMAP_LEN=0x0 -CONFIG_MTD_PHYSMAP_BANKWIDTH=1 +# CONFIG_MTD_PHYSMAP_COMPAT is not set # CONFIG_MTD_ARM_INTEGRATOR is not set # CONFIG_MTD_INTEL_VR_NOR is not set # CONFIG_MTD_PLATRAM is not set @@ -446,6 +504,11 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=1 # CONFIG_MTD_NAND is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -463,14 +526,29 @@ CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 +# CONFIG_BLK_DEV_XIP is not set # CONFIG_CDROM_PKTCDVD is not set # CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set CONFIG_MISC_DEVICES=y # CONFIG_PHANTOM is not set -# CONFIG_EEPROM_93CX6 is not set # CONFIG_SGI_IOC4 is not set # CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_CB710_CORE is not set +CONFIG_HAVE_IDE=y # CONFIG_IDE is not set # @@ -492,10 +570,6 @@ CONFIG_BLK_DEV_SD=y # CONFIG_BLK_DEV_SR is not set CONFIG_CHR_DEV_SG=y # CONFIG_CHR_DEV_SCH is not set - -# -# Some SCSI devices (e.g. 
CD jukebox) support multiple LUNs -# # CONFIG_SCSI_MULTI_LUN is not set # CONFIG_SCSI_CONSTANTS is not set # CONFIG_SCSI_LOGGING is not set @@ -512,6 +586,8 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_SRP_ATTRS is not set CONFIG_SCSI_LOWLEVEL=y # CONFIG_ISCSI_TCP is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set # CONFIG_BLK_DEV_3W_XXXX_RAID is not set # CONFIG_SCSI_3W_9XXX is not set # CONFIG_SCSI_ACARD is not set @@ -520,13 +596,18 @@ CONFIG_SCSI_LOWLEVEL=y # CONFIG_SCSI_AIC7XXX_OLD is not set # CONFIG_SCSI_AIC79XX is not set # CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set # CONFIG_SCSI_DPT_I2O is not set # CONFIG_SCSI_ADVANSYS is not set # CONFIG_SCSI_ARCMSR is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set # CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT2SAS is not set # CONFIG_SCSI_HPTIOP is not set +# CONFIG_LIBFC is not set +# CONFIG_LIBFCOE is not set +# CONFIG_FCOE is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_FUTURE_DOMAIN is not set # CONFIG_SCSI_IPS is not set @@ -543,15 +624,18 @@ CONFIG_SCSI_LOWLEVEL=y # CONFIG_SCSI_NSP32 is not set # CONFIG_SCSI_DEBUG is not set # CONFIG_SCSI_SRP is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set # CONFIG_ATA is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y CONFIG_MD_LINEAR=y CONFIG_MD_RAID0=y CONFIG_MD_RAID1=y # CONFIG_MD_RAID10 is not set CONFIG_MD_RAID456=y -# CONFIG_MD_RAID5_RESHAPE is not set +CONFIG_MD_RAID6_PQ=y # CONFIG_MD_MULTIPATH is not set # CONFIG_MD_FAULTY is not set CONFIG_BLK_DEV_DM=y @@ -568,27 +652,34 @@ CONFIG_BLK_DEV_DM=y # # IEEE 1394 (FireWire) support # + +# +# You can enable one or both FireWire driver stacks. +# + +# +# See the help texts for more information. 
+# # CONFIG_FIREWIRE is not set # CONFIG_IEEE1394 is not set # CONFIG_I2O is not set CONFIG_NETDEVICES=y -# CONFIG_NETDEVICES_MULTIQUEUE is not set # CONFIG_DUMMY is not set # CONFIG_BONDING is not set # CONFIG_MACVLAN is not set # CONFIG_EQUALIZER is not set # CONFIG_TUN is not set # CONFIG_VETH is not set -# CONFIG_IP1000 is not set # CONFIG_ARCNET is not set # CONFIG_NET_ETHERNET is not set CONFIG_NETDEV_1000=y # CONFIG_ACENIC is not set # CONFIG_DL2K is not set CONFIG_E1000=y -CONFIG_E1000_NAPI=y -# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set # CONFIG_E1000E is not set +# CONFIG_IP1000 is not set +# CONFIG_IGB is not set +# CONFIG_IGBVF is not set # CONFIG_NS83820 is not set # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set @@ -596,23 +687,34 @@ CONFIG_E1000_NAPI=y # CONFIG_SIS190 is not set # CONFIG_SKGE is not set # CONFIG_SKY2 is not set -# CONFIG_SK98LIN is not set # CONFIG_VIA_VELOCITY is not set # CONFIG_TIGON3 is not set # CONFIG_BNX2 is not set +# CONFIG_CNIC is not set # CONFIG_QLA3XXX is not set # CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +# CONFIG_ATL1C is not set +# CONFIG_JME is not set CONFIG_NETDEV_10000=y # CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_CHELSIO_T3 is not set +# CONFIG_ENIC is not set # CONFIG_IXGBE is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set +# CONFIG_VXGE is not set # CONFIG_MYRI10GE is not set # CONFIG_NETXEN_NIC is not set # CONFIG_NIU is not set +# CONFIG_MLX4_EN is not set # CONFIG_MLX4_CORE is not set # CONFIG_TEHUTI is not set +# CONFIG_BNX2X is not set +# CONFIG_QLGE is not set +# CONFIG_SFC is not set +# CONFIG_BE2NET is not set # CONFIG_TR is not set # @@ -620,13 +722,16 @@ CONFIG_NETDEV_10000=y # # CONFIG_WLAN_PRE80211 is not set # CONFIG_WLAN_80211 is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# # CONFIG_WAN is not set # CONFIG_FDDI is not set # CONFIG_HIPPI is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set # CONFIG_NET_FC is not set -# CONFIG_SHAPER is not set # CONFIG_NETCONSOLE is not set # CONFIG_NETPOLL is not set # CONFIG_NET_POLL_CONTROLLER is not set @@ -670,10 +775,13 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 # Character devices # CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y CONFIG_VT_CONSOLE=y CONFIG_HW_CONSOLE=y # CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y # CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set # # Serial drivers @@ -692,11 +800,12 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=y -# CONFIG_NVRAM is not set +# CONFIG_HW_RANDOM_TIMERIOMEM is not set # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set # CONFIG_RAW_DRIVER is not set @@ -705,16 +814,14 @@ CONFIG_DEVPORT=y CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_CHARDEV=y +CONFIG_I2C_HELPER_AUTO=y # -# I2C Algorithms +# I2C Hardware Bus support # -# CONFIG_I2C_ALGOBIT is not set -# CONFIG_I2C_ALGOPCF is not set -# CONFIG_I2C_ALGOPCA is not set # -# I2C Hardware Bus support +# PC SMBus host controller drivers # # CONFIG_I2C_ALI1535 is not set # CONFIG_I2C_ALI1563 is not set @@ -722,50 +829,82 @@ CONFIG_I2C_CHARDEV=y # CONFIG_I2C_AMD756 is not set # CONFIG_I2C_AMD8111 is not set # CONFIG_I2C_I801 is not set -# CONFIG_I2C_I810 is not set +# CONFIG_I2C_ISCH is not set # CONFIG_I2C_PIIX4 is not set -CONFIG_I2C_IOP3XX=y # CONFIG_I2C_NFORCE2 is not set 
-# CONFIG_I2C_OCORES is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_PROSAVAGE is not set -# CONFIG_I2C_SAVAGE4 is not set -# CONFIG_I2C_SIMTEC is not set # CONFIG_I2C_SIS5595 is not set # CONFIG_I2C_SIS630 is not set # CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_STUB is not set # CONFIG_I2C_VIA is not set # CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_IOP3XX=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Graphics adapter I2C/DDC channel drivers +# # CONFIG_I2C_VOODOO3 is not set +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is not set + # # Miscellaneous I2C Chip support # -# CONFIG_SENSORS_DS1337 is not set -# CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set +# CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set # CONFIG_I2C_DEBUG_CHIP is not set +# CONFIG_SPI is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set # -# SPI support +# Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set + +# +# PCI GPIO expanders: +# +# CONFIG_GPIO_BT8XX is not set + +# +# SPI GPIO expanders: # -# CONFIG_SPI is not set -# CONFIG_SPI_MASTER is not set # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set CONFIG_HWMON=y # CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_AD7414 is not set # CONFIG_SENSORS_AD7418 is not set # CONFIG_SENSORS_ADM1021 is not set # CONFIG_SENSORS_ADM1025 is not set @@ -773,13 +912,17 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADM1029 is not set # CONFIG_SENSORS_ADM1031 is not set # CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7462 is not set # CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7473 is not set +# CONFIG_SENSORS_ADT7475 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_I5K_AMB is not set # CONFIG_SENSORS_F71805F is not set # CONFIG_SENSORS_F71882FG is not set # CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_G760A is not set # CONFIG_SENSORS_GL518SM is not set # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_IT87 is not set @@ -794,16 +937,23 @@ CONFIG_HWMON=y # CONFIG_SENSORS_LM90 is not set # CONFIG_SENSORS_LM92 is not set # CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LM95241 is not set # CONFIG_SENSORS_MAX1619 is not set # CONFIG_SENSORS_MAX6650 is not set # CONFIG_SENSORS_PC87360 is not set # CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_SHT15 is not set # CONFIG_SENSORS_SIS5595 is not set # CONFIG_SENSORS_DME1737 is not set # CONFIG_SENSORS_SMSC47M1 is not set # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_ADS7828 is not set # CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP401 is not set # CONFIG_SENSORS_VIA686A is 
not set # CONFIG_SENSORS_VT1211 is not set # CONFIG_SENSORS_VT8231 is not set @@ -812,28 +962,38 @@ CONFIG_HWMON=y # CONFIG_SENSORS_W83792D is not set # CONFIG_SENSORS_W83793 is not set # CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set # CONFIG_SENSORS_W83627HF is not set # CONFIG_SENSORS_W83627EHF is not set # CONFIG_HWMON_DEBUG_CHIP is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # # Multifunction device drivers # +# CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set - -# -# Multimedia devices -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -CONFIG_DAB=y +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -854,15 +1014,16 @@ CONFIG_DAB=y # # CONFIG_VGA_CONSOLE is not set CONFIG_DUMMY_CONSOLE=y - -# -# Sound -# # CONFIG_SOUND is not set CONFIG_HID_SUPPORT=y CONFIG_HID=y # CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y @@ -870,14 +1031,21 @@ CONFIG_USB_ARCH_HAS_EHCI=y # CONFIG_USB is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# Enable Host or Gadget support to see Inventra options # # -# USB Gadget Support +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_UWB is not set # CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_ACCESSIBILITY is not set # CONFIG_NEW_LEDS is not set CONFIG_RTC_LIB=y # CONFIG_RTC_CLASS is not set @@ -893,6 +1061,12 @@ CONFIG_DMA_ENGINE=y # DMA Clients # CONFIG_NET_DMA=y +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_REGULATOR is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set # # File systems @@ -901,10 +1075,11 @@ CONFIG_EXT2_FS=y # CONFIG_EXT2_FS_XATTR is not set # CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_XATTR=y # CONFIG_EXT3_FS_POSIX_ACL is not set # CONFIG_EXT3_FS_SECURITY is not set -# CONFIG_EXT4DEV_FS is not set +# CONFIG_EXT4_FS is not set CONFIG_JBD=y CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set @@ -913,16 +1088,22 @@ CONFIG_FS_MBCACHE=y # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_ROMFS_FS is not set +# CONFIG_BTRFS_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y # CONFIG_QUOTA is not set -CONFIG_DNOTIFY=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -941,15 +1122,13 @@ CONFIG_DNOTIFY=y # CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# 
Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -959,29 +1138,31 @@ CONFIG_TMPFS=y # CONFIG_EFS_FS is not set # CONFIG_JFFS2_FS is not set CONFIG_CRAMFS=y +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set +# CONFIG_NILFS2_FS is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y # CONFIG_NFS_V3_ACL is not set # CONFIG_NFS_V4 is not set -# CONFIG_NFS_DIRECTIO is not set +CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NFSD_V3=y # CONFIG_NFSD_V3_ACL is not set # CONFIG_NFSD_V4 is not set -# CONFIG_NFSD_TCP is not set -CONFIG_ROOT_NFS=y CONFIG_LOCKD=y CONFIG_LOCKD_V4=y CONFIG_EXPORTFS=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y -# CONFIG_SUNRPC_BIND34 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -1013,9 +1194,6 @@ CONFIG_MSDOS_PARTITION=y # CONFIG_SYSV68_PARTITION is not set # CONFIG_NLS is not set # CONFIG_DLM is not set -CONFIG_INSTRUMENTATION=y -# CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set # # Kernel hacking @@ -1023,6 +1201,7 @@ CONFIG_INSTRUMENTATION=y # CONFIG_PRINTK_TIME is not set CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y # CONFIG_UNUSED_SYMBOLS is not set # CONFIG_DEBUG_FS is not set @@ -1030,10 +1209,17 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set # CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set # CONFIG_DEBUG_RT_MUTEXES is not set # CONFIG_RT_MUTEX_TESTER is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -1047,16 +1233,41 @@ CONFIG_SCHED_DEBUG=y CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set CONFIG_FRAME_POINTER=y -# CONFIG_FORCED_INLINING is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set CONFIG_DEBUG_USER=y # CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set 
CONFIG_DEBUG_LL=y # CONFIG_DEBUG_ICEDCC is not set @@ -1065,24 +1276,117 @@ CONFIG_DEBUG_LL=y # # CONFIG_KEYS is not set # CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set CONFIG_XOR_BLOCKS=y CONFIG_ASYNC_CORE=y CONFIG_ASYNC_MEMCPY=y CONFIG_ASYNC_XOR=y -# CONFIG_CRYPTO is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +# CONFIG_CRYPTO_MANAGER is not set +# CONFIG_CRYPTO_MANAGER2 is not set +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +# CONFIG_CRYPTO_CBC is not set +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_MD4 is not set +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_DES is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_HIFN_795X is not set +# CONFIG_BINARY_PRINTF is not set # # Library routines # +CONFIG_GENERIC_FIND_LAST_BIT=y # CONFIG_CRC_CCITT is not set # CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set # CONFIG_CRC_ITU_T is not set # CONFIG_CRC32 is not set # CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y -CONFIG_PLIST=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y -- cgit v1.2.3 From 5b9eda3313b678f20f2bec08e8173f93e85f6c14 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Fri, 31 Jul 2009 12:16:21 +0300 Subject: iop3xx: ATU and PCI memory configuration corrected There are two 64 MB outbound memory windows at bus addresses 0x80000000..0x83ffffff and 0x84000000..0x87ffffff for PCI memory. Currently, on iop32x, only the lower window is available for allocations, limiting the available space to 64 MB. On iop33x the full 128 MB can be allocated, but the translation value is wrong for the upper window. The patch enables the full 128 MB space on iop32x and corrects the initialization of OMWTVR1. 
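As a quick sanity check of the corrected translation value (illustrative arithmetic only, using the constants visible in the diff below): with a single IOP3XX_PCI_MEM_WINDOW_SIZE of 0x08000000 now describing both windows, outbound window 1 has to be translated to start half-way into that range,

	/* on iop32x: 0x80000000 + 0x08000000 / 2 = 0x84000000 */
	*IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA + IOP3XX_PCI_MEM_WINDOW_SIZE / 2;

which matches the 0x84xxxxxx region assignment in the lspci output quoted below.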
Redundant definitions are deleted. Tested using a Thecus N2100 board with a graphics adapter in the expansion slot. Both windows are in use: 00:05.0 VGA compatible controller: XGI Technology Inc. (eXtreme Graphics Innovation) Volari Z7 (prog-if 00 [VGA controller]) [...] Region 0: Memory at 80000000 (32-bit, prefetchable) [size=64M] Region 1: Memory at 84080000 (32-bit, non-prefetchable) [size=256K] Signed-off-by: Aaro Koskinen Cc: Lennert Buytenhek Signed-off-by: Dan Williams --- arch/arm/include/asm/hardware/iop3xx.h | 1 + arch/arm/mach-iop32x/include/mach/iop32x.h | 2 -- arch/arm/mach-iop33x/include/mach/iop33x.h | 2 -- arch/arm/plat-iop/pci.c | 3 ++- 4 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 4b8e7f559929..8d60ad267e3a 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -215,6 +215,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX I/O and Mem space regions for PCI autoconfiguration */ #define IOP3XX_PCI_LOWER_MEM_PA 0x80000000 +#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000 #define IOP3XX_PCI_IO_WINDOW_SIZE 0x00010000 #define IOP3XX_PCI_LOWER_IO_PA 0x90000000 diff --git a/arch/arm/mach-iop32x/include/mach/iop32x.h b/arch/arm/mach-iop32x/include/mach/iop32x.h index abd9eb49f103..941f363aca56 100644 --- a/arch/arm/mach-iop32x/include/mach/iop32x.h +++ b/arch/arm/mach-iop32x/include/mach/iop32x.h @@ -31,7 +31,5 @@ #define IOP32X_MAX_RAM_SIZE 0x40000000UL #define IOP3XX_MAX_RAM_SIZE IOP32X_MAX_RAM_SIZE #define IOP3XX_PCI_LOWER_MEM_BA 0x80000000 -#define IOP32X_PCI_MEM_WINDOW_SIZE 0x04000000 -#define IOP3XX_PCI_MEM_WINDOW_SIZE IOP32X_PCI_MEM_WINDOW_SIZE #endif diff --git a/arch/arm/mach-iop33x/include/mach/iop33x.h b/arch/arm/mach-iop33x/include/mach/iop33x.h index 24567316ec88..a89c0a234bff 100644 --- a/arch/arm/mach-iop33x/include/mach/iop33x.h +++ b/arch/arm/mach-iop33x/include/mach/iop33x.h @@ -36,8 +36,6 @@ #define IOP33X_MAX_RAM_SIZE 0x80000000UL #define IOP3XX_MAX_RAM_SIZE IOP33X_MAX_RAM_SIZE #define IOP3XX_PCI_LOWER_MEM_BA (PHYS_OFFSET + IOP33X_MAX_RAM_SIZE) -#define IOP33X_PCI_MEM_WINDOW_SIZE 0x08000000 -#define IOP3XX_PCI_MEM_WINDOW_SIZE IOP33X_PCI_MEM_WINDOW_SIZE #endif diff --git a/arch/arm/plat-iop/pci.c b/arch/arm/plat-iop/pci.c index 77fa7cc7d162..ce31f316ac75 100644 --- a/arch/arm/plat-iop/pci.c +++ b/arch/arm/plat-iop/pci.c @@ -257,7 +257,8 @@ void __init iop3xx_atu_setup(void) *IOP3XX_OUMWTVR0 = 0; /* Outbound window 1 */ - *IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA + IOP3XX_PCI_MEM_WINDOW_SIZE; + *IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA + + IOP3XX_PCI_MEM_WINDOW_SIZE / 2; *IOP3XX_OUMWTVR1 = 0; /* BAR 3 ( Disabled ) */ -- cgit v1.2.3 From a692838dcaacb5f6f05fac73abb99d92dd7b1021 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 2 Aug 2009 10:46:45 +0200 Subject: arch/arm/plat-iop: Use DIV_ROUND_CLOSEST The kernel.h macro DIV_ROUND_CLOSEST performs the computation (x + d/2)/d but is perhaps more readable. 
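A small worked example of why the replacement is equivalent (values picked arbitrarily, not from the iop code): for x = 1999 and a divisor of 1000, both (1999 + 1000/2) / 1000 and DIV_ROUND_CLOSEST(1999, 1000) evaluate to 2, i.e. the quotient rounded to the nearest integer rather than truncated to 1.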
The semantic patch that makes this change is as follows: (http://www.emn.fr/x-info/coccinelle/) // @haskernel@ @@ #include @depends on haskernel@ expression x,__divisor; @@ - (((x) + ((__divisor) / 2)) / (__divisor)) + DIV_ROUND_CLOSEST(x,__divisor) // Signed-off-by: Julia Lawall Signed-off-by: Dan Williams --- arch/arm/plat-iop/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c index 3695bbe3ee28..8da95d57c21f 100644 --- a/arch/arm/plat-iop/time.c +++ b/arch/arm/plat-iop/time.c @@ -85,7 +85,7 @@ void __init iop_init_time(unsigned long tick_rate) { u32 timer_ctl; - ticks_per_jiffy = (tick_rate + HZ/2) / HZ; + ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ); ticks_per_usec = tick_rate / 1000000; next_jiffy_time = 0xffffffff; iop_tick_rate = tick_rate; -- cgit v1.2.3 From 5946fa3d5cdeb846a647a1900026af9f8b08c8b5 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Wed, 19 Aug 2009 08:44:24 +0100 Subject: x86, hpet: Simplify the HPET code On 64-bits, using unsigned long when unsigned int suffices needlessly creates larger code (due to the need for REX prefixes), and most of the logic in hpet.c really doesn't need 64-bit operations. At once this avoids the need for a couple of type casts. Signed-off-by: Jan Beulich Cc: Shaohua Li Cc: Venkatesh Pallipadi LKML-Reference: <4A8BC9780200007800010832@vpn.id2.novell.com> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/hpet.h | 2 +- arch/x86/kernel/hpet.c | 45 +++++++++++++++++++++++---------------------- 2 files changed, 24 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 1c22cb05ad6a..65847c578b70 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h @@ -69,7 +69,7 @@ extern int hpet_force_user; extern int is_hpet_enabled(void); extern int hpet_enable(void); extern void hpet_disable(void); -extern unsigned long hpet_readl(unsigned long a); +extern unsigned int hpet_readl(unsigned int a); extern void force_hpet_resume(void); extern void hpet_msi_unmask(unsigned int irq); diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index 5969e1078fc2..ba575f0f2e34 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -47,12 +47,12 @@ struct hpet_dev { char name[10]; }; -unsigned long hpet_readl(unsigned long a) +inline unsigned int hpet_readl(unsigned int a) { return readl(hpet_virt_address + a); } -static inline void hpet_writel(unsigned long d, unsigned long a) +static inline void hpet_writel(unsigned int d, unsigned int a) { writel(d, hpet_virt_address + a); } @@ -167,7 +167,7 @@ do { \ static void hpet_reserve_msi_timers(struct hpet_data *hd); -static void hpet_reserve_platform_timers(unsigned long id) +static void hpet_reserve_platform_timers(unsigned int id) { struct hpet __iomem *hpet = hpet_virt_address; struct hpet_timer __iomem *timer = &hpet->hpet_timers[2]; @@ -205,7 +205,7 @@ static void hpet_reserve_platform_timers(unsigned long id) } #else -static void hpet_reserve_platform_timers(unsigned long id) { } +static void hpet_reserve_platform_timers(unsigned int id) { } #endif /* @@ -246,7 +246,7 @@ static void hpet_reset_counter(void) static void hpet_start_counter(void) { - unsigned long cfg = hpet_readl(HPET_CFG); + unsigned int cfg = hpet_readl(HPET_CFG); cfg |= HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); } @@ -271,7 +271,7 @@ static void hpet_resume_counter(void) static void hpet_enable_legacy_int(void) { - unsigned long cfg = 
hpet_readl(HPET_CFG); + unsigned int cfg = hpet_readl(HPET_CFG); cfg |= HPET_CFG_LEGACY; hpet_writel(cfg, HPET_CFG); @@ -314,7 +314,7 @@ static int hpet_setup_msi_irq(unsigned int irq); static void hpet_set_mode(enum clock_event_mode mode, struct clock_event_device *evt, int timer) { - unsigned long cfg, cmp, now; + unsigned int cfg, cmp, now; uint64_t delta; switch (mode) { @@ -323,7 +323,7 @@ static void hpet_set_mode(enum clock_event_mode mode, delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult; delta >>= evt->shift; now = hpet_readl(HPET_COUNTER); - cmp = now + (unsigned long) delta; + cmp = now + (unsigned int) delta; cfg = hpet_readl(HPET_Tn_CFG(timer)); /* Make sure we use edge triggered interrupts */ cfg &= ~HPET_TN_LEVEL; @@ -339,7 +339,7 @@ static void hpet_set_mode(enum clock_event_mode mode, * (See AMD-8111 HyperTransport I/O Hub Data Sheet, * Publication # 24674) */ - hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer)); + hpet_writel((unsigned int) delta, HPET_Tn_CMP(timer)); hpet_start_counter(); hpet_print_config(); break; @@ -387,9 +387,9 @@ static int hpet_next_event(unsigned long delta, * what we wrote hit the chip before we compare it to the * counter. */ - WARN_ON_ONCE((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt); + WARN_ON_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt); - return (s32)((u32)hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; + return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; } static void hpet_legacy_set_mode(enum clock_event_mode mode, @@ -415,7 +415,7 @@ static struct hpet_dev *hpet_devs; void hpet_msi_unmask(unsigned int irq) { struct hpet_dev *hdev = get_irq_data(irq); - unsigned long cfg; + unsigned int cfg; /* unmask it */ cfg = hpet_readl(HPET_Tn_CFG(hdev->num)); @@ -425,7 +425,7 @@ void hpet_msi_unmask(unsigned int irq) void hpet_msi_mask(unsigned int irq) { - unsigned long cfg; + unsigned int cfg; struct hpet_dev *hdev = get_irq_data(irq); /* mask it */ @@ -600,7 +600,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer) for (i = start_timer; i < num_timers - RESERVE_TIMERS; i++) { struct hpet_dev *hdev = &hpet_devs[num_timers_used]; - unsigned long cfg = hpet_readl(HPET_Tn_CFG(i)); + unsigned int cfg = hpet_readl(HPET_Tn_CFG(i)); /* Only consider HPET timer with MSI support */ if (!(cfg & HPET_TN_FSB_CAP)) @@ -815,7 +815,7 @@ static int hpet_clocksource_register(void) */ int __init hpet_enable(void) { - unsigned long id; + unsigned int id; int i; if (!is_hpet_capable()) @@ -933,7 +933,7 @@ fs_initcall(hpet_late_init); void hpet_disable(void) { if (is_hpet_capable()) { - unsigned long cfg = hpet_readl(HPET_CFG); + unsigned int cfg = hpet_readl(HPET_CFG); if (hpet_legacy_int_enabled) { cfg &= ~HPET_CFG_LEGACY; @@ -973,8 +973,8 @@ static int hpet_prev_update_sec; static struct rtc_time hpet_alarm_time; static unsigned long hpet_pie_count; static u32 hpet_t1_cmp; -static unsigned long hpet_default_delta; -static unsigned long hpet_pie_delta; +static u32 hpet_default_delta; +static u32 hpet_pie_delta; static unsigned long hpet_pie_limit; static rtc_irq_handler irq_handler; @@ -1025,7 +1025,8 @@ EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler); */ int hpet_rtc_timer_init(void) { - unsigned long cfg, cnt, delta, flags; + unsigned int cfg, cnt, delta; + unsigned long flags; if (!is_hpet_enabled()) return 0; @@ -1035,7 +1036,7 @@ int hpet_rtc_timer_init(void) clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC; clc >>= hpet_clockevent.shift + DEFAULT_RTC_SHIFT; - hpet_default_delta = (unsigned long) clc; + hpet_default_delta 
= clc; } if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit) @@ -1121,7 +1122,7 @@ int hpet_set_periodic_freq(unsigned long freq) clc = (uint64_t) hpet_clockevent.mult * NSEC_PER_SEC; do_div(clc, freq); clc >>= hpet_clockevent.shift; - hpet_pie_delta = (unsigned long) clc; + hpet_pie_delta = clc; } return 1; } @@ -1135,7 +1136,7 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq); static void hpet_rtc_timer_reinit(void) { - unsigned long cfg, delta; + unsigned int cfg, delta; int lost_ints = -1; if (unlikely(!hpet_rtc_flags)) { -- cgit v1.2.3 From eb13296cfaf6c699566473669a96a38a90562384 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:13 -0400 Subject: x86: Instruction decoder API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add x86 instruction decoder to arch-specific libraries. This decoder can decode x86 instructions used in kernel into prefix, opcode, modrm, sib, displacement and immediates. This can also show the length of instructions. This version introduces instruction attributes for decoding instructions. The instruction attribute tables are generated from the opcode map file (x86-opcode-map.txt) by the generator script(gen-insn-attr-x86.awk). Currently, the opcode maps are based on opcode maps in Intel(R) 64 and IA-32 Architectures Software Developers Manual Vol.2: Appendix.A, and consist of below two types of opcode tables. 1-byte/2-bytes/3-bytes opcodes, which has 256 elements, are written as below; Table: table-name Referrer: escaped-name opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] (or) opcode: escape # escaped-name EndTable Group opcodes, which has 8 elements, are written as below; GrpTable: GrpXXX reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] EndTable These opcode maps include a few SSE and FP opcodes (for setup), because those opcodes are used in the kernel. Signed-off-by: Masami Hiramatsu Signed-off-by: Jim Keniston Acked-by: H. Peter Anvin Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. 
Eigler Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Przemysław Pawełczyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203413.31965.49709.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/include/asm/inat.h | 188 +++++++++ arch/x86/include/asm/inat_types.h | 29 ++ arch/x86/include/asm/insn.h | 143 +++++++ arch/x86/lib/Makefile | 13 + arch/x86/lib/inat.c | 78 ++++ arch/x86/lib/insn.c | 464 ++++++++++++++++++++++ arch/x86/lib/x86-opcode-map.txt | 719 +++++++++++++++++++++++++++++++++++ arch/x86/tools/gen-insn-attr-x86.awk | 314 +++++++++++++++ 8 files changed, 1948 insertions(+) create mode 100644 arch/x86/include/asm/inat.h create mode 100644 arch/x86/include/asm/inat_types.h create mode 100644 arch/x86/include/asm/insn.h create mode 100644 arch/x86/lib/inat.c create mode 100644 arch/x86/lib/insn.c create mode 100644 arch/x86/lib/x86-opcode-map.txt create mode 100644 arch/x86/tools/gen-insn-attr-x86.awk (limited to 'arch') diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h new file mode 100644 index 000000000000..2866fddd1848 --- /dev/null +++ b/arch/x86/include/asm/inat.h @@ -0,0 +1,188 @@ +#ifndef _ASM_X86_INAT_H +#define _ASM_X86_INAT_H +/* + * x86 instruction attributes + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ +#include + +/* + * Internal bits. Don't use bitmasks directly, because these bits are + * unstable. You should use checking functions. 
+ */ + +#define INAT_OPCODE_TABLE_SIZE 256 +#define INAT_GROUP_TABLE_SIZE 8 + +/* Legacy instruction prefixes */ +#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */ +#define INAT_PFX_REPNE 2 /* 0xF2 */ /* LPFX2 */ +#define INAT_PFX_REPE 3 /* 0xF3 */ /* LPFX3 */ +#define INAT_PFX_LOCK 4 /* 0xF0 */ +#define INAT_PFX_CS 5 /* 0x2E */ +#define INAT_PFX_DS 6 /* 0x3E */ +#define INAT_PFX_ES 7 /* 0x26 */ +#define INAT_PFX_FS 8 /* 0x64 */ +#define INAT_PFX_GS 9 /* 0x65 */ +#define INAT_PFX_SS 10 /* 0x36 */ +#define INAT_PFX_ADDRSZ 11 /* 0x67 */ + +#define INAT_LPREFIX_MAX 3 + +/* Immediate size */ +#define INAT_IMM_BYTE 1 +#define INAT_IMM_WORD 2 +#define INAT_IMM_DWORD 3 +#define INAT_IMM_QWORD 4 +#define INAT_IMM_PTR 5 +#define INAT_IMM_VWORD32 6 +#define INAT_IMM_VWORD 7 + +/* Legacy prefix */ +#define INAT_PFX_OFFS 0 +#define INAT_PFX_BITS 4 +#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1) +#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS) +/* Escape opcodes */ +#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS) +#define INAT_ESC_BITS 2 +#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1) +#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS) +/* Group opcodes (1-16) */ +#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS) +#define INAT_GRP_BITS 5 +#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1) +#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS) +/* Immediates */ +#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS) +#define INAT_IMM_BITS 3 +#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS) +/* Flags */ +#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS) +#define INAT_REXPFX (1 << INAT_FLAG_OFFS) +#define INAT_MODRM (1 << (INAT_FLAG_OFFS + 1)) +#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 2)) +#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 3)) +#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 4)) +#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 5)) +/* Attribute making macros for attribute tables */ +#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS) +#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS) +#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM) +#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS) + +/* Attribute search APIs */ +extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode); +extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, + insn_byte_t last_pfx, + insn_attr_t esc_attr); +extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm, + insn_byte_t last_pfx, + insn_attr_t esc_attr); + +/* Attribute checking functions */ +static inline int inat_is_prefix(insn_attr_t attr) +{ + return attr & INAT_PFX_MASK; +} + +static inline int inat_is_address_size_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ; +} + +static inline int inat_is_operand_size_prefix(insn_attr_t attr) +{ + return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ; +} + +static inline int inat_last_prefix_id(insn_attr_t attr) +{ + if ((attr & INAT_PFX_MASK) > INAT_LPREFIX_MAX) + return 0; + else + return attr & INAT_PFX_MASK; +} + +static inline int inat_is_escape(insn_attr_t attr) +{ + return attr & INAT_ESC_MASK; +} + +static inline int inat_escape_id(insn_attr_t attr) +{ + return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS; +} + +static inline int inat_is_group(insn_attr_t attr) +{ + return attr & INAT_GRP_MASK; +} + +static inline int inat_group_id(insn_attr_t attr) +{ + return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS; +} + +static inline int inat_group_common_attribute(insn_attr_t attr) +{ + return attr & ~INAT_GRP_MASK; +} + 
+static inline int inat_has_immediate(insn_attr_t attr) +{ + return attr & INAT_IMM_MASK; +} + +static inline int inat_immediate_size(insn_attr_t attr) +{ + return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS; +} + +static inline int inat_is_rex_prefix(insn_attr_t attr) +{ + return attr & INAT_REXPFX; +} + +static inline int inat_has_modrm(insn_attr_t attr) +{ + return attr & INAT_MODRM; +} + +static inline int inat_is_force64(insn_attr_t attr) +{ + return attr & INAT_FORCE64; +} + +static inline int inat_has_second_immediate(insn_attr_t attr) +{ + return attr & INAT_SCNDIMM; +} + +static inline int inat_has_moffset(insn_attr_t attr) +{ + return attr & INAT_MOFFSET; +} + +static inline int inat_has_variant(insn_attr_t attr) +{ + return attr & INAT_VARIANT; +} + +#endif diff --git a/arch/x86/include/asm/inat_types.h b/arch/x86/include/asm/inat_types.h new file mode 100644 index 000000000000..cb3c20ce39cf --- /dev/null +++ b/arch/x86/include/asm/inat_types.h @@ -0,0 +1,29 @@ +#ifndef _ASM_X86_INAT_TYPES_H +#define _ASM_X86_INAT_TYPES_H +/* + * x86 instruction attributes + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +/* Instruction attributes */ +typedef unsigned int insn_attr_t; +typedef unsigned char insn_byte_t; +typedef signed int insn_value_t; + +#endif diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h new file mode 100644 index 000000000000..12b4e3751d3f --- /dev/null +++ b/arch/x86/include/asm/insn.h @@ -0,0 +1,143 @@ +#ifndef _ASM_X86_INSN_H +#define _ASM_X86_INSN_H +/* + * x86 instruction analysis + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2009 + */ + +/* insn_attr_t is defined in inat.h */ +#include + +struct insn_field { + union { + insn_value_t value; + insn_byte_t bytes[4]; + }; + /* !0 if we've run insn_get_xxx() for this field */ + unsigned char got; + unsigned char nbytes; +}; + +struct insn { + struct insn_field prefixes; /* + * Prefixes + * prefixes.bytes[3]: last prefix + */ + struct insn_field rex_prefix; /* REX prefix */ + struct insn_field opcode; /* + * opcode.bytes[0]: opcode1 + * opcode.bytes[1]: opcode2 + * opcode.bytes[2]: opcode3 + */ + struct insn_field modrm; + struct insn_field sib; + struct insn_field displacement; + union { + struct insn_field immediate; + struct insn_field moffset1; /* for 64bit MOV */ + struct insn_field immediate1; /* for 64bit imm or off16/32 */ + }; + union { + struct insn_field moffset2; /* for 64bit MOV */ + struct insn_field immediate2; /* for 64bit imm or seg16 */ + }; + + insn_attr_t attr; + unsigned char opnd_bytes; + unsigned char addr_bytes; + unsigned char length; + unsigned char x86_64; + + const insn_byte_t *kaddr; /* kernel address of insn to analyze */ + const insn_byte_t *next_byte; +}; + +#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6) +#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3) +#define X86_MODRM_RM(modrm) ((modrm) & 0x07) + +#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6) +#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3) +#define X86_SIB_BASE(sib) ((sib) & 0x07) + +#define X86_REX_W(rex) ((rex) & 8) +#define X86_REX_R(rex) ((rex) & 4) +#define X86_REX_X(rex) ((rex) & 2) +#define X86_REX_B(rex) ((rex) & 1) + +/* The last prefix is needed for two-byte and three-byte opcodes */ +static inline insn_byte_t insn_last_prefix(struct insn *insn) +{ + return insn->prefixes.bytes[3]; +} + +extern void insn_init(struct insn *insn, const void *kaddr, int x86_64); +extern void insn_get_prefixes(struct insn *insn); +extern void insn_get_opcode(struct insn *insn); +extern void insn_get_modrm(struct insn *insn); +extern void insn_get_sib(struct insn *insn); +extern void insn_get_displacement(struct insn *insn); +extern void insn_get_immediate(struct insn *insn); +extern void insn_get_length(struct insn *insn); + +/* Attribute will be determined after getting ModRM (for opcode groups) */ +static inline void insn_get_attribute(struct insn *insn) +{ + insn_get_modrm(insn); +} + +/* Instruction uses RIP-relative addressing */ +extern int insn_rip_relative(struct insn *insn); + +/* Init insn for kernel text */ +static inline void kernel_insn_init(struct insn *insn, const void *kaddr) +{ +#ifdef CONFIG_X86_64 + insn_init(insn, kaddr, 1); +#else /* CONFIG_X86_32 */ + insn_init(insn, kaddr, 0); +#endif +} + +/* Offset of each field from kaddr */ +static inline int insn_offset_rex_prefix(struct insn *insn) +{ + return insn->prefixes.nbytes; +} +static inline int insn_offset_opcode(struct insn *insn) +{ + return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes; +} +static inline int insn_offset_modrm(struct insn *insn) +{ + return insn_offset_opcode(insn) + insn->opcode.nbytes; +} +static inline int insn_offset_sib(struct insn *insn) +{ + return insn_offset_modrm(insn) + insn->modrm.nbytes; +} +static inline int insn_offset_displacement(struct insn *insn) +{ + return insn_offset_sib(insn) + insn->sib.nbytes; +} +static inline int insn_offset_immediate(struct insn *insn) +{ + return insn_offset_displacement(insn) + insn->displacement.nbytes; +} + +#endif /* _ASM_X86_INSN_H */ diff --git a/arch/x86/lib/Makefile 
b/arch/x86/lib/Makefile index 07c31899c9c2..c77f8a7c531d 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -2,12 +2,25 @@ # Makefile for x86 specific library files. # +inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk +inat_tables_maps = $(srctree)/arch/x86/lib/x86-opcode-map.txt +quiet_cmd_inat_tables = GEN $@ + cmd_inat_tables = $(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ + +$(obj)/inat-tables.c: $(inat_tables_script) $(inat_tables_maps) + $(call cmd,inat_tables) + +$(obj)/inat.o: $(obj)/inat-tables.c + +clean-files := inat-tables.c + obj-$(CONFIG_SMP) := msr.o lib-y := delay.o lib-y += thunk_$(BITS).o lib-y += usercopy_$(BITS).o getuser.o putuser.o lib-y += memcpy_$(BITS).o +lib-y += insn.o inat.o ifeq ($(CONFIG_X86_32),y) obj-y += atomic64_32.o diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c new file mode 100644 index 000000000000..054656a01dfd --- /dev/null +++ b/arch/x86/lib/inat.c @@ -0,0 +1,78 @@ +/* + * x86 instruction attribute tables + * + * Written by Masami Hiramatsu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + */ +#include + +/* Attribute tables are generated from opcode map */ +#include "inat-tables.c" + +/* Attribute search APIs */ +insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode) +{ + return inat_primary_table[opcode]; +} + +insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, insn_byte_t last_pfx, + insn_attr_t esc_attr) +{ + const insn_attr_t *table; + insn_attr_t lpfx_attr; + int n, m = 0; + + n = inat_escape_id(esc_attr); + if (last_pfx) { + lpfx_attr = inat_get_opcode_attribute(last_pfx); + m = inat_last_prefix_id(lpfx_attr); + } + table = inat_escape_tables[n][0]; + if (!table) + return 0; + if (inat_has_variant(table[opcode]) && m) { + table = inat_escape_tables[n][m]; + if (!table) + return 0; + } + return table[opcode]; +} + +insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx, + insn_attr_t grp_attr) +{ + const insn_attr_t *table; + insn_attr_t lpfx_attr; + int n, m = 0; + + n = inat_group_id(grp_attr); + if (last_pfx) { + lpfx_attr = inat_get_opcode_attribute(last_pfx); + m = inat_last_prefix_id(lpfx_attr); + } + table = inat_group_tables[n][0]; + if (!table) + return inat_group_common_attribute(grp_attr); + if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && m) { + table = inat_escape_tables[n][m]; + if (!table) + return inat_group_common_attribute(grp_attr); + } + return table[X86_MODRM_REG(modrm)] | + inat_group_common_attribute(grp_attr); +} + diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c new file mode 100644 index 000000000000..dfd56a30053f --- /dev/null +++ b/arch/x86/lib/insn.c @@ -0,0 +1,464 @@ +/* + * x86 instruction analysis + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2002, 2004, 2009 + */ + +#include +#include +#include + +#define get_next(t, insn) \ + ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) + +#define peek_next(t, insn) \ + ({t r; r = *(t*)insn->next_byte; r; }) + +/** + * insn_init() - initialize struct insn + * @insn: &struct insn to be initialized + * @kaddr: address (in kernel memory) of instruction (or copy thereof) + * @x86_64: !0 for 64-bit kernel or 64-bit app + */ +void insn_init(struct insn *insn, const void *kaddr, int x86_64) +{ + memset(insn, 0, sizeof(*insn)); + insn->kaddr = kaddr; + insn->next_byte = kaddr; + insn->x86_64 = x86_64 ? 1 : 0; + insn->opnd_bytes = 4; + if (x86_64) + insn->addr_bytes = 8; + else + insn->addr_bytes = 4; +} + +/** + * insn_get_prefixes - scan x86 instruction prefix bytes + * @insn: &struct insn containing instruction + * + * Populates the @insn->prefixes bitmap, and updates @insn->next_byte + * to point to the (first) opcode. No effect if @insn->prefixes.got + * is already set. 
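+ *
+ * As noted in asm/insn.h, the last legacy prefix found is also mirrored
+ * into prefixes.bytes[3]; insn_last_prefix() returns that byte when the
+ * escape- and group-attribute lookups below need it.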
+ */ +void insn_get_prefixes(struct insn *insn) +{ + struct insn_field *prefixes = &insn->prefixes; + insn_attr_t attr; + insn_byte_t b, lb; + int i, nb; + + if (prefixes->got) + return; + + nb = 0; + lb = 0; + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + while (inat_is_prefix(attr)) { + /* Skip if same prefix */ + for (i = 0; i < nb; i++) + if (prefixes->bytes[i] == b) + goto found; + if (nb == 4) + /* Invalid instruction */ + break; + prefixes->bytes[nb++] = b; + if (inat_is_address_size_prefix(attr)) { + /* address size switches 2/4 or 4/8 */ + if (insn->x86_64) + insn->addr_bytes ^= 12; + else + insn->addr_bytes ^= 6; + } else if (inat_is_operand_size_prefix(attr)) { + /* oprand size switches 2/4 */ + insn->opnd_bytes ^= 6; + } +found: + prefixes->nbytes++; + insn->next_byte++; + lb = b; + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + } + /* Set the last prefix */ + if (lb && lb != insn->prefixes.bytes[3]) { + if (unlikely(insn->prefixes.bytes[3])) { + /* Swap the last prefix */ + b = insn->prefixes.bytes[3]; + for (i = 0; i < nb; i++) + if (prefixes->bytes[i] == lb) + prefixes->bytes[i] = b; + } + insn->prefixes.bytes[3] = lb; + } + + if (insn->x86_64) { + b = peek_next(insn_byte_t, insn); + attr = inat_get_opcode_attribute(b); + if (inat_is_rex_prefix(attr)) { + insn->rex_prefix.value = b; + insn->rex_prefix.nbytes = 1; + insn->next_byte++; + if (X86_REX_W(b)) + /* REX.W overrides opnd_size */ + insn->opnd_bytes = 8; + } + } + insn->rex_prefix.got = 1; + prefixes->got = 1; + return; +} + +/** + * insn_get_opcode - collect opcode(s) + * @insn: &struct insn containing instruction + * + * Populates @insn->opcode, updates @insn->next_byte to point past the + * opcode byte(s), and set @insn->attr (except for groups). + * If necessary, first collects any preceding (prefix) bytes. + * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got + * is already 1. + */ +void insn_get_opcode(struct insn *insn) +{ + struct insn_field *opcode = &insn->opcode; + insn_byte_t op, pfx; + if (opcode->got) + return; + if (!insn->prefixes.got) + insn_get_prefixes(insn); + + /* Get first opcode */ + op = get_next(insn_byte_t, insn); + opcode->bytes[0] = op; + opcode->nbytes = 1; + insn->attr = inat_get_opcode_attribute(op); + while (inat_is_escape(insn->attr)) { + /* Get escaped opcode */ + op = get_next(insn_byte_t, insn); + opcode->bytes[opcode->nbytes++] = op; + pfx = insn_last_prefix(insn); + insn->attr = inat_get_escape_attribute(op, pfx, insn->attr); + } + opcode->got = 1; +} + +/** + * insn_get_modrm - collect ModRM byte, if any + * @insn: &struct insn containing instruction + * + * Populates @insn->modrm and updates @insn->next_byte to point past the + * ModRM byte, if any. If necessary, first collects the preceding bytes + * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1. + */ +void insn_get_modrm(struct insn *insn) +{ + struct insn_field *modrm = &insn->modrm; + insn_byte_t pfx, mod; + if (modrm->got) + return; + if (!insn->opcode.got) + insn_get_opcode(insn); + + if (inat_has_modrm(insn->attr)) { + mod = get_next(insn_byte_t, insn); + modrm->value = mod; + modrm->nbytes = 1; + if (inat_is_group(insn->attr)) { + pfx = insn_last_prefix(insn); + insn->attr = inat_get_group_attribute(mod, pfx, + insn->attr); + } + } + + if (insn->x86_64 && inat_is_force64(insn->attr)) + insn->opnd_bytes = 8; + modrm->got = 1; +} + + +/** + * insn_rip_relative() - Does instruction use RIP-relative addressing mode? 
+ * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * ModRM byte. No effect if @insn->x86_64 is 0. + */ +int insn_rip_relative(struct insn *insn) +{ + struct insn_field *modrm = &insn->modrm; + + if (!insn->x86_64) + return 0; + if (!modrm->got) + insn_get_modrm(insn); + /* + * For rip-relative instructions, the mod field (top 2 bits) + * is zero and the r/m field (bottom 3 bits) is 0x5. + */ + return (modrm->nbytes && (modrm->value & 0xc7) == 0x5); +} + +/** + * insn_get_sib() - Get the SIB byte of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * ModRM byte. + */ +void insn_get_sib(struct insn *insn) +{ + insn_byte_t modrm; + + if (insn->sib.got) + return; + if (!insn->modrm.got) + insn_get_modrm(insn); + if (insn->modrm.nbytes) { + modrm = (insn_byte_t)insn->modrm.value; + if (insn->addr_bytes != 2 && + X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) { + insn->sib.value = get_next(insn_byte_t, insn); + insn->sib.nbytes = 1; + } + } + insn->sib.got = 1; +} + + +/** + * insn_get_displacement() - Get the displacement of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * SIB byte. + * Displacement value is sign-expanded. + */ +void insn_get_displacement(struct insn *insn) +{ + insn_byte_t mod, rm, base; + + if (insn->displacement.got) + return; + if (!insn->sib.got) + insn_get_sib(insn); + if (insn->modrm.nbytes) { + /* + * Interpreting the modrm byte: + * mod = 00 - no displacement fields (exceptions below) + * mod = 01 - 1-byte displacement field + * mod = 10 - displacement field is 4 bytes, or 2 bytes if + * address size = 2 (0x67 prefix in 32-bit mode) + * mod = 11 - no memory operand + * + * If address size = 2... + * mod = 00, r/m = 110 - displacement field is 2 bytes + * + * If address size != 2... 
+ * mod != 11, r/m = 100 - SIB byte exists + * mod = 00, SIB base = 101 - displacement field is 4 bytes + * mod = 00, r/m = 101 - rip-relative addressing, displacement + * field is 4 bytes + */ + mod = X86_MODRM_MOD(insn->modrm.value); + rm = X86_MODRM_RM(insn->modrm.value); + base = X86_SIB_BASE(insn->sib.value); + if (mod == 3) + goto out; + if (mod == 1) { + insn->displacement.value = get_next(char, insn); + insn->displacement.nbytes = 1; + } else if (insn->addr_bytes == 2) { + if ((mod == 0 && rm == 6) || mod == 2) { + insn->displacement.value = + get_next(short, insn); + insn->displacement.nbytes = 2; + } + } else { + if ((mod == 0 && rm == 5) || mod == 2 || + (mod == 0 && base == 5)) { + insn->displacement.value = get_next(int, insn); + insn->displacement.nbytes = 4; + } + } + } +out: + insn->displacement.got = 1; +} + +/* Decode moffset16/32/64 */ +static void __get_moffset(struct insn *insn) +{ + switch (insn->addr_bytes) { + case 2: + insn->moffset1.value = get_next(short, insn); + insn->moffset1.nbytes = 2; + break; + case 4: + insn->moffset1.value = get_next(int, insn); + insn->moffset1.nbytes = 4; + break; + case 8: + insn->moffset1.value = get_next(int, insn); + insn->moffset1.nbytes = 4; + insn->moffset2.value = get_next(int, insn); + insn->moffset2.nbytes = 4; + break; + } + insn->moffset1.got = insn->moffset2.got = 1; +} + +/* Decode imm v32(Iz) */ +static void __get_immv32(struct insn *insn) +{ + switch (insn->opnd_bytes) { + case 2: + insn->immediate.value = get_next(short, insn); + insn->immediate.nbytes = 2; + break; + case 4: + case 8: + insn->immediate.value = get_next(int, insn); + insn->immediate.nbytes = 4; + break; + } +} + +/* Decode imm v64(Iv/Ov) */ +static void __get_immv(struct insn *insn) +{ + switch (insn->opnd_bytes) { + case 2: + insn->immediate1.value = get_next(short, insn); + insn->immediate1.nbytes = 2; + break; + case 4: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + break; + case 8: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + insn->immediate2.value = get_next(int, insn); + insn->immediate2.nbytes = 4; + break; + } + insn->immediate1.got = insn->immediate2.got = 1; +} + +/* Decode ptr16:16/32(Ap) */ +static void __get_immptr(struct insn *insn) +{ + switch (insn->opnd_bytes) { + case 2: + insn->immediate1.value = get_next(short, insn); + insn->immediate1.nbytes = 2; + break; + case 4: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + break; + case 8: + /* ptr16:64 is not exist (no segment) */ + return; + } + insn->immediate2.value = get_next(unsigned short, insn); + insn->immediate2.nbytes = 2; + insn->immediate1.got = insn->immediate2.got = 1; +} + +/** + * insn_get_immediate() - Get the immediates of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * displacement bytes. + * Basically, most of immediates are sign-expanded. 
Unsigned-value can be + * get by bit masking with ((1 << (nbytes * 8)) - 1) + */ +void insn_get_immediate(struct insn *insn) +{ + if (insn->immediate.got) + return; + if (!insn->displacement.got) + insn_get_displacement(insn); + + if (inat_has_moffset(insn->attr)) { + __get_moffset(insn); + goto done; + } + + if (!inat_has_immediate(insn->attr)) + /* no immediates */ + goto done; + + switch (inat_immediate_size(insn->attr)) { + case INAT_IMM_BYTE: + insn->immediate.value = get_next(char, insn); + insn->immediate.nbytes = 1; + break; + case INAT_IMM_WORD: + insn->immediate.value = get_next(short, insn); + insn->immediate.nbytes = 2; + break; + case INAT_IMM_DWORD: + insn->immediate.value = get_next(int, insn); + insn->immediate.nbytes = 4; + break; + case INAT_IMM_QWORD: + insn->immediate1.value = get_next(int, insn); + insn->immediate1.nbytes = 4; + insn->immediate2.value = get_next(int, insn); + insn->immediate2.nbytes = 4; + break; + case INAT_IMM_PTR: + __get_immptr(insn); + break; + case INAT_IMM_VWORD32: + __get_immv32(insn); + break; + case INAT_IMM_VWORD: + __get_immv(insn); + break; + default: + break; + } + if (inat_has_second_immediate(insn->attr)) { + insn->immediate2.value = get_next(char, insn); + insn->immediate2.nbytes = 1; + } +done: + insn->immediate.got = 1; +} + +/** + * insn_get_length() - Get the length of instruction + * @insn: &struct insn containing instruction + * + * If necessary, first collects the instruction up to and including the + * immediates bytes. + */ +void insn_get_length(struct insn *insn) +{ + if (insn->length) + return; + if (!insn->immediate.got) + insn_get_immediate(insn); + insn->length = (unsigned char)((unsigned long)insn->next_byte + - (unsigned long)insn->kaddr); +} diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt new file mode 100644 index 000000000000..083dd59dd74b --- /dev/null +++ b/arch/x86/lib/x86-opcode-map.txt @@ -0,0 +1,719 @@ +# x86 Opcode Maps +# +# +# Table: table-name +# Referrer: escaped-name +# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] +# (or) +# opcode: escape # escaped-name +# EndTable +# +# +# GrpTable: GrpXXX +# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...] 
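+# For illustration (not an extra table): in "GrpTable: Grp1" further down,
+# the entry "0: ADD" means ModRM.reg == 0 selects ADD for that group opcode,
+# while a one-byte entry such as "04: ADD AL,Ib" tells gen-insn-attr-x86.awk
+# that the opcode carries a byte immediate (Ib).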
+# EndTable +# + +Table: one byte opcode +Referrer: +# 0x00 - 0x0f +00: ADD Eb,Gb +01: ADD Ev,Gv +02: ADD Gb,Eb +03: ADD Gv,Ev +04: ADD AL,Ib +05: ADD rAX,Iz +06: PUSH ES (i64) +07: POP ES (i64) +08: OR Eb,Gb +09: OR Ev,Gv +0a: OR Gb,Eb +0b: OR Gv,Ev +0c: OR AL,Ib +0d: OR rAX,Iz +0e: PUSH CS (i64) +0f: escape # 2-byte escape +# 0x10 - 0x1f +10: ADC Eb,Gb +11: ADC Ev,Gv +12: ADC Gb,Eb +13: ADC Gv,Ev +14: ADC AL,Ib +15: ADC rAX,Iz +16: PUSH SS (i64) +17: POP SS (i64) +18: SBB Eb,Gb +19: SBB Ev,Gv +1a: SBB Gb,Eb +1b: SBB Gv,Ev +1c: SBB AL,Ib +1d: SBB rAX,Iz +1e: PUSH DS (i64) +1f: POP DS (i64) +# 0x20 - 0x2f +20: AND Eb,Gb +21: AND Ev,Gv +22: AND Gb,Eb +23: AND Gv,Ev +24: AND AL,Ib +25: AND rAx,Iz +26: SEG=ES (Prefix) +27: DAA (i64) +28: SUB Eb,Gb +29: SUB Ev,Gv +2a: SUB Gb,Eb +2b: SUB Gv,Ev +2c: SUB AL,Ib +2d: SUB rAX,Iz +2e: SEG=CS (Prefix) +2f: DAS (i64) +# 0x30 - 0x3f +30: XOR Eb,Gb +31: XOR Ev,Gv +32: XOR Gb,Eb +33: XOR Gv,Ev +34: XOR AL,Ib +35: XOR rAX,Iz +36: SEG=SS (Prefix) +37: AAA (i64) +38: CMP Eb,Gb +39: CMP Ev,Gv +3a: CMP Gb,Eb +3b: CMP Gv,Ev +3c: CMP AL,Ib +3d: CMP rAX,Iz +3e: SEG=DS (Prefix) +3f: AAS (i64) +# 0x40 - 0x4f +40: INC eAX (i64) | REX (o64) +41: INC eCX (i64) | REX.B (o64) +42: INC eDX (i64) | REX.X (o64) +43: INC eBX (i64) | REX.XB (o64) +44: INC eSP (i64) | REX.R (o64) +45: INC eBP (i64) | REX.RB (o64) +46: INC eSI (i64) | REX.RX (o64) +47: INC eDI (i64) | REX.RXB (o64) +48: DEC eAX (i64) | REX.W (o64) +49: DEC eCX (i64) | REX.WB (o64) +4a: DEC eDX (i64) | REX.WX (o64) +4b: DEC eBX (i64) | REX.WXB (o64) +4c: DEC eSP (i64) | REX.WR (o64) +4d: DEC eBP (i64) | REX.WRB (o64) +4e: DEC eSI (i64) | REX.WRX (o64) +4f: DEC eDI (i64) | REX.WRXB (o64) +# 0x50 - 0x5f +50: PUSH rAX/r8 (d64) +51: PUSH rCX/r9 (d64) +52: PUSH rDX/r10 (d64) +53: PUSH rBX/r11 (d64) +54: PUSH rSP/r12 (d64) +55: PUSH rBP/r13 (d64) +56: PUSH rSI/r14 (d64) +57: PUSH rDI/r15 (d64) +58: POP rAX/r8 (d64) +59: POP rCX/r9 (d64) +5a: POP rDX/r10 (d64) +5b: POP rBX/r11 (d64) +5c: POP rSP/r12 (d64) +5d: POP rBP/r13 (d64) +5e: POP rSI/r14 (d64) +5f: POP rDI/r15 (d64) +# 0x60 - 0x6f +60: PUSHA/PUSHAD (i64) +61: POPA/POPAD (i64) +62: BOUND Gv,Ma (i64) +63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64) +64: SEG=FS (Prefix) +65: SEG=GS (Prefix) +66: Operand-Size (Prefix) +67: Address-Size (Prefix) +68: PUSH Iz (d64) +69: IMUL Gv,Ev,Iz +6a: PUSH Ib (d64) +6b: IMUL Gv,Ev,Ib +6c: INS/INSB Yb,DX +6d: INS/INSW/INSD Yz,DX +6e: OUTS/OUTSB DX,Xb +6f: OUTS/OUTSW/OUTSD DX,Xz +# 0x70 - 0x7f +70: JO Jb +71: JNO Jb +72: JB/JNAE/JC Jb +73: JNB/JAE/JNC Jb +74: JZ/JE Jb +75: JNZ/JNE Jb +76: JBE/JNA Jb +77: JNBE/JA Jb +78: JS Jb +79: JNS Jb +7a: JP/JPE Jb +7b: JNP/JPO Jb +7c: JL/JNGE Jb +7d: JNL/JGE Jb +7e: JLE/JNG Jb +7f: JNLE/JG Jb +# 0x80 - 0x8f +80: Grp1 Eb,Ib (1A) +81: Grp1 Ev,Iz (1A) +82: Grp1 Eb,Ib (1A),(i64) +83: Grp1 Ev,Ib (1A) +84: TEST Eb,Gb +85: TEST Ev,Gv +86: XCHG Eb,Gb +87: XCHG Ev,Gv +88: MOV Eb,Gb +89: MOV Ev,Gv +8a: MOV Gb,Eb +8b: MOV Gv,Ev +8c: MOV Ev,Sw +8d: LEA Gv,M +8e: MOV Sw,Ew +8f: Grp1A (1A) | POP Ev (d64) +# 0x90 - 0x9f +90: NOP | PAUSE (F3) | XCHG r8,rAX +91: XCHG rCX/r9,rAX +92: XCHG rDX/r10,rAX +93: XCHG rBX/r11,rAX +94: XCHG rSP/r12,rAX +95: XCHG rBP/r13,rAX +96: XCHG rSI/r14,rAX +97: XCHG rDI/r15,rAX +98: CBW/CWDE/CDQE +99: CWD/CDQ/CQO +9a: CALLF Ap (i64) +9b: FWAIT/WAIT +9c: PUSHF/D/Q Fv (d64) +9d: POPF/D/Q Fv (d64) +9e: SAHF +9f: LAHF +# 0xa0 - 0xaf +a0: MOV AL,Ob +a1: MOV rAX,Ov +a2: MOV Ob,AL +a3: MOV Ov,rAX +a4: MOVS/B Xb,Yb +a5: MOVS/W/D/Q Xv,Yv +a6: CMPS/B Xb,Yb +a7: CMPS/W/D Xv,Yv +a8: TEST AL,Ib +a9: 
TEST rAX,Iz +aa: STOS/B Yb,AL +ab: STOS/W/D/Q Yv,rAX +ac: LODS/B AL,Xb +ad: LODS/W/D/Q rAX,Xv +ae: SCAS/B AL,Yb +af: SCAS/W/D/Q rAX,Xv +# 0xb0 - 0xbf +b0: MOV AL/R8L,Ib +b1: MOV CL/R9L,Ib +b2: MOV DL/R10L,Ib +b3: MOV BL/R11L,Ib +b4: MOV AH/R12L,Ib +b5: MOV CH/R13L,Ib +b6: MOV DH/R14L,Ib +b7: MOV BH/R15L,Ib +b8: MOV rAX/r8,Iv +b9: MOV rCX/r9,Iv +ba: MOV rDX/r10,Iv +bb: MOV rBX/r11,Iv +bc: MOV rSP/r12,Iv +bd: MOV rBP/r13,Iv +be: MOV rSI/r14,Iv +bf: MOV rDI/r15,Iv +# 0xc0 - 0xcf +c0: Grp2 Eb,Ib (1A) +c1: Grp2 Ev,Ib (1A) +c2: RETN Iw (f64) +c3: RETN +c4: LES Gz,Mp (i64) +c5: LDS Gz,Mp (i64) +c6: Grp11 Eb,Ib (1A) +c7: Grp11 Ev,Iz (1A) +c8: ENTER Iw,Ib +c9: LEAVE (d64) +ca: RETF Iw +cb: RETF +cc: INT3 +cd: INT Ib +ce: INTO (i64) +cf: IRET/D/Q +# 0xd0 - 0xdf +d0: Grp2 Eb,1 (1A) +d1: Grp2 Ev,1 (1A) +d2: Grp2 Eb,CL (1A) +d3: Grp2 Ev,CL (1A) +d4: AAM Ib (i64) +d5: AAD Ib (i64) +d6: +d7: XLAT/XLATB +d8: ESC +d9: ESC +da: ESC +db: ESC +dc: ESC +dd: ESC +de: ESC +df: ESC +# 0xe0 - 0xef +e0: LOOPNE/LOOPNZ Jb (f64) +e1: LOOPE/LOOPZ Jb (f64) +e2: LOOP Jb (f64) +e3: JrCXZ Jb (f64) +e4: IN AL,Ib +e5: IN eAX,Ib +e6: OUT Ib,AL +e7: OUT Ib,eAX +e8: CALL Jz (f64) +e9: JMP-near Jz (f64) +ea: JMP-far Ap (i64) +eb: JMP-short Jb (f64) +ec: IN AL,DX +ed: IN eAX,DX +ee: OUT DX,AL +ef: OUT DX,eAX +# 0xf0 - 0xff +f0: LOCK (Prefix) +f1: +f2: REPNE (Prefix) +f3: REP/REPE (Prefix) +f4: HLT +f5: CMC +f6: Grp3_1 Eb (1A) +f7: Grp3_2 Ev (1A) +f8: CLC +f9: STC +fa: CLI +fb: STI +fc: CLD +fd: STD +fe: Grp4 (1A) +ff: Grp5 (1A) +EndTable + +Table: 2-byte opcode # First Byte is 0x0f +Referrer: 2-byte escape +# 0x0f 0x00-0x0f +00: Grp6 (1A) +01: Grp7 (1A) +02: LAR Gv,Ew +03: LSL Gv,Ew +04: +05: SYSCALL (o64) +06: CLTS +07: SYSRET (o64) +08: INVD +09: WBINVD +0a: +0b: UD2 (1B) +0c: +0d: NOP Ev +0e: +0f: +# 0x0f 0x10-0x1f +10: +11: +12: +13: +14: +15: +16: +17: +18: Grp16 (1A) +19: +1a: +1b: +1c: +1d: +1e: +1f: NOP Ev +# 0x0f 0x20-0x2f +20: MOV Rd,Cd +21: MOV Rd,Dd +22: MOV Cd,Rd +23: MOV Dd,Rd +24: +25: +26: +27: +28: movaps Vps,Wps | movapd Vpd,Wpd (66) +29: movaps Wps,Vps | movapd Wpd,Vpd (66) +2a: +2b: +2c: +2d: +2e: +2f: +# 0x0f 0x30-0x3f +30: WRMSR +31: RDTSC +32: RDMSR +33: RDPMC +34: SYSENTER +35: SYSEXIT +36: +37: GETSEC +38: escape # 3-byte escape 1 +39: +3a: escape # 3-byte escape 2 +3b: +3c: +3d: +3e: +3f: +# 0x0f 0x40-0x4f +40: CMOVO Gv,Ev +41: CMOVNO Gv,Ev +42: CMOVB/C/NAE Gv,Ev +43: CMOVAE/NB/NC Gv,Ev +44: CMOVE/Z Gv,Ev +45: CMOVNE/NZ Gv,Ev +46: CMOVBE/NA Gv,Ev +47: CMOVA/NBE Gv,Ev +48: CMOVS Gv,Ev +49: CMOVNS Gv,Ev +4a: CMOVP/PE Gv,Ev +4b: CMOVNP/PO Gv,Ev +4c: CMOVL/NGE Gv,Ev +4d: CMOVNL/GE Gv,Ev +4e: CMOVLE/NG Gv,Ev +4f: CMOVNLE/G Gv,Ev +# 0x0f 0x50-0x5f +50: +51: +52: +53: +54: +55: +56: +57: +58: +59: +5a: +5b: +5c: +5d: +5e: +5f: +# 0x0f 0x60-0x6f +60: +61: +62: +63: +64: +65: +66: +67: +68: +69: +6a: +6b: +6c: +6d: +6e: +6f: +# 0x0f 0x70-0x7f +70: +71: Grp12 (1A) +72: Grp13 (1A) +73: Grp14 (1A) +74: +75: +76: +77: +78: VMREAD Ed/q,Gd/q +79: VMWRITE Gd/q,Ed/q +7a: +7b: +7c: +7d: +7e: +7f: +# 0x0f 0x80-0x8f +80: JO Jz (f64) +81: JNO Jz (f64) +82: JB/JNAE/JC Jz (f64) +83: JNB/JAE/JNC Jz (f64) +84: JZ/JE Jz (f64) +85: JNZ/JNE Jz (f64) +86: JBE/JNA Jz (f64) +87: JNBE/JA Jz (f64) +88: JS Jz (f64) +89: JNS Jz (f64) +8a: JP/JPE Jz (f64) +8b: JNP/JPO Jz (f64) +8c: JL/JNGE Jz (f64) +8d: JNL/JGE Jz (f64) +8e: JLE/JNG Jz (f64) +8f: JNLE/JG Jz (f64) +# 0x0f 0x90-0x9f +90: SETO Eb +91: SETNO Eb +92: SETB/C/NAE Eb +93: SETAE/NB/NC Eb +94: SETE/Z Eb +95: SETNE/NZ Eb +96: SETBE/NA Eb +97: SETA/NBE Eb +98: SETS Eb +99: SETNS Eb 
+9a: SETP/PE Eb +9b: SETNP/PO Eb +9c: SETL/NGE Eb +9d: SETNL/GE Eb +9e: SETLE/NG Eb +9f: SETNLE/G Eb +# 0x0f 0xa0-0xaf +a0: PUSH FS (d64) +a1: POP FS (d64) +a2: CPUID +a3: BT Ev,Gv +a4: SHLD Ev,Gv,Ib +a5: SHLD Ev,Gv,CL +a6: +a7: GrpRNG +a8: PUSH GS (d64) +a9: POP GS (d64) +aa: RSM +ab: BTS Ev,Gv +ac: SHRD Ev,Gv,Ib +ad: SHRD Ev,Gv,CL +ae: Grp15 (1A),(1C) +af: IMUL Gv,Ev +# 0x0f 0xb0-0xbf +b0: CMPXCHG Eb,Gb +b1: CMPXCHG Ev,Gv +b2: LSS Gv,Mp +b3: BTR Ev,Gv +b4: LFS Gv,Mp +b5: LGS Gv,Mp +b6: MOVZX Gv,Eb +b7: MOVZX Gv,Ew +b8: JMPE | POPCNT Gv,Ev (F3) +b9: Grp10 (1A) +ba: Grp8 Ev,Ib (1A) +bb: BTC Ev,Gv +bc: BSF Gv,Ev +bd: BSR Gv,Ev +be: MOVSX Gv,Eb +bf: MOVSX Gv,Ew +# 0x0f 0xc0-0xcf +c0: XADD Eb,Gb +c1: XADD Ev,Gv +c2: +c3: movnti Md/q,Gd/q +c4: +c5: +c6: +c7: Grp9 (1A) +c8: BSWAP RAX/EAX/R8/R8D +c9: BSWAP RCX/ECX/R9/R9D +ca: BSWAP RDX/EDX/R10/R10D +cb: BSWAP RBX/EBX/R11/R11D +cc: BSWAP RSP/ESP/R12/R12D +cd: BSWAP RBP/EBP/R13/R13D +ce: BSWAP RSI/ESI/R14/R14D +cf: BSWAP RDI/EDI/R15/R15D +# 0x0f 0xd0-0xdf +d0: +d1: +d2: +d3: +d4: +d5: +d6: +d7: +d8: +d9: +da: +db: +dc: +dd: +de: +df: +# 0x0f 0xe0-0xef +e0: +e1: +e2: +e3: +e4: +e5: +e6: +e7: +e8: +e9: +ea: +eb: +ec: +ed: +ee: +ef: +# 0x0f 0xf0-0xff +f0: +f1: +f2: +f3: +f4: +f5: +f6: +f7: +f8: +f9: +fa: +fb: +fc: +fd: +fe: +ff: +EndTable + +Table: 3-byte opcode 1 +Referrer: 3-byte escape 1 +80: INVEPT Gd/q,Mdq (66) +81: INVPID Gd/q,Mdq (66) +f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) +f1: MOVBE Mv,Gv | CRC32 Gd,Ev (F2) +EndTable + +Table: 3-byte opcode 2 +Referrer: 3-byte escape 2 +# all opcode is for SSE +EndTable + +GrpTable: Grp1 +0: ADD +1: OR +2: ADC +3: SBB +4: AND +5: SUB +6: XOR +7: CMP +EndTable + +GrpTable: Grp1A +0: POP +EndTable + +GrpTable: Grp2 +0: ROL +1: ROR +2: RCL +3: RCR +4: SHL/SAL +5: SHR +6: +7: SAR +EndTable + +GrpTable: Grp3_1 +0: TEST Eb,Ib +1: +2: NOT Eb +3: NEG Eb +4: MUL AL,Eb +5: IMUL AL,Eb +6: DIV AL,Eb +7: IDIV AL,Eb +EndTable + +GrpTable: Grp3_2 +0: TEST Ev,Iz +1: +2: NOT Ev +3: NEG Ev +4: MUL rAX,Ev +5: IMUL rAX,Ev +6: DIV rAX,Ev +7: IDIV rAX,Ev +EndTable + +GrpTable: Grp4 +0: INC Eb +1: DEC Eb +EndTable + +GrpTable: Grp5 +0: INC Ev +1: DEC Ev +2: CALLN Ev (f64) +3: CALLF Ep +4: JMPN Ev (f64) +5: JMPF Ep +6: PUSH Ev (d64) +7: +EndTable + +GrpTable: Grp6 +0: SLDT Rv/Mw +1: STR Rv/Mw +2: LLDT Ew +3: LTR Ew +4: VERR Ew +5: VERW Ew +EndTable + +GrpTable: Grp7 +0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) +1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001) +2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) +3: LIDT Ms +4: SMSW Mw/Rv +5: +6: LMSW Ew +7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B) +EndTable + +GrpTable: Grp8 +4: BT +5: BTS +6: BTR +7: BTC +EndTable + +GrpTable: Grp9 +1: CMPXCHG8B/16B Mq/Mdq +6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) +7: VMPTRST Mq +EndTable + +GrpTable: Grp10 +EndTable + +GrpTable: Grp11 +0: MOV +EndTable + +GrpTable: Grp12 +EndTable + +GrpTable: Grp13 +EndTable + +GrpTable: Grp14 +EndTable + +GrpTable: Grp15 +0: fxsave +1: fxstor +2: ldmxcsr +3: stmxcsr +4: XSAVE +5: XRSTOR | lfence (11B) +6: mfence (11B) +7: clflush | sfence (11B) +EndTable + +GrpTable: Grp16 +0: prefetch NTA +1: prefetch T0 +2: prefetch T1 +3: prefetch T2 +EndTable + +GrpTable: GrpRNG +0: xstore-rng +1: xcrypt-ecb +2: xcrypt-cbc +4: xcrypt-cfb +5: xcrypt-ofb +EndTable diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk new file mode 100644 index 000000000000..93b62c92d044 --- /dev/null +++ 
b/arch/x86/tools/gen-insn-attr-x86.awk @@ -0,0 +1,314 @@ +#!/bin/awk -f +# gen-insn-attr-x86.awk: Instruction attribute table generator +# Written by Masami Hiramatsu +# +# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c + +BEGIN { + print "/* x86 opcode map generated from x86-opcode-map.txt */" + print "/* Do not change this code. */" + ggid = 1 + geid = 1 + + opnd_expr = "^[[:alpha:]]" + ext_expr = "^\\(" + sep_expr = "^\\|$" + group_expr = "^Grp[[:alnum:]]+" + + imm_expr = "^[IJAO][[:lower:]]" + imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" + imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)" + imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)" + imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)" + imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)" + imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)" + imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)" + imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)" + imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)" + imm_flag["Ob"] = "INAT_MOFFSET" + imm_flag["Ov"] = "INAT_MOFFSET" + + modrm_expr = "^([CDEGMNPQRSUVW][[:lower:]]+|NTA|T[012])" + force64_expr = "\\([df]64\\)" + rex_expr = "^REX(\\.[XRWB]+)*" + fpu_expr = "^ESC" # TODO + + lprefix1_expr = "\\(66\\)" + delete lptable1 + lprefix2_expr = "\\(F2\\)" + delete lptable2 + lprefix3_expr = "\\(F3\\)" + delete lptable3 + max_lprefix = 4 + + prefix_expr = "\\(Prefix\\)" + prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" + prefix_num["REPNE"] = "INAT_PFX_REPNE" + prefix_num["REP/REPE"] = "INAT_PFX_REPE" + prefix_num["LOCK"] = "INAT_PFX_LOCK" + prefix_num["SEG=CS"] = "INAT_PFX_CS" + prefix_num["SEG=DS"] = "INAT_PFX_DS" + prefix_num["SEG=ES"] = "INAT_PFX_ES" + prefix_num["SEG=FS"] = "INAT_PFX_FS" + prefix_num["SEG=GS"] = "INAT_PFX_GS" + prefix_num["SEG=SS"] = "INAT_PFX_SS" + prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ" + + delete table + delete etable + delete gtable + eid = -1 + gid = -1 +} + +function semantic_error(msg) { + print "Semantic error at " NR ": " msg > "/dev/stderr" + exit 1 +} + +function debug(msg) { + print "DEBUG: " msg +} + +function array_size(arr, i,c) { + c = 0 + for (i in arr) + c++ + return c +} + +/^Table:/ { + print "/* " $0 " */" +} + +/^Referrer:/ { + if (NF == 1) { + # primary opcode table + tname = "inat_primary_table" + eid = -1 + } else { + # escape opcode table + ref = "" + for (i = 2; i <= NF; i++) + ref = ref $i + eid = escape[ref] + tname = sprintf("inat_escape_table_%d", eid) + } +} + +/^GrpTable:/ { + print "/* " $0 " */" + if (!($2 in group)) + semantic_error("No group: " $2 ) + gid = group[$2] + tname = "inat_group_table_" gid +} + +function print_table(tbl,name,fmt,n) +{ + print "const insn_attr_t " name " = {" + for (i = 0; i < n; i++) { + id = sprintf(fmt, i) + if (tbl[id]) + print " [" id "] = " tbl[id] "," + } + print "};" +} + +/^EndTable/ { + if (gid != -1) { + # print group tables + if (array_size(table) != 0) { + print_table(table, tname "[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,0] = tname + } + if (array_size(lptable1) != 0) { + print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,1] = tname "_1" + } + if (array_size(lptable2) != 0) { + print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,2] = tname "_2" + } + if (array_size(lptable3) != 0) { + print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]", + "0x%x", 8) + gtable[gid,3] = tname "_3" + } + } else { + # print primary/escaped tables + if (array_size(table) != 0) { + print_table(table, tname 
"[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,0] = tname + } + if (array_size(lptable1) != 0) { + print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,1] = tname "_1" + } + if (array_size(lptable2) != 0) { + print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,2] = tname "_2" + } + if (array_size(lptable3) != 0) { + print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]", + "0x%02x", 256) + etable[eid,3] = tname "_3" + } + } + print "" + delete table + delete lptable1 + delete lptable2 + delete lptable3 + gid = -1 + eid = -1 +} + +function add_flags(old,new) { + if (old && new) + return old " | " new + else if (old) + return old + else + return new +} + +# convert operands to flags. +function convert_operands(opnd, i,imm,mod) +{ + imm = null + mod = null + for (i in opnd) { + i = opnd[i] + if (match(i, imm_expr) == 1) { + if (!imm_flag[i]) + semantic_error("Unknown imm opnd: " i) + if (imm) { + if (i != "Ib") + semantic_error("Second IMM error") + imm = add_flags(imm, "INAT_SCNDIMM") + } else + imm = imm_flag[i] + } else if (match(i, modrm_expr)) + mod = "INAT_MODRM" + } + return add_flags(imm, mod) +} + +/^[0-9a-f]+\:/ { + if (NR == 1) + next + # get index + idx = "0x" substr($1, 1, index($1,":") - 1) + if (idx in table) + semantic_error("Redefine " idx " in " tname) + + # check if escaped opcode + if ("escape" == $2) { + if ($3 != "#") + semantic_error("No escaped name") + ref = "" + for (i = 4; i <= NF; i++) + ref = ref $i + if (ref in escape) + semantic_error("Redefine escape (" ref ")") + escape[ref] = geid + geid++ + table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")" + next + } + + variant = null + # converts + i = 2 + while (i <= NF) { + opcode = $(i++) + delete opnds + ext = null + flags = null + opnd = null + # parse one opcode + if (match($i, opnd_expr)) { + opnd = $i + split($(i++), opnds, ",") + flags = convert_operands(opnds) + } + if (match($i, ext_expr)) + ext = $(i++) + if (match($i, sep_expr)) + i++ + else if (i < NF) + semantic_error($i " is not a separator") + + # check if group opcode + if (match(opcode, group_expr)) { + if (!(opcode in group)) { + group[opcode] = ggid + ggid++ + } + flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")") + } + # check force(or default) 64bit + if (match(ext, force64_expr)) + flags = add_flags(flags, "INAT_FORCE64") + + # check REX prefix + if (match(opcode, rex_expr)) + flags = add_flags(flags, "INAT_REXPFX") + + # check coprocessor escape : TODO + if (match(opcode, fpu_expr)) + flags = add_flags(flags, "INAT_MODRM") + + # check prefixes + if (match(ext, prefix_expr)) { + if (!prefix_num[opcode]) + semantic_error("Unknown prefix: " opcode) + flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")") + } + if (length(flags) == 0) + continue + # check if last prefix + if (match(ext, lprefix1_expr)) { + lptable1[idx] = add_flags(lptable1[idx],flags) + variant = "INAT_VARIANT" + } else if (match(ext, lprefix2_expr)) { + lptable2[idx] = add_flags(lptable2[idx],flags) + variant = "INAT_VARIANT" + } else if (match(ext, lprefix3_expr)) { + lptable3[idx] = add_flags(lptable3[idx],flags) + variant = "INAT_VARIANT" + } else { + table[idx] = add_flags(table[idx],flags) + } + } + if (variant) + table[idx] = add_flags(table[idx],variant) +} + +END { + # print escape opcode map's array + print "/* Escape opcode map array */" + print "const insn_attr_t const *inat_escape_tables[INAT_ESC_MAX + 1]" \ + "[INAT_LPREFIX_MAX + 1] = {" + for (i = 0; i < geid; i++) 
+ for (j = 0; j < max_lprefix; j++) + if (etable[i,j]) + print " ["i"]["j"] = "etable[i,j]"," + print "};\n" + # print group opcode map's array + print "/* Group opcode map array */" + print "const insn_attr_t const *inat_group_tables[INAT_GRP_MAX + 1]"\ + "[INAT_LPREFIX_MAX + 1] = {" + for (i = 0; i < ggid; i++) + for (j = 0; j < max_lprefix; j++) + if (gtable[i,j]) + print " ["i"]["j"] = "gtable[i,j]"," + print "};" +} -- cgit v1.2.3 From ca0e9badd1a39fecdd235f4bf1481b9da756e27b Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:21 -0400 Subject: x86: X86 instruction decoder build-time selftest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a user-space selftest of x86 instruction decoder at kernel build time. When CONFIG_X86_DECODER_SELFTEST=y, Kbuild builds a test harness of x86 instruction decoder and performs it after building vmlinux. The test compares the results of objdump and x86 instruction decoder code and check there are no differences. Signed-off-by: Masami Hiramatsu Signed-off-by: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Przemysław Pawełczyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203421.31965.29006.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/Kconfig.debug | 9 ++++ arch/x86/Makefile | 3 ++ arch/x86/tools/Makefile | 15 ++++++ arch/x86/tools/distill.awk | 42 ++++++++++++++++ arch/x86/tools/test_get_len.c | 113 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 182 insertions(+) create mode 100644 arch/x86/tools/Makefile create mode 100644 arch/x86/tools/distill.awk create mode 100644 arch/x86/tools/test_get_len.c (limited to 'arch') diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index d105f29bb6bb..7d0b681a132b 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -186,6 +186,15 @@ config X86_DS_SELFTEST config HAVE_MMIOTRACE_SUPPORT def_bool y +config X86_DECODER_SELFTEST + bool "x86 instruction decoder selftest" + depends on DEBUG_KERNEL + ---help--- + Perform x86 instruction decoder selftests at build time. + This option is useful for checking the sanity of x86 instruction + decoder code. + If unsure, say "N". + # # IO delay types: # diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 1b68659c41b4..5fe16bfd15ac 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -154,6 +154,9 @@ all: bzImage KBUILD_IMAGE := $(boot)/bzImage bzImage: vmlinux +ifeq ($(CONFIG_X86_DECODER_SELFTEST),y) + $(Q)$(MAKE) $(build)=arch/x86/tools posttest +endif $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE) $(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/$@ diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile new file mode 100644 index 000000000000..3dd626b99dc8 --- /dev/null +++ b/arch/x86/tools/Makefile @@ -0,0 +1,15 @@ +PHONY += posttest +quiet_cmd_posttest = TEST $@ + cmd_posttest = $(OBJDUMP) -d $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len + +posttest: $(obj)/test_get_len vmlinux + $(call cmd,posttest) + +hostprogs-y := test_get_len + +# -I needed for generated C source and C source which in the kernel tree. 
+HOSTCFLAGS_test_get_len.o := -Wall -I$(objtree)/arch/x86/lib/ -I$(srctree)/arch/x86/include/ -I$(srctree)/arch/x86/lib/ + +# Dependancies are also needed. +$(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c + diff --git a/arch/x86/tools/distill.awk b/arch/x86/tools/distill.awk new file mode 100644 index 000000000000..d433619bb866 --- /dev/null +++ b/arch/x86/tools/distill.awk @@ -0,0 +1,42 @@ +#!/bin/awk -f +# Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len +# Distills the disassembly as follows: +# - Removes all lines except the disassembled instructions. +# - For instructions that exceed 1 line (7 bytes), crams all the hex bytes +# into a single line. +# - Remove bad(or prefix only) instructions + +BEGIN { + prev_addr = "" + prev_hex = "" + prev_mnemonic = "" + bad_expr = "(\\(bad\\)|^rex|^.byte|^rep(z|nz)$|^lock$|^es$|^cs$|^ss$|^ds$|^fs$|^gs$|^data(16|32)$|^addr(16|32|64))" + fwait_expr = "^9b " + fwait_str="9b\tfwait" +} + +/^ *[0-9a-f]+:/ { + if (split($0, field, "\t") < 3) { + # This is a continuation of the same insn. + prev_hex = prev_hex field[2] + } else { + # Skip bad instructions + if (match(prev_mnemonic, bad_expr)) + prev_addr = "" + # Split fwait from other f* instructions + if (match(prev_hex, fwait_expr) && prev_mnemonic != "fwait") { + printf "%s\t%s\n", prev_addr, fwait_str + sub(fwait_expr, "", prev_hex) + } + if (prev_addr != "") + printf "%s\t%s\t%s\n", prev_addr, prev_hex, prev_mnemonic + prev_addr = field[1] + prev_hex = field[2] + prev_mnemonic = field[3] + } +} + +END { + if (prev_addr != "") + printf "%s\t%s\t%s\n", prev_addr, prev_hex, prev_mnemonic +} diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c new file mode 100644 index 000000000000..1e81adb2d8a9 --- /dev/null +++ b/arch/x86/tools/test_get_len.c @@ -0,0 +1,113 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) IBM Corporation, 2009 + */ + +#include +#include +#include +#include + +#ifdef __x86_64__ +#define CONFIG_X86_64 +#else +#define CONFIG_X86_32 +#endif +#define unlikely(cond) (cond) + +#include +#include +#include + +/* + * Test of instruction analysis in general and insn_get_length() in + * particular. See if insn_get_length() and the disassembler agree + * on the length of each instruction in an elf disassembly. 
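+ *
+ * Each input line is the distilled "<address>:<TAB><hex bytes><TAB><mnemonic>"
+ * form emitted by distill.awk (e.g. "ffffffff81000000: 55 push %rbp", with a
+ * made-up address here); the count of hex bytes on each line is what gets
+ * compared against insn_get_length().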
+ * + * Usage: objdump -d a.out | awk -f distill.awk | ./test_get_len + */ + +const char *prog; + +static void usage(void) +{ + fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |" + " ./test_get_len\n"); + exit(1); +} + +static void malformed_line(const char *line, int line_nr) +{ + fprintf(stderr, "%s: malformed line %d:\n%s", prog, line_nr, line); + exit(3); +} + +#define BUFSIZE 256 + +int main(int argc, char **argv) +{ + char line[BUFSIZE]; + unsigned char insn_buf[16]; + struct insn insn; + int insns = 0; + + prog = argv[0]; + if (argc > 1) + usage(); + + while (fgets(line, BUFSIZE, stdin)) { + char copy[BUFSIZE], *s, *tab1, *tab2; + int nb = 0; + unsigned int b; + + insns++; + memset(insn_buf, 0, 16); + strcpy(copy, line); + tab1 = strchr(copy, '\t'); + if (!tab1) + malformed_line(line, insns); + s = tab1 + 1; + s += strspn(s, " "); + tab2 = strchr(s, '\t'); + if (!tab2) + malformed_line(line, insns); + *tab2 = '\0'; /* Characters beyond tab2 aren't examined */ + while (s < tab2) { + if (sscanf(s, "%x", &b) == 1) { + insn_buf[nb++] = (unsigned char) b; + s += 3; + } else + break; + } + /* Decode an instruction */ +#ifdef __x86_64__ + insn_init(&insn, insn_buf, 1); +#else + insn_init(&insn, insn_buf, 0); +#endif + insn_get_length(&insn); + if (insn.length != nb) { + fprintf(stderr, "Error: %s", line); + fprintf(stderr, "Error: objdump says %d bytes, but " + "insn_get_length() says %d (attr:%x)\n", nb, + insn.length, insn.attr); + exit(2); + } + } + fprintf(stderr, "Succeed: decoded and checked %d instructions\n", + insns); + return 0; +} -- cgit v1.2.3 From b46b3d70c9c017d7c4ec49f7f3ffd0af5a622277 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:28 -0400 Subject: kprobes: Checks probe address is instruction boudary on x86 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensure safeness of inserting kprobes by checking whether the specified address is at the first byte of an instruction on x86. This is done by decoding probed function from its head to the probe point. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Przemysław Pawełczyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203428.31965.21939.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/kprobes.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 7b5169d2b000..aa15f3e1f64b 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -48,12 +48,14 @@ #include #include #include +#include #include #include #include #include #include +#include void jprobe_return_end(void); @@ -244,6 +246,75 @@ retry: } } +/* Recover the probed instruction at addr for further analysis. */ +static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) +{ + struct kprobe *kp; + kp = get_kprobe((void *)addr); + if (!kp) + return -EINVAL; + + /* + * Basically, kp->ainsn.insn has an original instruction. + * However, RIP-relative instruction can not do single-stepping + * at different place, fix_riprel() tweaks the displacement of + * that instruction. 
In that case, we can't recover the instruction + * from the kp->ainsn.insn. + * + * On the other hand, kp->opcode has a copy of the first byte of + * the probed instruction, which is overwritten by int3. And + * the instruction at kp->addr is not modified by kprobes except + * for the first byte, we can recover the original instruction + * from it and kp->opcode. + */ + memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + buf[0] = kp->opcode; + return 0; +} + +/* Dummy buffers for kallsyms_lookup */ +static char __dummy_buf[KSYM_NAME_LEN]; + +/* Check if paddr is at an instruction boundary */ +static int __kprobes can_probe(unsigned long paddr) +{ + int ret; + unsigned long addr, offset = 0; + struct insn insn; + kprobe_opcode_t buf[MAX_INSN_SIZE]; + + if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf)) + return 0; + + /* Decode instructions */ + addr = paddr - offset; + while (addr < paddr) { + kernel_insn_init(&insn, (void *)addr); + insn_get_opcode(&insn); + + /* + * Check if the instruction has been modified by another + * kprobe, in which case we replace the breakpoint by the + * original instruction in our buffer. + */ + if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) { + ret = recover_probed_instruction(buf, addr); + if (ret) + /* + * Another debugging subsystem might insert + * this breakpoint. In that case, we can't + * recover it. + */ + return 0; + kernel_insn_init(&insn, buf); + } + insn_get_length(&insn); + addr += insn.length; + } + + return (addr == paddr); +} + /* * Returns non-zero if opcode modifies the interrupt flag. */ @@ -359,6 +430,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) int __kprobes arch_prepare_kprobe(struct kprobe *p) { + if (!can_probe((unsigned long)p->addr)) + return -EILSEQ; /* insn: must be on special executable page on x86. */ p->ainsn.insn = get_insn_slot(); if (!p->ainsn.insn) -- cgit v1.2.3 From 89ae465b0ee470f7d3f8a1c61353445c3acbbe2a Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:36 -0400 Subject: kprobes: Cleanup fix_riprel() using insn decoder on x86 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cleanup fix_riprel() in arch/x86/kernel/kprobes.c by using the new x86 instruction decoder instead of using comparisons with raw ad hoc numeric opcodes. Signed-off-by: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. 
Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Przemysław Pawełczyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203436.31965.34374.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/kprobes.c | 128 +++++++++------------------------------------- 1 file changed, 23 insertions(+), 105 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index aa15f3e1f64b..16ae9610f6ff 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -108,50 +108,6 @@ static const u32 twobyte_is_boostable[256 / 32] = { /* ----------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; -static const u32 onebyte_has_modrm[256 / 32] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ----------------------------------------------- */ - W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */ - W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */ - W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */ - W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */ - W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */ - W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */ - W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */ - W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */ - W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ - W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */ - W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */ - W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */ - W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */ - W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ - W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */ - W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) /* f0 */ - /* ----------------------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; -static const u32 twobyte_has_modrm[256 / 32] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ----------------------------------------------- */ - W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */ - W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */ - W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */ - W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */ - W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */ - W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */ - W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */ - W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */ - W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */ - W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */ - W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */ - W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */ - W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */ - W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */ - W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */ - W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* ff */ - /* ----------------------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; #undef W struct 
kretprobe_blackpoint kretprobe_blacklist[] = { @@ -348,68 +304,30 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) static void __kprobes fix_riprel(struct kprobe *p) { #ifdef CONFIG_X86_64 - u8 *insn = p->ainsn.insn; - s64 disp; - int need_modrm; - - /* Skip legacy instruction prefixes. */ - while (1) { - switch (*insn) { - case 0x66: - case 0x67: - case 0x2e: - case 0x3e: - case 0x26: - case 0x64: - case 0x65: - case 0x36: - case 0xf0: - case 0xf3: - case 0xf2: - ++insn; - continue; - } - break; - } + struct insn insn; + kernel_insn_init(&insn, p->ainsn.insn); - /* Skip REX instruction prefix. */ - if (is_REX_prefix(insn)) - ++insn; - - if (*insn == 0x0f) { - /* Two-byte opcode. */ - ++insn; - need_modrm = test_bit(*insn, - (unsigned long *)twobyte_has_modrm); - } else - /* One-byte opcode. */ - need_modrm = test_bit(*insn, - (unsigned long *)onebyte_has_modrm); - - if (need_modrm) { - u8 modrm = *++insn; - if ((modrm & 0xc7) == 0x05) { - /* %rip+disp32 addressing mode */ - /* Displacement follows ModRM byte. */ - ++insn; - /* - * The copied instruction uses the %rip-relative - * addressing mode. Adjust the displacement for the - * difference between the original location of this - * instruction and the location of the copy that will - * actually be run. The tricky bit here is making sure - * that the sign extension happens correctly in this - * calculation, since we need a signed 32-bit result to - * be sign-extended to 64 bits when it's added to the - * %rip value and yield the same 64-bit result that the - * sign-extension of the original signed 32-bit - * displacement would have given. - */ - disp = (u8 *) p->addr + *((s32 *) insn) - - (u8 *) p->ainsn.insn; - BUG_ON((s64) (s32) disp != disp); /* Sanity check. */ - *(s32 *)insn = (s32) disp; - } + if (insn_rip_relative(&insn)) { + s64 newdisp; + u8 *disp; + insn_get_displacement(&insn); + /* + * The copied instruction uses the %rip-relative addressing + * mode. Adjust the displacement for the difference between + * the original location of this instruction and the location + * of the copy that will actually be run. The tricky bit here + * is making sure that the sign extension happens correctly in + * this calculation, since we need a signed 32-bit result to + * be sign-extended to 64 bits when it's added to the %rip + * value and yield the same 64-bit result that the sign- + * extension of the original signed 32-bit displacement would + * have given. + */ + newdisp = (u8 *) p->addr + (s64) insn.displacement.value - + (u8 *) p->ainsn.insn; + BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ + disp = (u8 *) p->ainsn.insn + insn_offset_displacement(&insn); + *(s32 *) disp = (s32) newdisp; } #endif } -- cgit v1.2.3 From b1cf540f0e5278ecfe8532557e547d833ed269d7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 13 Aug 2009 16:34:44 -0400 Subject: x86: Add pt_regs register and stack access APIs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add following APIs for accessing registers and stack entries from pt_regs. These APIs are required by kprobes-based event tracer on ftrace. Some other debugging tools might be able to use it too. - regs_query_register_offset(const char *name) Query the offset of "name" register. - regs_query_register_name(unsigned int offset) Query the name of register by its offset. - regs_get_register(struct pt_regs *regs, unsigned int offset) Get the value of a register by its offset. 
- regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) Check the address is in the kernel stack. - regs_get_kernel_stack_nth(struct pt_regs *reg, unsigned int nth) Get Nth entry of the kernel stack. (N >= 0) - regs_get_argument_nth(struct pt_regs *reg, unsigned int nth) Get Nth argument at function call. (N >= 0) Signed-off-by: Masami Hiramatsu Cc: linux-arch@vger.kernel.org Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: Jim Keniston Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Przemysław Pawełczyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090813203444.31965.26374.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/include/asm/ptrace.h | 62 +++++++++++++++++++++++ arch/x86/kernel/ptrace.c | 112 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 174 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 0f0d908349aa..a3d49dd7d26e 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -7,6 +7,7 @@ #ifdef __KERNEL__ #include +#include #endif #ifndef __ASSEMBLY__ @@ -216,6 +217,67 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) return regs->sp; } +/* Query offset/name of register from its name/offset */ +extern int regs_query_register_offset(const char *name); +extern const char *regs_query_register_name(unsigned int offset); +#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss)) + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten. + * @offset: offset number of the register. + * + * regs_get_register returns the value of a register whose offset from @regs + * is @offset. The @offset is the offset of the register in struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + return *(unsigned long *)((unsigned long)regs + offset); +} + +/** + * regs_within_kernel_stack() - check the address in the stack + * @regs: pt_regs which contains kernel stack pointer. + * @addr: address which is checked. + * + * regs_within_kenel_stack() checks @addr is within the kernel stack page(s). + * If @addr is within the kernel stack, it returns true. If not, returns false. + */ +static inline int regs_within_kernel_stack(struct pt_regs *regs, + unsigned long addr) +{ + return ((addr & ~(THREAD_SIZE - 1)) == + (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))); +} + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which + * is specifined by @regs. If the @n th entry is NOT in the kernel stack, + * this returns 0. 
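+ *
+ * For example, in a handler that runs at function entry the 0th entry is
+ * typically the return address, so regs_get_kernel_stack_nth(regs, 0) would
+ * read it (an illustration; what the stack holds depends on the call site).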
+ */ +static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n) +{ + unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); + addr += n; + if (regs_within_kernel_stack(regs, (unsigned long)addr)) + return *addr; + else + return 0; +} + +/* Get Nth argument at function call */ +extern unsigned long regs_get_argument_nth(struct pt_regs *regs, + unsigned int n); + /* * These are defined as per linux/ptrace.h, which see. */ diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index 8d7d5c9c1be3..a33a17d5d5c8 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -49,6 +49,118 @@ enum x86_regset { REGSET_IOPERM32, }; +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +static const struct pt_regs_offset regoffset_table[] = { +#ifdef CONFIG_X86_64 + REG_OFFSET_NAME(r15), + REG_OFFSET_NAME(r14), + REG_OFFSET_NAME(r13), + REG_OFFSET_NAME(r12), + REG_OFFSET_NAME(r11), + REG_OFFSET_NAME(r10), + REG_OFFSET_NAME(r9), + REG_OFFSET_NAME(r8), +#endif + REG_OFFSET_NAME(bx), + REG_OFFSET_NAME(cx), + REG_OFFSET_NAME(dx), + REG_OFFSET_NAME(si), + REG_OFFSET_NAME(di), + REG_OFFSET_NAME(bp), + REG_OFFSET_NAME(ax), +#ifdef CONFIG_X86_32 + REG_OFFSET_NAME(ds), + REG_OFFSET_NAME(es), + REG_OFFSET_NAME(fs), + REG_OFFSET_NAME(gs), +#endif + REG_OFFSET_NAME(orig_ax), + REG_OFFSET_NAME(ip), + REG_OFFSET_NAME(cs), + REG_OFFSET_NAME(flags), + REG_OFFSET_NAME(sp), + REG_OFFSET_NAME(ss), + REG_OFFSET_END, +}; + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_query_register_name() - query register name from its offset + * @offset: the offset of a register in struct pt_regs. + * + * regs_query_register_name() returns the name of a register from its + * offset in struct pt_regs. If the @offset is invalid, this returns NULL; + */ +const char *regs_query_register_name(unsigned int offset) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (roff->offset == offset) + return roff->name; + return NULL; +} + +static const int arg_offs_table[] = { +#ifdef CONFIG_X86_32 + [0] = offsetof(struct pt_regs, ax), + [1] = offsetof(struct pt_regs, dx), + [2] = offsetof(struct pt_regs, cx) +#else /* CONFIG_X86_64 */ + [0] = offsetof(struct pt_regs, di), + [1] = offsetof(struct pt_regs, si), + [2] = offsetof(struct pt_regs, dx), + [3] = offsetof(struct pt_regs, cx), + [4] = offsetof(struct pt_regs, r8), + [5] = offsetof(struct pt_regs, r9) +#endif +}; + +/** + * regs_get_argument_nth() - get Nth argument at function call + * @regs: pt_regs which contains registers at function entry. + * @n: argument number. + * + * regs_get_argument_nth() returns @n th argument of a function call. + * Since usually the kernel stack will be changed right after function entry, + * you must use this at function entry. If the @n th entry is NOT in the + * kernel stack or pt_regs, this returns 0. 
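+ *
+ * For instance, with the x86-64 arg_offs_table above,
+ * regs_get_argument_nth(regs, 0) reads the di register, while
+ * regs_get_argument_nth(regs, 6) falls back to the first stack slot past
+ * the return address (an illustration of the lookup, not a full statement
+ * of the calling convention).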
+ */ +unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n) +{ + if (n < ARRAY_SIZE(arg_offs_table)) + return *((unsigned long *)regs + arg_offs_table[n]); + else { + /* + * The typical case: arg n is on the stack. + * (Note: stack[0] = return address, so skip it) + */ + n -= ARRAY_SIZE(arg_offs_table); + return regs_get_kernel_stack_nth(regs, 1 + n); + } +} + /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. -- cgit v1.2.3 From 8d7d14fb27818eb08ebedf9f4a6e286970fe9977 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 21 Aug 2009 15:43:07 -0400 Subject: x86: Fix x86 instruction decoder selftest to check only .text MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix x86 instruction decoder selftest to check only .text because other sections (e.g. .notes) will have random bytes which don't need to be checked. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: H. Peter Anvin Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Przemysław Pawełczyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090821194307.12478.76938.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/tools/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 3dd626b99dc8..95e9cc4bcd94 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -1,6 +1,6 @@ PHONY += posttest quiet_cmd_posttest = TEST $@ - cmd_posttest = $(OBJDUMP) -d $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len + cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len posttest: $(obj)/test_get_len vmlinux $(call cmd,posttest) -- cgit v1.2.3 From 69d991f32152283cbc373136fa45bbb152b32048 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 21 Aug 2009 15:43:16 -0400 Subject: x86: Check awk features before generating inat-tables.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Check some awk mandatory features to generate inat-tables.c that old mawk doesn't support. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: H. Peter Anvin Cc: Ananth N Mavinakayanahalli Cc: Avi Kivity Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Przemysław Pawełczyk Cc: Roland McGrath Cc: Sam Ravnborg Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi Cc: Vegard Nossum LKML-Reference: <20090821194316.12478.57394.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/tools/gen-insn-attr-x86.awk | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'arch') diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index 93b62c92d044..19ba096b7dd1 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk @@ -4,7 +4,25 @@ # # Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c +# Awk implementation sanity check +function check_awk_implement() { + if (!match("abc", "[[:lower:]]+")) + return "Your awk doesn't support charactor-class." 
+ if (sprintf("%x", 0) != "0") + return "Your awk has a printf-format problem." + return "" +} + BEGIN { + # Implementation error checking + awkchecked = check_awk_implement() + if (awkchecked != "") { + print "Error: " awkchecked > "/dev/stderr" + print "Please try to use gawk." > "/dev/stderr" + exit 1 + } + + # Setup generating tables print "/* x86 opcode map generated from x86-opcode-map.txt */" print "/* Do not change this code. */" ggid = 1 @@ -293,6 +311,8 @@ function convert_operands(opnd, i,imm,mod) } END { + if (awkchecked != "") + exit 1 # print escape opcode map's array print "/* Escape opcode map array */" print "const insn_attr_t const *inat_escape_tables[INAT_ESC_MAX + 1]" \ -- cgit v1.2.3 From 24851d2447830e6cba4c4b641cb73e713f312373 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Wed, 26 Aug 2009 23:38:30 +0200 Subject: tracing/kprobes: Dump the culprit kprobe in case of kprobe recursion Kprobes can enter a probing recursion, i.e. a kprobe that loops endlessly because one of the core functions used by its probing mechanism is itself probed. This patch helps pinpoint the kprobe that raised such a recursion by dumping it and raising a BUG instead of a warning (we also disarm the kprobe to try to avoid recursion in BUG() itself). Having a BUG instead of a warning stops the stacktrace in the right place and doesn't pollute the logs with hundreds of traces that eventually end up in a stack overflow. Signed-off-by: Frederic Weisbecker Cc: Masami Hiramatsu Cc: Ananth N Mavinakayanahalli --- arch/x86/kernel/kprobes.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index 16ae9610f6ff..ecee3d23fef8 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -490,9 +490,13 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, /* A probe has been hit in the codepath leading up * to, or just after, single-stepping of a probed * instruction. This entire codepath should strictly - * reside in .kprobes.text section. Raise a warning - * to highlight this peculiar case. + * reside in .kprobes.text section. + * Raise a BUG or we'll continue in an endless + * reentering loop and eventually a stack overflow. */ + arch_disarm_kprobe(p); + dump_kprobe(p); + BUG(); } default: /* impossible cases */ -- cgit v1.2.3 From 3fe6ad6c39834350d74145de855fbffdde02ace4 Mon Sep 17 00:00:00 2001 From: Peter Ma Date: Mon, 24 Aug 2009 14:13:29 -0700 Subject: avr32: re-instate MCI WP/CD pin assignments for ATNGW100 The MRMT1 patch mistakenly reverted commit fe272b5bd13d3522f9d1ed35425f1c7af4d8343f. This patch corrects that, so that both daughtercards are able to assign GPIO PC25 and PE0 to the MCI driver.
Signed-off-by: Peter Ma Signed-off-by: Haavard Skinnemoen --- arch/avr32/boards/atngw100/setup.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c index bc299fbbeb4e..5b022aad4bd9 100644 --- a/arch/avr32/boards/atngw100/setup.c +++ b/arch/avr32/boards/atngw100/setup.c @@ -56,13 +56,8 @@ static struct spi_board_info spi0_board_info[] __initdata = { static struct mci_platform_data __initdata mci0_data = { .slot[0] = { .bus_width = 4, -#if defined(CONFIG_BOARD_ATNGW100_EVKLCD10X) || defined(CONFIG_BOARD_ATNGW100_MRMT1) - .detect_pin = GPIO_PIN_NONE, - .wp_pin = GPIO_PIN_NONE, -#else .detect_pin = GPIO_PIN_PC(25), .wp_pin = GPIO_PIN_PE(0), -#endif }, }; -- cgit v1.2.3 From 0708420370db7350b75cbf64b1eb80ad1b2a89df Mon Sep 17 00:00:00 2001 From: Peter Ma Date: Thu, 27 Aug 2009 00:31:47 -0700 Subject: avr32: function for independently setting up SPI slaves at32_spi_setup_slaves() is exposed to allow the addition of SPI slaves to a SPI bus that has already been registered. This is especially useful for add-on cards, which have their own board setup function. at32_add_device_spi() has been slightly re-organized, but should be transparent to existing users. So to add a SPI slave, after a SPI bus has already been registered: /* Configure the list of slaves on SPI controller "id" */ at32_spi_setup_slaves(id,spi_board_info,ARRAY_SIZE(spi_board_info)); /* Register the list of slaves */ spi_register_board_info(spi_board_info,ARRAY_SIZE(spi_board_info)); Signed-off-by: Peter Ma [haavard.skinnemoen@atmel.com: trivial whitespace fix] Signed-off-by: Haavard Skinnemoen --- arch/avr32/mach-at32ap/at32ap700x.c | 35 ++++++++++++++++------------- arch/avr32/mach-at32ap/include/mach/board.h | 1 + 2 files changed, 20 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index eb9d4dc2e86d..a47592e2fc32 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -1181,19 +1181,32 @@ static struct resource atmel_spi1_resource[] = { DEFINE_DEV(atmel_spi, 1); DEV_CLK(spi_clk, atmel_spi1, pba, 1); -static void __init -at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, - unsigned int n, const u8 *pins) +void __init +at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n) { + /* + * Manage the chipselects as GPIOs, normally using the same pins + * the SPI controller expects; but boards can use other pins.
- */ - static u8 __initdata spi0_pins[] = - { GPIO_PIN_PA(3), GPIO_PIN_PA(4), - GPIO_PIN_PA(5), GPIO_PIN_PA(20), }; - static u8 __initdata spi1_pins[] = - { GPIO_PIN_PB(2), GPIO_PIN_PB(3), - GPIO_PIN_PB(4), GPIO_PIN_PA(27), }; struct platform_device *pdev; u32 pin_mask; @@ -1228,7 +1231,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n) select_peripheral(PIOA, (1 << 0), PERIPH_A, AT32_GPIOF_PULLUP); select_peripheral(PIOA, pin_mask, PERIPH_A, 0); - at32_spi_setup_slaves(0, b, n, spi0_pins); + at32_spi_setup_slaves(0, b, n); break; case 1: @@ -1239,7 +1242,7 @@ at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n) select_peripheral(PIOB, (1 << 0), PERIPH_B, AT32_GPIOF_PULLUP); select_peripheral(PIOB, pin_mask, PERIPH_B, 0); - at32_spi_setup_slaves(1, b, n, spi1_pins); + at32_spi_setup_slaves(1, b, n); break; default: diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h index ddedb471f33e..c7f25bb1d068 100644 --- a/arch/avr32/mach-at32ap/include/mach/board.h +++ b/arch/avr32/mach-at32ap/include/mach/board.h @@ -49,6 +49,7 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data); struct spi_board_info; struct platform_device * at32_add_device_spi(unsigned int id, struct spi_board_info *b, unsigned int n); +void at32_spi_setup_slaves(unsigned int bus_num, struct spi_board_info *b, unsigned int n); struct atmel_lcdfb_info; struct platform_device * -- cgit v1.2.3 From d55651168a20078a94597a297d5cdfd807bf07b6 Mon Sep 17 00:00:00 2001 From: Peter Ma Date: Thu, 27 Aug 2009 00:56:43 -0700 Subject: avr32: MRMT: correct setup of SPI slaves For MRMT1/2 add-on card to Atmel ATNGW100. This patch implements the SPI slave setup method created by patch: <> Signed-off-by: Peter Ma Signed-off-by: Haavard Skinnemoen --- arch/avr32/boards/atngw100/mrmt.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c index bf78e516a85f..7919be311f4a 100644 --- a/arch/avr32/boards/atngw100/mrmt.c +++ b/arch/avr32/boards/atngw100/mrmt.c @@ -302,6 +302,7 @@ static int __init mrmt1_init(void) at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); set_irq_type( AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING ); + at32_spi_setup_slaves(0,spi01_board_info,ARRAY_SIZE(spi01_board_info)); spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info)); #endif -- cgit v1.2.3 From c8bc6f3c806f1fcbfdbf0b1ff6c52dba59192d3b Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Tue, 4 Aug 2009 12:07:09 -0700 Subject: x86: arch specific support for remapping HPET MSIs x86 arch support for remapping HPET MSI's by associating the HPET timer block with the interrupt-remapping HW unit and setting up appropriate irq_chip Signed-off-by: Suresh Siddha Cc: Venkatesh Pallipadi Cc: David Woodhouse Cc: Jesse Barnes Cc: Jay Fenlason LKML-Reference: <20090804190729.630510000@intel.com> Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/hpet.h | 5 +++-- arch/x86/kernel/acpi/boot.c | 1 + arch/x86/kernel/apic/io_apic.c | 49 +++++++++++++++++++++++++++++++++++------- arch/x86/kernel/hpet.c | 3 ++- 4 files changed, 47 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 65847c578b70..5d89fd2a3690 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h @@ -65,6 +65,7 @@ /* hpet memory map physical address */ 
extern unsigned long hpet_address; extern unsigned long force_hpet_address; +extern u8 hpet_blockid; extern int hpet_force_user; extern int is_hpet_enabled(void); extern int hpet_enable(void); @@ -78,9 +79,9 @@ extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg); extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg); #ifdef CONFIG_PCI_MSI -extern int arch_setup_hpet_msi(unsigned int irq); +extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id); #else -static inline int arch_setup_hpet_msi(unsigned int irq) +static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id) { return -EINVAL; } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 6b8ca3a0285d..eae642b0f345 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -624,6 +624,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table) } hpet_address = hpet_tbl->address.address; + hpet_blockid = hpet_tbl->sequence; /* * Some broken BIOSes advertise HPET at 0x0. We really do not diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d2ed6c5ddc80..d9c6f14d3b32 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -3254,7 +3254,8 @@ void destroy_irq(unsigned int irq) * MSI message composition */ #ifdef CONFIG_PCI_MSI -static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg) +static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, + struct msi_msg *msg, u8 hpet_id) { struct irq_cfg *cfg; int err; @@ -3288,7 +3289,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms irte.dest_id = IRTE_DEST(dest); /* Set source-id of interrupt request */ - set_msi_sid(&irte, pdev); + if (pdev) + set_msi_sid(&irte, pdev); + else + set_hpet_sid(&irte, hpet_id); modify_irte(irq, &irte); @@ -3453,7 +3457,7 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) int ret; struct msi_msg msg; - ret = msi_compose_msg(dev, irq, &msg); + ret = msi_compose_msg(dev, irq, &msg, -1); if (ret < 0) return ret; @@ -3586,7 +3590,7 @@ int arch_setup_dmar_msi(unsigned int irq) int ret; struct msi_msg msg; - ret = msi_compose_msg(NULL, irq, &msg); + ret = msi_compose_msg(NULL, irq, &msg, -1); if (ret < 0) return ret; dmar_msi_write(irq, &msg); @@ -3626,6 +3630,19 @@ static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask) #endif /* CONFIG_SMP */ +static struct irq_chip ir_hpet_msi_type = { + .name = "IR-HPET_MSI", + .unmask = hpet_msi_unmask, + .mask = hpet_msi_mask, +#ifdef CONFIG_INTR_REMAP + .ack = ir_ack_apic_edge, +#ifdef CONFIG_SMP + .set_affinity = ir_set_msi_irq_affinity, +#endif +#endif + .retrigger = ioapic_retrigger_irq, +}; + static struct irq_chip hpet_msi_type = { .name = "HPET_MSI", .unmask = hpet_msi_unmask, @@ -3637,20 +3654,36 @@ static struct irq_chip hpet_msi_type = { .retrigger = ioapic_retrigger_irq, }; -int arch_setup_hpet_msi(unsigned int irq) +int arch_setup_hpet_msi(unsigned int irq, unsigned int id) { int ret; struct msi_msg msg; struct irq_desc *desc = irq_to_desc(irq); - ret = msi_compose_msg(NULL, irq, &msg); + if (intr_remapping_enabled) { + struct intel_iommu *iommu = map_hpet_to_ir(id); + int index; + + if (!iommu) + return -1; + + index = alloc_irte(iommu, irq, 1); + if (index < 0) + return -1; + } + + ret = msi_compose_msg(NULL, irq, &msg, id); if (ret < 0) return ret; hpet_msi_write(irq, &msg); desc->status |= IRQ_MOVE_PCNTXT; - 
set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq, - "edge"); + if (irq_remapped(irq)) + set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, + handle_edge_irq, "edge"); + else + set_irq_chip_and_handler_name(irq, &hpet_msi_type, + handle_edge_irq, "edge"); return 0; } diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index ba575f0f2e34..7f024ff47d1d 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -33,6 +33,7 @@ * HPET address is set in acpi/boot.c, when an ACPI entry exists */ unsigned long hpet_address; +u8 hpet_blockid; /* OS timer block num */ #ifdef CONFIG_PCI_MSI static unsigned long hpet_num_timers; #endif @@ -467,7 +468,7 @@ static int hpet_msi_next_event(unsigned long delta, static int hpet_setup_msi_irq(unsigned int irq) { - if (arch_setup_hpet_msi(irq)) { + if (arch_setup_hpet_msi(irq, hpet_blockid)) { destroy_irq(irq); return -EINVAL; } -- cgit v1.2.3 From e9afe9e1b3fdbd56cca53959a2519e70db9c8095 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:22:58 -0400 Subject: kprobes/x86: Call BUG() when reentering probe into KPROBES_HIT_SS Call BUG() when a probe has been hit in the middle of the kprobe processing path, because that kind of probe is currently unrecoverable (recovering it would cause an infinite loop and a stack overflow). The original code seems to assume that it's caused by an int3 which another subsystem inserted on the out-of-line single-step buffer if the probe being hit is the same as the current probe. However, in that case, the int3-hitting address is in the out-of-line buffer and should be different from the first (current) int3 address. Thus, I decided to remove the code. I also removed arch_disarm_kprobe() because it would involve additional work in text_poke(). Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172258.8246.61889.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/kprobes.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index ecee3d23fef8..e0fb615ba1e9 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -482,22 +482,16 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, kcb->kprobe_status = KPROBE_REENTER; break; case KPROBE_HIT_SS: - if (p == kprobe_running()) { - regs->flags &= ~X86_EFLAGS_TF; - regs->flags |= kcb->kprobe_saved_flags; - return 0; - } else { - /* A probe has been hit in the codepath leading up - * to, or just after, single-stepping of a probed - * instruction. This entire codepath should strictly - * reside in .kprobes.text section. - * Raise a BUG or we'll continue in an endless - * reentering loop and eventually a stack overflow. - */ - arch_disarm_kprobe(p); - dump_kprobe(p); - BUG(); - } + /* A probe has been hit in the codepath leading up to, or just + * after, single-stepping of a probed instruction. This entire + * codepath should strictly reside in .kprobes.text section. + * Raise a BUG or we'll continue in an endless reentering loop + * and eventually a stack overflow.
+ */ + printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n", + p->addr); + dump_kprobe(p); + BUG(); default: /* impossible cases */ WARN_ON(1); -- cgit v1.2.3 From f5ad31158d60946b9fd18c8a79c283a6bc432430 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:23:04 -0400 Subject: kprobes/x86-64: Allow to reenter probe on post_handler Allow a probe to be reentered in the post_handler of another probe on x86-64, because x86-64 already allows reentering int3. In that case, the reentered probe just increases kp.nmissed and returns. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172304.8246.4822.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/kprobes.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c index e0fb615ba1e9..c5f1f117e0c0 100644 --- a/arch/x86/kernel/kprobes.c +++ b/arch/x86/kernel/kprobes.c @@ -463,17 +463,6 @@ static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs, { switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: -#ifdef CONFIG_X86_64 - /* TODO: Provide re-entrancy from post_kprobes_handler() and - * avoid exception stack corruption while single-stepping on - * the instruction of the new probe. - */ - arch_disarm_kprobe(p); - regs->ip = (unsigned long)p->addr; - reset_current_kprobe(); - preempt_enable_no_resched(); - break; -#endif case KPROBE_HIT_ACTIVE: save_previous_kprobe(kcb); set_current_kprobe(p, regs, kcb); -- cgit v1.2.3 From 62c9295f9dd250ea1bb2c8078642a275a9ce82f8 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:23:11 -0400 Subject: kprobes/x86: Fix to add __kprobes to in-kernel fault handling functions Add __kprobes to the functions which handle in-kernel fixable page faults. Since kprobes can cause those in-kernel page faults by accessing kprobe data structures, probing those fault functions will cause a fault-int3 loop (do_page_fault() has already been marked as __kprobes). Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172311.8246.92725.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/mm/fault.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index bfae139182ff..c322e59f2d10 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -38,7 +38,8 @@ enum x86_pf_error_code { * Returns 0 if mmiotrace is disabled, or if the fault is not * handled by mmiotrace: */ -static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) +static inline int __kprobes +kmmio_fault(struct pt_regs *regs, unsigned long addr) { if (unlikely(is_kmmio_active())) if (kmmio_handler(regs, addr) == 1) @@ -46,7 +47,7 @@ static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) return 0; } -static inline int notify_page_fault(struct pt_regs *regs) +static inline int __kprobes notify_page_fault(struct pt_regs *regs) { int ret = 0; @@ -239,7 +240,7 @@ void vmalloc_sync_all(void) * * Handle a fault on the vmalloc or module mapping area */ -static noinline int vmalloc_fault(unsigned long address) +static noinline __kprobes int vmalloc_fault(unsigned long address) { unsigned long pgd_paddr; pmd_t *pmd_k; @@ -361,7 +362,7 @@ void vmalloc_sync_all(void) * * This assumes no large pages in there.
*/ -static noinline int vmalloc_fault(unsigned long address) +static noinline __kprobes int vmalloc_fault(unsigned long address) { pgd_t *pgd, *pgd_ref; pud_t *pud, *pud_ref; @@ -858,7 +859,7 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) * There are no security implications to leaving a stale TLB when * increasing the permissions on a page. */ -static noinline int +static noinline __kprobes int spurious_fault(unsigned long error_code, unsigned long address) { pgd_t *pgd; -- cgit v1.2.3 From 8222d718b3ad3ae49c48f69ae4b6a1128c9a92cf Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 27 Aug 2009 13:23:25 -0400 Subject: kprobes/x86-64: Fix to move common_interrupt to .kprobes.text Since nmi, debug and int3 returns to irq_return inside common_interrupt, probing this function will cause int3-loop, so it should be marked as __kprobes. Signed-off-by: Masami Hiramatsu Acked-by: Ananth N Mavinakayanahalli Cc: Ingo Molnar LKML-Reference: <20090827172325.8246.40000.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/entry_64.S | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index c251be745107..36e2ef5cc83f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -809,6 +809,10 @@ END(interrupt) call \func .endm +/* + * Interrupt entry/exit should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" /* * The interrupt stubs push (~vector+0x80) onto the stack and * then jump to common_interrupt. @@ -947,6 +951,10 @@ ENTRY(retint_kernel) CFI_ENDPROC END(common_interrupt) +/* + * End of kprobes section + */ + .popsection /* * APIC interrupts. -- cgit v1.2.3 From 50a482fbd96943516b7a2783900e8fe61a6425e7 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 28 Aug 2009 18:13:19 -0400 Subject: x86: Allow x86-32 instruction decoder selftest on x86-64 Pass $(CONFIG_64BIT) to the x86 insn decoder selftest in case we are decoding 32bit code on x86-64, which will happen when building kernel with ARCH=i386 on x86-64. 
Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ingo Molnar LKML-Reference: <20090828221319.8778.88508.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/tools/Makefile | 2 +- arch/x86/tools/test_get_len.c | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile index 95e9cc4bcd94..1bd006c81564 100644 --- a/arch/x86/tools/Makefile +++ b/arch/x86/tools/Makefile @@ -1,6 +1,6 @@ PHONY += posttest quiet_cmd_posttest = TEST $@ - cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len + cmd_posttest = $(OBJDUMP) -d -j .text $(objtree)/vmlinux | awk -f $(srctree)/arch/x86/tools/distill.awk | $(obj)/test_get_len $(CONFIG_64BIT) posttest: $(obj)/test_get_len vmlinux $(call cmd,posttest) diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c index 1e81adb2d8a9..a3273f4244d5 100644 --- a/arch/x86/tools/test_get_len.c +++ b/arch/x86/tools/test_get_len.c @@ -45,7 +45,7 @@ const char *prog; static void usage(void) { fprintf(stderr, "Usage: objdump -d a.out | awk -f distill.awk |" - " ./test_get_len\n"); + " %s [y|n](64bit flag)\n", prog); exit(1); } @@ -63,11 +63,15 @@ int main(int argc, char **argv) unsigned char insn_buf[16]; struct insn insn; int insns = 0; + int x86_64 = 0; prog = argv[0]; - if (argc > 1) + if (argc > 2) usage(); + if (argc == 2 && argv[1][0] == 'y') + x86_64 = 1; + while (fgets(line, BUFSIZE, stdin)) { char copy[BUFSIZE], *s, *tab1, *tab2; int nb = 0; @@ -93,11 +97,7 @@ int main(int argc, char **argv) break; } /* Decode an instruction */ -#ifdef __x86_64__ - insn_init(&insn, insn_buf, 1); -#else - insn_init(&insn, insn_buf, 0); -#endif + insn_init(&insn, insn_buf, x86_64); insn_get_length(&insn); if (insn.length != nb) { fprintf(stderr, "Error: %s", line); -- cgit v1.2.3 From 70069577323e6f72b845166724f34b9858134437 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Fri, 28 Aug 2009 18:13:26 -0400 Subject: x86: Remove unused config macros from instruction decoder selftest Remove dummy definitions of CONFIG_X86_64 and CONFIG_X86_32 because those macros are not used in the instruction decoder anymore. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: Ingo Molnar LKML-Reference: <20090828221326.8778.70723.stgit@localhost.localdomain> Signed-off-by: Frederic Weisbecker --- arch/x86/tools/test_get_len.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/tools/test_get_len.c b/arch/x86/tools/test_get_len.c index a3273f4244d5..376d33852191 100644 --- a/arch/x86/tools/test_get_len.c +++ b/arch/x86/tools/test_get_len.c @@ -21,11 +21,6 @@ #include #include -#ifdef __x86_64__ -#define CONFIG_X86_64 -#else -#define CONFIG_X86_32 -#endif #define unlikely(cond) (cond) #include -- cgit v1.2.3 From 654d364e26c797e8a5f9e2a1393607e6ca0106eb Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 9 Sep 2009 14:04:06 +0900 Subject: sh: sh4_flush_cache_mm() optimizations. The i-cache flush in the case of VM_EXEC was added way back when as a sanity measure, and in practice we only care about evicting aliases from the d-cache. As a result, it's possible to drop the i-cache flush completely here. 
After careful profiling it's also come up that all of the work associated with hunting down aliases and doing ranged flushing ends up generating more overhead than simply blasting away the entire dcache, particularly if there are many mm's that need to be iterated over. As a result of that, just move back to flush_dcache_all() in these cases, which restores the old behaviour, and vastly simplifies the path. Additionally, on platforms without aliases at all, this can simply be nopped out. Presently we have the alias check in the SH-4 specific version, but this is true for all of the platforms, so move the check up to a generic location. This cuts down quite a bit on superfluous cacheop IPIs. Signed-off-by: Paul Mundt --- arch/sh/mm/cache-sh4.c | 124 ++----------------------------------------------- arch/sh/mm/cache.c | 6 +++ 2 files changed, 10 insertions(+), 120 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index b2453bbef4cd..a5c339bca8aa 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -170,89 +170,13 @@ static void sh4_flush_cache_all(void *unused) flush_icache_all(); } -static void __flush_cache_mm(struct mm_struct *mm, unsigned long start, - unsigned long end) -{ - unsigned long d = 0, p = start & PAGE_MASK; - unsigned long alias_mask = boot_cpu_data.dcache.alias_mask; - unsigned long n_aliases = boot_cpu_data.dcache.n_aliases; - unsigned long select_bit; - unsigned long all_aliases_mask; - unsigned long addr_offset; - pgd_t *dir; - pmd_t *pmd; - pud_t *pud; - pte_t *pte; - int i; - - dir = pgd_offset(mm, p); - pud = pud_offset(dir, p); - pmd = pmd_offset(pud, p); - end = PAGE_ALIGN(end); - - all_aliases_mask = (1 << n_aliases) - 1; - - do { - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { - p &= PMD_MASK; - p += PMD_SIZE; - pmd++; - - continue; - } - - pte = pte_offset_kernel(pmd, p); - - do { - unsigned long phys; - pte_t entry = *pte; - - if (!(pte_val(entry) & _PAGE_PRESENT)) { - pte++; - p += PAGE_SIZE; - continue; - } - - phys = pte_val(entry) & PTE_PHYS_MASK; - - if ((p ^ phys) & alias_mask) { - d |= 1 << ((p & alias_mask) >> PAGE_SHIFT); - d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT); - - if (d == all_aliases_mask) - goto loop_exit; - } - - pte++; - p += PAGE_SIZE; - } while (p < end && ((unsigned long)pte & ~PAGE_MASK)); - pmd++; - } while (p < end); - -loop_exit: - addr_offset = 0; - select_bit = 1; - - for (i = 0; i < n_aliases; i++) { - if (d & select_bit) { - (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE); - wmb(); - } - - select_bit <<= 1; - addr_offset += PAGE_SIZE; - } -} - /* * Note : (RPC) since the caches are physically tagged, the only point * of flush_cache_mm for SH-4 is to get rid of aliases from the * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that * lines can stay resident so long as the virtual address they were * accessed with (hence cache set) is in accord with the physical - * address (i.e. tag). It's no different here. So I reckon we don't - * need to flush the I-cache, since aliases don't matter for that. We - * should try that. + * address (i.e. tag). It's no different here. * * Caller takes mm->mmap_sem. */ @@ -263,33 +187,7 @@ static void sh4_flush_cache_mm(void *arg) if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) return; - /* - * If cache is only 4k-per-way, there are never any 'aliases'. Since - * the cache is physically tagged, the data can just be left in there. 
- */ - if (boot_cpu_data.dcache.n_aliases == 0) - return; - - /* - * Don't bother groveling around the dcache for the VMA ranges - * if there are too many PTEs to make it worthwhile. - */ - if (mm->nr_ptes >= MAX_DCACHE_PAGES) - flush_dcache_all(); - else { - struct vm_area_struct *vma; - - /* - * In this case there are reasonably sized ranges to flush, - * iterate through the VMA list and take care of any aliases. - */ - for (vma = mm->mmap; vma; vma = vma->vm_next) - __flush_cache_mm(mm, vma->vm_start, vma->vm_end); - } - - /* Only touch the icache if one of the VMAs has VM_EXEC set. */ - if (mm->exec_vm) - flush_icache_all(); + flush_dcache_all(); } /* @@ -372,24 +270,10 @@ static void sh4_flush_cache_range(void *args) if (boot_cpu_data.dcache.n_aliases == 0) return; - /* - * Don't bother with the lookup and alias check if we have a - * wide range to cover, just blow away the dcache in its - * entirety instead. -- PFM. - */ - if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES) - flush_dcache_all(); - else - __flush_cache_mm(vma->vm_mm, start, end); + flush_dcache_all(); - if (vma->vm_flags & VM_EXEC) { - /* - * TODO: Is this required??? Need to look at how I-cache - * coherency is assured when new programs are loaded to see if - * this matters. - */ + if (vma->vm_flags & VM_EXEC) flush_icache_all(); - } } /** diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 35c37b7f717a..4aa926054531 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c @@ -164,11 +164,17 @@ void flush_cache_all(void) void flush_cache_mm(struct mm_struct *mm) { + if (boot_cpu_data.dcache.n_aliases == 0) + return; + cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); } void flush_cache_dup_mm(struct mm_struct *mm) { + if (boot_cpu_data.dcache.n_aliases == 0) + return; + cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); } -- cgit v1.2.3 From 31c9efde786252112cc3d04a1ed3513b6ec63a7b Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 9 Sep 2009 14:10:28 +0900 Subject: sh: Kill off broken PHYSADDR() usage in sh4_flush_dcache_page(). PHYSADDR() runs in to issues in 32-bit mode when we do not have the legacy P1/P2 areas mapped, as such, we need to use page_to_phys() directly, which also happens to do the right thing in legacy 29-bit mode. Signed-off-by: Paul Mundt --- arch/sh/mm/cache-sh4.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index a5c339bca8aa..f0999606686f 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -123,12 +123,12 @@ static void sh4_flush_dcache_page(void *arg) else #endif { - unsigned long phys = PHYSADDR(page_address(page)); + unsigned long phys = page_to_phys(page); unsigned long addr = CACHE_OC_ADDRESS_ARRAY; int i, n; /* Loop all the D-cache */ - n = boot_cpu_data.dcache.n_aliases; + n = boot_cpu_data.dcache.way_incr >> 12; for (i = 0; i < n; i++, addr += 4096) flush_cache_4096(addr, phys); } -- cgit v1.2.3 From bd6df57481b329dfeeb4889068848ee4f4761561 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 9 Sep 2009 14:22:15 +0900 Subject: sh: Kill off segment-based d-cache flushing on SH-4. This kills off the unrolled segment based flushers on SH-4 and switches over to a generic unrolled approach derived from the writethrough segment flusher. 
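As a rough illustration of the approach this switches to (not the patch itself; the constants and geometry arguments below are placeholders, the real code reads them from current_cpu_data.dcache and unrolls the loop eight entries at a time):

	/* Sketch: flush the whole operand cache by writing zero (tag, V and U
	 * bits all cleared) to every entry of the memory-mapped address array. */
	#define OC_ADDRESS_ARRAY_BASE	0xf4000000UL	/* placeholder for CACHE_OC_ADDRESS_ARRAY */

	static void flush_dcache_all_sketch(unsigned int sets, unsigned int ways,
					    unsigned int entry_shift)
	{
		unsigned long addr = OC_ADDRESS_ARRAY_BASE;
		unsigned long end  = addr + ((sets << entry_shift) * ways);
		unsigned long step = 1UL << entry_shift;

		for (; addr < end; addr += step)
			__raw_writel(0, addr);
	}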
Signed-off-by: Paul Mundt --- arch/sh/mm/cache-sh4.c | 291 ++++--------------------------------------------- 1 file changed, 20 insertions(+), 271 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index f0999606686f..92b7d947db94 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -29,14 +29,6 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys, unsigned long exec_offset); -/* - * This is initialised here to ensure that it is not placed in the BSS. If - * that were to happen, note that cache_init gets called before the BSS is - * cleared, so this would get nulled out which would be hopeless. - */ -static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = - (void (*)(unsigned long, unsigned long))0xdeadbeef; - /* * Write back the range of D-cache, and purge the I-cache. * @@ -158,10 +150,27 @@ static void __uses_jump_to_uncached flush_icache_all(void) local_irq_restore(flags); } -static inline void flush_dcache_all(void) +static void flush_dcache_all(void) { - (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); - wmb(); + unsigned long addr, end_addr, entry_offset; + + end_addr = CACHE_OC_ADDRESS_ARRAY + + (current_cpu_data.dcache.sets << + current_cpu_data.dcache.entry_shift) * + current_cpu_data.dcache.ways; + + entry_offset = 1 << current_cpu_data.dcache.entry_shift; + + for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) { + __raw_writel(0, addr); addr += entry_offset; + __raw_writel(0, addr); addr += entry_offset; + __raw_writel(0, addr); addr += entry_offset; + __raw_writel(0, addr); addr += entry_offset; + __raw_writel(0, addr); addr += entry_offset; + __raw_writel(0, addr); addr += entry_offset; + __raw_writel(0, addr); addr += entry_offset; + __raw_writel(0, addr); addr += entry_offset; + } } static void sh4_flush_cache_all(void *unused) @@ -347,245 +356,6 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys, } while (--way_count != 0); } -/* - * Break the 1, 2 and 4 way variants of this out into separate functions to - * avoid nearly all the overhead of having the conditional stuff in the function - * bodies (+ the 1 and 2 way cases avoid saving any registers too). - * - * We want to eliminate unnecessary bus transactions, so this code uses - * a non-obvious technique. - * - * Loop over a cache way sized block of, one cache line at a time. For each - * line, use movca.a to cause the current cache line contents to be written - * back, but without reading anything from main memory. However this has the - * side effect that the cache is now caching that memory location. So follow - * this with a cache invalidate to mark the cache line invalid. And do all - * this with interrupts disabled, to avoid the cache line being accidently - * evicted while it is holding garbage. - * - * This also breaks in a number of circumstances: - * - if there are modifications to the region of memory just above - * empty_zero_page (for example because a breakpoint has been placed - * there), then these can be lost. - * - * This is because the the memory address which the cache temporarily - * caches in the above description is empty_zero_page. So the - * movca.l hits the cache (it is assumed that it misses, or at least - * isn't dirty), modifies the line and then invalidates it, losing the - * required change. - * - * - If caches are disabled or configured in write-through mode, then - * the movca.l writes garbage directly into memory. 
- */ -static void __flush_dcache_segment_writethrough(unsigned long start, - unsigned long extent_per_way) -{ - unsigned long addr; - int i; - - addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask); - - while (extent_per_way) { - for (i = 0; i < cpu_data->dcache.ways; i++) - __raw_writel(0, addr + cpu_data->dcache.way_incr * i); - - addr += cpu_data->dcache.linesz; - extent_per_way -= cpu_data->dcache.linesz; - } -} - -static void __flush_dcache_segment_1way(unsigned long start, - unsigned long extent_per_way) -{ - unsigned long orig_sr, sr_with_bl; - unsigned long base_addr; - unsigned long way_incr, linesz, way_size; - struct cache_info *dcache; - register unsigned long a0, a0e; - - asm volatile("stc sr, %0" : "=r" (orig_sr)); - sr_with_bl = orig_sr | (1<<28); - base_addr = ((unsigned long)&empty_zero_page[0]); - - /* - * The previous code aligned base_addr to 16k, i.e. the way_size of all - * existing SH-4 D-caches. Whilst I don't see a need to have this - * aligned to any better than the cache line size (which it will be - * anyway by construction), let's align it to at least the way_size of - * any existing or conceivable SH-4 D-cache. -- RPC - */ - base_addr = ((base_addr >> 16) << 16); - base_addr |= start; - - dcache = &boot_cpu_data.dcache; - linesz = dcache->linesz; - way_incr = dcache->way_incr; - way_size = dcache->way_size; - - a0 = base_addr; - a0e = base_addr + extent_per_way; - do { - asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); - asm volatile("movca.l r0, @%0\n\t" - "ocbi @%0" : : "r" (a0)); - a0 += linesz; - asm volatile("movca.l r0, @%0\n\t" - "ocbi @%0" : : "r" (a0)); - a0 += linesz; - asm volatile("movca.l r0, @%0\n\t" - "ocbi @%0" : : "r" (a0)); - a0 += linesz; - asm volatile("movca.l r0, @%0\n\t" - "ocbi @%0" : : "r" (a0)); - asm volatile("ldc %0, sr" : : "r" (orig_sr)); - a0 += linesz; - } while (a0 < a0e); -} - -static void __flush_dcache_segment_2way(unsigned long start, - unsigned long extent_per_way) -{ - unsigned long orig_sr, sr_with_bl; - unsigned long base_addr; - unsigned long way_incr, linesz, way_size; - struct cache_info *dcache; - register unsigned long a0, a1, a0e; - - asm volatile("stc sr, %0" : "=r" (orig_sr)); - sr_with_bl = orig_sr | (1<<28); - base_addr = ((unsigned long)&empty_zero_page[0]); - - /* See comment under 1-way above */ - base_addr = ((base_addr >> 16) << 16); - base_addr |= start; - - dcache = &boot_cpu_data.dcache; - linesz = dcache->linesz; - way_incr = dcache->way_incr; - way_size = dcache->way_size; - - a0 = base_addr; - a1 = a0 + way_incr; - a0e = base_addr + extent_per_way; - do { - asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); - asm volatile("movca.l r0, @%0\n\t" - "movca.l r0, @%1\n\t" - "ocbi @%0\n\t" - "ocbi @%1" : : - "r" (a0), "r" (a1)); - a0 += linesz; - a1 += linesz; - asm volatile("movca.l r0, @%0\n\t" - "movca.l r0, @%1\n\t" - "ocbi @%0\n\t" - "ocbi @%1" : : - "r" (a0), "r" (a1)); - a0 += linesz; - a1 += linesz; - asm volatile("movca.l r0, @%0\n\t" - "movca.l r0, @%1\n\t" - "ocbi @%0\n\t" - "ocbi @%1" : : - "r" (a0), "r" (a1)); - a0 += linesz; - a1 += linesz; - asm volatile("movca.l r0, @%0\n\t" - "movca.l r0, @%1\n\t" - "ocbi @%0\n\t" - "ocbi @%1" : : - "r" (a0), "r" (a1)); - asm volatile("ldc %0, sr" : : "r" (orig_sr)); - a0 += linesz; - a1 += linesz; - } while (a0 < a0e); -} - -static void __flush_dcache_segment_4way(unsigned long start, - unsigned long extent_per_way) -{ - unsigned long orig_sr, sr_with_bl; - unsigned long base_addr; - unsigned long way_incr, linesz, way_size; - struct 
cache_info *dcache; - register unsigned long a0, a1, a2, a3, a0e; - - asm volatile("stc sr, %0" : "=r" (orig_sr)); - sr_with_bl = orig_sr | (1<<28); - base_addr = ((unsigned long)&empty_zero_page[0]); - - /* See comment under 1-way above */ - base_addr = ((base_addr >> 16) << 16); - base_addr |= start; - - dcache = &boot_cpu_data.dcache; - linesz = dcache->linesz; - way_incr = dcache->way_incr; - way_size = dcache->way_size; - - a0 = base_addr; - a1 = a0 + way_incr; - a2 = a1 + way_incr; - a3 = a2 + way_incr; - a0e = base_addr + extent_per_way; - do { - asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); - asm volatile("movca.l r0, @%0\n\t" - "movca.l r0, @%1\n\t" - "movca.l r0, @%2\n\t" - "movca.l r0, @%3\n\t" - "ocbi @%0\n\t" - "ocbi @%1\n\t" - "ocbi @%2\n\t" - "ocbi @%3\n\t" : : - "r" (a0), "r" (a1), "r" (a2), "r" (a3)); - a0 += linesz; - a1 += linesz; - a2 += linesz; - a3 += linesz; - asm volatile("movca.l r0, @%0\n\t" - "movca.l r0, @%1\n\t" - "movca.l r0, @%2\n\t" - "movca.l r0, @%3\n\t" - "ocbi @%0\n\t" - "ocbi @%1\n\t" - "ocbi @%2\n\t" - "ocbi @%3\n\t" : : - "r" (a0), "r" (a1), "r" (a2), "r" (a3)); - a0 += linesz; - a1 += linesz; - a2 += linesz; - a3 += linesz; - asm volatile("movca.l r0, @%0\n\t" - "movca.l r0, @%1\n\t" - "movca.l r0, @%2\n\t" - "movca.l r0, @%3\n\t" - "ocbi @%0\n\t" - "ocbi @%1\n\t" - "ocbi @%2\n\t" - "ocbi @%3\n\t" : : - "r" (a0), "r" (a1), "r" (a2), "r" (a3)); - a0 += linesz; - a1 += linesz; - a2 += linesz; - a3 += linesz; - asm volatile("movca.l r0, @%0\n\t" - "movca.l r0, @%1\n\t" - "movca.l r0, @%2\n\t" - "movca.l r0, @%3\n\t" - "ocbi @%0\n\t" - "ocbi @%1\n\t" - "ocbi @%2\n\t" - "ocbi @%3\n\t" : : - "r" (a0), "r" (a1), "r" (a2), "r" (a3)); - asm volatile("ldc %0, sr" : : "r" (orig_sr)); - a0 += linesz; - a1 += linesz; - a2 += linesz; - a3 += linesz; - } while (a0 < a0e); -} - extern void __weak sh4__flush_region_init(void); /* @@ -593,32 +363,11 @@ extern void __weak sh4__flush_region_init(void); */ void __init sh4_cache_init(void) { - unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT); - printk("PVR=%08x CVR=%08x PRR=%08x\n", ctrl_inl(CCN_PVR), ctrl_inl(CCN_CVR), ctrl_inl(CCN_PRR)); - if (wt_enabled) - __flush_dcache_segment_fn = __flush_dcache_segment_writethrough; - else { - switch (boot_cpu_data.dcache.ways) { - case 1: - __flush_dcache_segment_fn = __flush_dcache_segment_1way; - break; - case 2: - __flush_dcache_segment_fn = __flush_dcache_segment_2way; - break; - case 4: - __flush_dcache_segment_fn = __flush_dcache_segment_4way; - break; - default: - panic("unknown number of cache ways\n"); - break; - } - } - local_flush_icache_range = sh4_flush_icache_range; local_flush_dcache_page = sh4_flush_dcache_page; local_flush_cache_all = sh4_flush_cache_all; -- cgit v1.2.3 From deaef20e9789d93c06d2d3b5ffc99939814802ca Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 9 Sep 2009 16:06:39 +0900 Subject: sh: Rework sh4_flush_cache_page() for coherent kmap mapping. This builds on top of the MIPS r4k code that does roughly the same thing. This permits the use of kmap_coherent() for mapped pages with dirty dcache lines and falls back on kmap_atomic() otherwise. This also fixes up a problem with the alias check and defers to shm_align_mask directly. 
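The core of the change is the choice of kernel mapping used to reach another context's page; roughly, and with names simplified from the diff below, it looks like this sketch:

	/* Flushing a page that belongs to another ASID: prefer kmap_coherent(),
	 * which maps the page at the same cache colour as the user address, and
	 * fall back to kmap_atomic() when the page is unmapped or still has
	 * PG_dcache_dirty set. */
	int map_coherent = current_cpu_data.dcache.n_aliases &&
			   !test_bit(PG_dcache_dirty, &page->flags) &&
			   page_mapped(page);
	void *vaddr;

	if (map_coherent)
		vaddr = kmap_coherent(page, address);
	else
		vaddr = kmap_atomic(page, KM_USER0);

	/* ... flush the aliasing cache lines through vaddr ... */

	if (map_coherent)
		kunmap_coherent(vaddr);
	else
		kunmap_atomic(vaddr, KM_USER0);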
Signed-off-by: Paul Mundt --- arch/sh/mm/cache-sh4.c | 75 ++++++++++++++++++++++++++++++++------------------ 1 file changed, 48 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 92b7d947db94..e3fbd99b323c 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -2,7 +2,7 @@ * arch/sh/mm/cache-sh4.c * * Copyright (C) 1999, 2000, 2002 Niibe Yutaka - * Copyright (C) 2001 - 2007 Paul Mundt + * Copyright (C) 2001 - 2009 Paul Mundt * Copyright (C) 2003 Richard Curnow * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. * @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include #include @@ -23,7 +25,6 @@ * flushing. Anything exceeding this will simply flush the dcache in its * entirety. */ -#define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ #define MAX_ICACHE_PAGES 32 static void __flush_cache_4096(unsigned long addr, unsigned long phys, @@ -209,44 +210,64 @@ static void sh4_flush_cache_page(void *args) { struct flusher_data *data = args; struct vm_area_struct *vma; + struct page *page; unsigned long address, pfn, phys; - unsigned int alias_mask; + int map_coherent = 0; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + void *vaddr; vma = data->vma; address = data->addr1; pfn = data->addr2; phys = pfn << PAGE_SHIFT; + page = pfn_to_page(pfn); if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) return; - alias_mask = boot_cpu_data.dcache.alias_mask; - - /* We only need to flush D-cache when we have alias */ - if ((address^phys) & alias_mask) { - /* Loop 4K of the D-cache */ - flush_cache_4096( - CACHE_OC_ADDRESS_ARRAY | (address & alias_mask), - phys); - /* Loop another 4K of the D-cache */ - flush_cache_4096( - CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask), - phys); - } + address &= PAGE_MASK; + pgd = pgd_offset(vma->vm_mm, address); + pud = pud_offset(pgd, address); + pmd = pmd_offset(pud, address); + pte = pte_offset_kernel(pmd, address); + + /* If the page isn't present, there is nothing to do here. */ + if (!(pte_val(*pte) & _PAGE_PRESENT)) + return; - alias_mask = boot_cpu_data.icache.alias_mask; - if (vma->vm_flags & VM_EXEC) { + if ((vma->vm_mm == current->active_mm)) + vaddr = NULL; + else { /* - * Evict entries from the portion of the cache from which code - * may have been executed at this address (virtual). There's - * no need to evict from the portion corresponding to the - * physical address as for the D-cache, because we know the - * kernel has never executed the code through its identity - * translation. + * Use kmap_coherent or kmap_atomic to do flushes for + * another ASID than the current one. */ - flush_cache_4096( - CACHE_IC_ADDRESS_ARRAY | (address & alias_mask), - phys); + map_coherent = (current_cpu_data.dcache.n_aliases && + !test_bit(PG_dcache_dirty, &page->flags) && + page_mapped(page)); + if (map_coherent) + vaddr = kmap_coherent(page, address); + else + vaddr = kmap_atomic(page, KM_USER0); + + address = (unsigned long)vaddr; + } + + if (pages_do_alias(address, phys)) + flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | + (address & shm_align_mask), phys); + + if (vma->vm_flags & VM_EXEC) + flush_icache_all(); + + if (vaddr) { + if (map_coherent) + kunmap_coherent(vaddr); + else + kunmap_atomic(vaddr, KM_USER0); } } -- cgit v1.2.3 From c4845a4b2288a9e5d96a0558e474809028c8aff3 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 9 Sep 2009 17:13:07 +0900 Subject: sh: Fix up redundant cache flushing for PAGE_SIZE > 4k. 
If PAGE_SIZE is presently over 4k we do a lot of extra flushing given that we purge the cache 4k at a time. Make it explicitly 4k per iteration, rather than iterating for PAGE_SIZE before looping over again. Signed-off-by: Paul Mundt --- arch/sh/mm/cache-sh4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index e3fbd99b323c..8362d312ad94 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -357,7 +357,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys, * pointless nead-of-loop check for 0 iterations. */ do { - ea = base_addr + PAGE_SIZE; + ea = base_addr + 4096; a = base_addr; p = phys; -- cgit v1.2.3 From f9e2bdfdbb4c9da13422b349227be8c7b41dbd44 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Wed, 9 Sep 2009 17:14:19 +0900 Subject: sh: Factor in cpu id for selection of cache colour fixmap. In the SMP VIPT case the page copy/clear ops still perform colouring, so care needs to be taken that CPUs don't end up stepping on each other; we give them a bit of room to work with. At the same time, we reduce the worst-case colouring given that these pages are always consumed. Signed-off-by: Paul Mundt --- arch/sh/include/asm/fixmap.h | 6 +++--- arch/sh/mm/kmap.c | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h index 721fcc4d5e98..76c5a3099cb8 100644 --- a/arch/sh/include/asm/fixmap.h +++ b/arch/sh/include/asm/fixmap.h @@ -14,9 +14,9 @@ #define _ASM_FIXMAP_H #include +#include #include #ifdef CONFIG_HIGHMEM -#include #include #endif @@ -46,9 +46,9 @@ * fix-mapped? */ enum fixed_addresses { -#define FIX_N_COLOURS 16 +#define FIX_N_COLOURS 8 FIX_CMAP_BEGIN, - FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS, + FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS), FIX_UNCACHED, #ifdef CONFIG_HIGHMEM FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c index 16e01b5fed04..15d74ea42094 100644 --- a/arch/sh/mm/kmap.c +++ b/arch/sh/mm/kmap.c @@ -39,7 +39,9 @@ void *kmap_coherent(struct page *page, unsigned long addr) pagefault_disable(); idx = FIX_CMAP_END - - ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT); + (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) + + (FIX_N_COLOURS * smp_processor_id())); + vaddr = __fix_to_virt(idx); BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); -- cgit v1.2.3 From f12b4f546b4e327d5620a544a2bddab68de66027 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 8 Sep 2009 12:32:46 -0400 Subject: x86: Add MMX support for instruction decoder Add MMX/SSE instructions to x86 opcode maps, since some of those instructions are used in the kernel. This also fixes failures in the x86 instruction decoder selftest. Signed-off-by: Masami Hiramatsu Cc: Jim Keniston Cc: H.
Peter Anvin Cc: Sam Ravnborg Cc: Frederic Weisbecker Cc: Ingo Molnar LKML-Reference: <20090908163246.23516.78835.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- arch/x86/lib/x86-opcode-map.txt | 307 ++++++++++++++++++++++++++-------------- 1 file changed, 200 insertions(+), 107 deletions(-) (limited to 'arch') diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 083dd59dd74b..59e20d5c2a52 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt @@ -310,14 +310,14 @@ Referrer: 2-byte escape 0e: 0f: # 0x0f 0x10-0x1f -10: -11: -12: -13: -14: -15: -16: -17: +10: movups Vps,Wps | movss Vss,Wss (F3) | movupd Vpd,Wpd (66) | movsd Vsd,Wsd (F2) +11: movups Wps,Vps | movss Wss,Vss (F3) | movupd Wpd,Vpd (66) | movsd Wsd,Vsd (F2) +12: movlps Vq,Mq | movlpd Vq,Mq (66) | movhlps Vq,Uq | movddup Vq,Wq (F2) | movsldup Vq,Wq (F3) +13: mpvlps Mq,Vq | movlpd Mq,Vq (66) +14: unpcklps Vps,Wq | unpcklpd Vpd,Wq (66) +15: unpckhps Vps,Wq | unpckhpd Vpd,Wq (66) +16: movhps Vq,Mq | movhpd Vq,Mq (66) | movlsps Vq,Uq | movshdup Vq,Wq (F3) +17: movhps Mq,Vq | movhpd Mq,Vq (66) 18: Grp16 (1A) 19: 1a: @@ -337,12 +337,12 @@ Referrer: 2-byte escape 27: 28: movaps Vps,Wps | movapd Vpd,Wpd (66) 29: movaps Wps,Vps | movapd Wpd,Vpd (66) -2a: -2b: -2c: -2d: -2e: -2f: +2a: cvtpi2ps Vps,Qpi | cvtsi2ss Vss,Ed/q (F3) | cvtpi2pd Vpd,Qpi (66) | cvtsi2sd Vsd,Ed/q (F2) +2b: movntps Mps,Vps | movntpd Mpd,Vpd (66) +2c: cvttps2pi Ppi,Wps | cvttss2si Gd/q,Wss (F3) | cvttpd2pi Ppi,Wpd (66) | cvttsd2si Gd/q,Wsd (F2) +2d: cvtps2pi Ppi,Wps | cvtss2si Gd/q,Wss (F3) | cvtpd2pi Qpi,Wpd (66) | cvtsd2si Gd/q,Wsd (F2) +2e: ucomiss Vss,Wss | ucomisd Vsd,Wsd (66) +2f: comiss Vss,Wss | comisd Vsd,Wsd (66) # 0x0f 0x30-0x3f 30: WRMSR 31: RDTSC @@ -378,56 +378,56 @@ Referrer: 2-byte escape 4e: CMOVLE/NG Gv,Ev 4f: CMOVNLE/G Gv,Ev # 0x0f 0x50-0x5f -50: -51: -52: -53: -54: -55: -56: -57: -58: -59: -5a: -5b: -5c: -5d: -5e: -5f: +50: movmskps Gd/q,Ups | movmskpd Gd/q,Upd (66) +51: sqrtps Vps,Wps | sqrtss Vss,Wss (F3) | sqrtpd Vpd,Wpd (66) | sqrtsd Vsd,Wsd (F2) +52: rsqrtps Vps,Wps | rsqrtss Vss,Wss (F3) +53: rcpps Vps,Wps | rcpss Vss,Wss (F3) +54: andps Vps,Wps | andpd Vpd,Wpd (66) +55: andnps Vps,Wps | andnpd Vpd,Wpd (66) +56: orps Vps,Wps | orpd Vpd,Wpd (66) +57: xorps Vps,Wps | xorpd Vpd,Wpd (66) +58: addps Vps,Wps | addss Vss,Wss (F3) | addpd Vpd,Wpd (66) | addsd Vsd,Wsd (F2) +59: mulps Vps,Wps | mulss Vss,Wss (F3) | mulpd Vpd,Wpd (66) | mulsd Vsd,Wsd (F2) +5a: cvtps2pd Vpd,Wps | cvtss2sd Vsd,Wss (F3) | cvtpd2ps Vps,Wpd (66) | cvtsd2ss Vsd,Wsd (F2) +5b: cvtdq2ps Vps,Wdq | cvtps2dq Vdq,Wps (66) | cvttps2dq Vdq,Wps (F3) +5c: subps Vps,Wps | subss Vss,Wss (F3) | subpd Vpd,Wpd (66) | subsd Vsd,Wsd (F2) +5d: minps Vps,Wps | minss Vss,Wss (F3) | minpd Vpd,Wpd (66) | minsd Vsd,Wsd (F2) +5e: divps Vps,Wps | divss Vss,Wss (F3) | divpd Vpd,Wpd (66) | divsd Vsd,Wsd (F2) +5f: maxps Vps,Wps | maxss Vss,Wss (F3) | maxpd Vpd,Wpd (66) | maxsd Vsd,Wsd (F2) # 0x0f 0x60-0x6f -60: -61: -62: -63: -64: -65: -66: -67: -68: -69: -6a: -6b: -6c: -6d: -6e: -6f: +60: punpcklbw Pq,Qd | punpcklbw Vdq,Wdq (66) +61: punpcklwd Pq,Qd | punpcklwd Vdq,Wdq (66) +62: punpckldq Pq,Qd | punpckldq Vdq,Wdq (66) +63: packsswb Pq,Qq | packsswb Vdq,Wdq (66) +64: pcmpgtb Pq,Qq | pcmpgtb Vdq,Wdq (66) +65: pcmpgtw Pq,Qq | pcmpgtw(66) Vdq,Wdq +66: pcmpgtd Pq,Qq | pcmpgtd Vdq,Wdq (66) +67: packuswb Pq,Qq | packuswb(66) Vdq,Wdq +68: punpckhbw Pq,Qd | punpckhbw Vdq,Wdq (66) +69: punpckhwd Pq,Qd | punpckhwd Vdq,Wdq (66) 
+6a: punpckhdq Pq,Qd | punpckhdq Vdq,Wdq (66) +6b: packssdw Pq,Qd | packssdw Vdq,Wdq (66) +6c: punpcklqdq Vdq,Wdq (66) +6d: punpckhqdq Vdq,Wdq (66) +6e: movd/q/ Pd,Ed/q | movd/q Vdq,Ed/q (66) +6f: movq Pq,Qq | movdqa Vdq,Wdq (66) | movdqu Vdq,Wdq (F3) # 0x0f 0x70-0x7f -70: +70: pshufw Pq,Qq,Ib | pshufd Vdq,Wdq,Ib (66) | pshufhw Vdq,Wdq,Ib (F3) | pshuflw VdqWdq,Ib (F2) 71: Grp12 (1A) 72: Grp13 (1A) 73: Grp14 (1A) -74: -75: -76: -77: +74: pcmpeqb Pq,Qq | pcmpeqb Vdq,Wdq (66) +75: pcmpeqw Pq,Qq | pcmpeqw Vdq,Wdq (66) +76: pcmpeqd Pq,Qq | pcmpeqd Vdq,Wdq (66) +77: emms 78: VMREAD Ed/q,Gd/q 79: VMWRITE Gd/q,Ed/q 7a: 7b: -7c: -7d: -7e: -7f: +7c: haddps(F2) Vps,Wps | haddpd(66) Vpd,Wpd +7d: hsubps(F2) Vps,Wps | hsubpd(66) Vpd,Wpd +7e: movd/q Ed/q,Pd | movd/q Ed/q,Vdq (66) | movq Vq,Wq (F3) +7f: movq Qq,Pq | movdqa Wdq,Vdq (66) | movdqu Wdq,Vdq (F3) # 0x0f 0x80-0x8f 80: JO Jz (f64) 81: JNO Jz (f64) @@ -499,11 +499,11 @@ bf: MOVSX Gv,Ew # 0x0f 0xc0-0xcf c0: XADD Eb,Gb c1: XADD Ev,Gv -c2: +c2: cmpps Vps,Wps,Ib | cmpss Vss,Wss,Ib (F3) | cmppd Vpd,Wpd,Ib (66) | cmpsd Vsd,Wsd,Ib (F2) c3: movnti Md/q,Gd/q -c4: -c5: -c6: +c4: pinsrw Pq,Rd/q/Mw,Ib | pinsrw Vdq,Rd/q/Mw,Ib (66) +c5: pextrw Gd,Nq,Ib | pextrw Gd,Udq,Ib (66) +c6: shufps Vps,Wps,Ib | shufpd Vpd,Wpd,Ib (66) c7: Grp9 (1A) c8: BSWAP RAX/EAX/R8/R8D c9: BSWAP RCX/ECX/R9/R9D @@ -514,60 +514,131 @@ cd: BSWAP RBP/EBP/R13/R13D ce: BSWAP RSI/ESI/R14/R14D cf: BSWAP RDI/EDI/R15/R15D # 0x0f 0xd0-0xdf -d0: -d1: -d2: -d3: -d4: -d5: -d6: -d7: -d8: -d9: -da: -db: -dc: -dd: -de: -df: +d0: addsubps Vps,Wps (F2) | addsubpd Vpd,Wpd (66) +d1: psrlw Pq,Qq | psrlw Vdq,Wdq (66) +d2: psrld Pq,Qq | psrld Vdq,Wdq (66) +d3: psrlq Pq,Qq | psrlq Vdq,Wdq (66) +d4: paddq Pq,Qq | paddq Vdq,Wdq (66) +d5: pmullw Pq,Qq | pmullw Vdq,Wdq (66) +d6: movq Wq,Vq (66) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2) +d7: pmovmskb Gd,Nq | pmovmskb Gd,Udq (66) +d8: psubusb Pq,Qq | psubusb Vdq,Wdq (66) +d9: psubusw Pq,Qq | psubusw Vdq,Wdq (66) +da: pminub Pq,Qq | pminub Vdq,Wdq (66) +db: pand Pq,Qq | pand Vdq,Wdq (66) +dc: paddusb Pq,Qq | paddusb Vdq,Wdq (66) +dd: paddusw Pq,Qq | paddusw Vdq,Wdq (66) +de: pmaxub Pq,Qq | pmaxub Vdq,Wdq (66) +df: pandn Pq,Qq | pandn Vdq,Wdq (66) # 0x0f 0xe0-0xef -e0: -e1: -e2: -e3: -e4: -e5: -e6: -e7: -e8: -e9: -ea: -eb: -ec: -ed: -ee: -ef: +e0: pavgb Pq,Qq | pavgb Vdq,Wdq (66) +e1: psraw Pq,Qq | psraw Vdq,Wdq (66) +e2: psrad Pq,Qq | psrad Vdq,Wdq (66) +e3: pavgw Pq,Qq | pavgw Vdq,Wdq (66) +e4: pmulhuw Pq,Qq | pmulhuw Vdq,Wdq (66) +e5: pmulhw Pq,Qq | pmulhw Vdq,Wdq (66) +e6: cvtpd2dq Vdq,Wpd (F2) | cvttpd2dq Vdq,Wpd (66) | cvtdq2pd Vpd,Wdq (F3) +e7: movntq Mq,Pq | movntdq Mdq,Vdq (66) +e8: psubsb Pq,Qq | psubsb Vdq,Wdq (66) +e9: psubsw Pq,Qq | psubsw Vdq,Wdq (66) +ea: pminsw Pq,Qq | pminsw Vdq,Wdq (66) +eb: por Pq,Qq | por Vdq,Wdq (66) +ec: paddsb Pq,Qq | paddsb Vdq,Wdq (66) +ed: paddsw Pq,Qq | paddsw Vdq,Wdq (66) +ee: pmaxsw Pq,Qq | pmaxsw Vdq,Wdq (66) +ef: pxor Pq,Qq | pxor Vdq,Wdq (66) # 0x0f 0xf0-0xff -f0: -f1: -f2: -f3: -f4: -f5: -f6: -f7: -f8: -f9: -fa: -fb: -fc: -fd: -fe: +f0: lddqu Vdq,Mdq (F2) +f1: psllw Pq,Qq | psllw Vdq,Wdq (66) +f2: pslld Pq,Qq | pslld Vdq,Wdq (66) +f3: psllq Pq,Qq | psllq Vdq,Wdq (66) +f4: pmuludq Pq,Qq | pmuludq Vdq,Wdq (66) +f5: pmaddwd Pq,Qq | pmaddwd Vdq,Wdq (66) +f6: psadbw Pq,Qq | psadbw Vdq,Wdq (66) +f7: maskmovq Pq,Nq | maskmovdqu Vdq,Udq (66) +f8: psubb Pq,Qq | psubb Vdq,Wdq (66) +f9: psubw Pq,Qq | psubw Vdq,Wdq (66) +fa: psubd Pq,Qq | psubd Vdq,Wdq (66) +fb: psubq Pq,Qq | psubq Vdq,Wdq (66) +fc: paddb Pq,Qq | paddb Vdq,Wdq 
(66) +fd: paddw Pq,Qq | paddw Vdq,Wdq (66) +fe: paddd Pq,Qq | paddd Vdq,Wdq (66) ff: EndTable Table: 3-byte opcode 1 Referrer: 3-byte escape 1 +# 0x0f 0x38 0x00-0x0f +00: pshufb Pq,Qq | pshufb Vdq,Wdq (66) +01: phaddw Pq,Qq | phaddw Vdq,Wdq (66) +02: phaddd Pq,Qq | phaddd Vdq,Wdq (66) +03: phaddsw Pq,Qq | phaddsw Vdq,Wdq (66) +04: pmaddubsw Pq,Qq | pmaddubsw (66)Vdq,Wdq +05: phsubw Pq,Qq | phsubw Vdq,Wdq (66) +06: phsubd Pq,Qq | phsubd Vdq,Wdq (66) +07: phsubsw Pq,Qq | phsubsw Vdq,Wdq (66) +08: psignb Pq,Qq | psignb Vdq,Wdq (66) +09: psignw Pq,Qq | psignw Vdq,Wdq (66) +0a: psignd Pq,Qq | psignd Vdq,Wdq (66) +0b: pmulhrsw Pq,Qq | pmulhrsw Vdq,Wdq (66) +0c: +0d: +0e: +0f: +# 0x0f 0x38 0x10-0x1f +10: pblendvb Vdq,Wdq (66) +11: +12: +13: +14: blendvps Vdq,Wdq (66) +15: blendvpd Vdq,Wdq (66) +16: +17: ptest Vdq,Wdq (66) +18: +19: +1a: +1b: +1c: pabsb Pq,Qq | pabsb Vdq,Wdq (66) +1d: pabsw Pq,Qq | pabsw Vdq,Wdq (66) +1e: pabsd Pq,Qq | pabsd Vdq,Wdq (66) +1f: +# 0x0f 0x38 0x20-0x2f +20: pmovsxbw Vdq,Udq/Mq (66) +21: pmovsxbd Vdq,Udq/Md (66) +22: pmovsxbq Vdq,Udq/Mw (66) +23: pmovsxwd Vdq,Udq/Mq (66) +24: pmovsxwq Vdq,Udq/Md (66) +25: pmovsxdq Vdq,Udq/Mq (66) +26: +27: +28: pmuldq Vdq,Wdq (66) +29: pcmpeqq Vdq,Wdq (66) +2a: movntdqa Vdq,Mdq (66) +2b: packusdw Vdq,Wdq (66) +2c: +2d: +2e: +2f: +# 0x0f 0x38 0x30-0x3f +30: pmovzxbw Vdq,Udq/Mq (66) +31: pmovzxbd Vdq,Udq/Md (66) +32: pmovzxbq Vdq,Udq/Mw (66) +33: pmovzxwd Vdq,Udq/Mq (66) +34: pmovzxwq Vdq,Udq/Md (66) +35: pmovzxdq Vdq,Udq/Mq (66) +36: +37: pcmpgtq Vdq,Wdq (66) +38: pminsb Vdq,Wdq (66) +39: pminsd Vdq,Wdq (66) +3a: pminuw Vdq,Wdq (66) +3b: pminud Vdq,Wdq (66) +3c: pmaxsb Vdq,Wdq (66) +3d: pmaxsd Vdq,Wdq (66) +3e: pmaxuw Vdq,Wdq (66) +3f: pmaxud Vdq,Wdq (66) +# 0x0f 0x38 0x4f-0xff +40: pmulld Vdq,Wdq (66) +41: phminposuw Vdq,Wdq (66) 80: INVEPT Gd/q,Mdq (66) 81: INVPID Gd/q,Mdq (66) f0: MOVBE Gv,Mv | CRC32 Gd,Eb (F2) @@ -576,7 +647,29 @@ EndTable Table: 3-byte opcode 2 Referrer: 3-byte escape 2 -# all opcode is for SSE +# 0x0f 0x3a 0x00-0xff +08: roundps Vdq,Wdq,Ib (66) +09: roundpd Vdq,Wdq,Ib (66) +0a: roundss Vss,Wss,Ib (66) +0b: roundsd Vsd,Wsd,Ib (66) +0c: blendps Vdq,Wdq,Ib (66) +0d: blendpd Vdq,Wdq,Ib (66) +0e: pblendw Vdq,Wdq,Ib (66) +0f: palignr Pq,Qq,Ib | palignr Vdq,Wdq,Ib (66) +14: pextrb Rd/Mb,Vdq,Ib (66) +15: pextrw Rd/Mw,Vdq,Ib (66) +16: pextrd/pextrq Ed/q,Vdq,Ib (66) +17: extractps Ed,Vdq,Ib (66) +20: pinsrb Vdq,Rd/q/Mb,Ib (66) +21: insertps Vdq,Udq/Md,Ib (66) +22: pinsrd/pinsrq Vdq,Ed/q,Ib (66) +40: dpps Vdq,Wdq,Ib (66) +41: dppd Vdq,Wdq,Ib (66) +42: mpsadbw Vdq,Wdq,Ib (66) +60: pcmpestrm Vdq,Wdq,Ib (66) +61: pcmpestri Vdq,Wdq,Ib (66) +62: pcmpistrm Vdq,Wdq,Ib (66) +63: pcmpistri Vdq,Wdq,Ib (66) EndTable GrpTable: Grp1 -- cgit v1.2.3 From a00e817f42663941ea0aa5f85a9d1c4f8b212839 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Tue, 8 Sep 2009 12:47:55 -0400 Subject: kprobes/x86-32: Move irq-exit functions to kprobes section Move irq-exit functions to .kprobes.text section to protect against kprobes recursion. When I ran kprobe stress test on x86-32, I found below symbols cause unrecoverable recursive probing: ret_from_exception ret_from_intr check_userspace restore_all restore_all_notrace restore_nocheck irq_return And also, I found some interrupt/exception entry points that cause similar problems. This patch moves those symbols (including their container functions) to .kprobes.text section to prevent any kprobes probing. 
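The entry code below has to use .pushsection/.popsection explicitly because it is assembly; for C code the equivalent is the __kprobes function marker. A minimal sketch, assuming the standard <linux/kprobes.h> definitions (illustrative only, not part of this patch):

    #include <linux/kprobes.h>

    /* Functions marked __kprobes are linked into .kprobes.text, and
     * register_kprobe() refuses to place a probe on addresses inside
     * that section, which is what prevents recursive probing. */
    static int __kprobes sample_pre_handler(struct kprobe *p,
                                            struct pt_regs *regs)
    {
            return 0;   /* let the probed instruction execute normally */
    }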
Signed-off-by: Masami Hiramatsu Cc: Frederic Weisbecker Cc: Ananth N Mavinakayanahalli Cc: Jim Keniston Cc: Ingo Molnar LKML-Reference: <20090908164755.24050.81182.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/entry_32.S | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index c097e7d607c6..beb30da203d6 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -333,6 +333,10 @@ ENTRY(ret_from_fork) CFI_ENDPROC END(ret_from_fork) +/* + * Interrupt exit functions should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" /* * Return to user mode is not as complex as all this looks, * but we want the default path for a system call return to @@ -383,6 +387,10 @@ need_resched: END(resume_kernel) #endif CFI_ENDPROC +/* + * End of kprobes section + */ + .popsection /* SYSENTER_RETURN points to after the "sysenter" instruction in the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ @@ -513,6 +521,10 @@ sysexit_audit: PTGS_TO_GS_EX ENDPROC(ia32_sysenter_target) +/* + * syscall stub including irq exit should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" # system call handler stub ENTRY(system_call) RING0_INT_FRAME # can't unwind into user space anyway @@ -705,6 +717,10 @@ syscall_badsys: jmp resume_userspace END(syscall_badsys) CFI_ENDPROC +/* + * End of kprobes section + */ + .popsection /* * System calls that need a pt_regs pointer. @@ -814,6 +830,10 @@ common_interrupt: ENDPROC(common_interrupt) CFI_ENDPROC +/* + * Irq entries should be protected against kprobes + */ + .pushsection .kprobes.text, "ax" #define BUILD_INTERRUPT3(name, nr, fn) \ ENTRY(name) \ RING0_INT_FRAME; \ @@ -980,6 +1000,10 @@ ENTRY(spurious_interrupt_bug) jmp error_code CFI_ENDPROC END(spurious_interrupt_bug) +/* + * End of kprobes section + */ + .popsection ENTRY(kernel_thread_helper) pushl $0 # fake return address for unwinder -- cgit v1.2.3 From ad5cafcdb09c57008c990edd309c0a563b09f238 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 10 Sep 2009 19:53:06 -0400 Subject: x86/ptrace: Fix regs_get_argument_nth() to add correct offset Fix regs_get_argument_nth() to add correct offset bytes. Because offset_of() returns offset in byte, the offset should be added to char * instead of unsigned long *. Signed-off-by: Masami Hiramatsu Acked-by: Steven Rostedt Cc: Jim Keniston Cc: Ananth N Mavinakayanahalli Cc: Andi Kleen Cc: Christoph Hellwig Cc: Frank Ch. Eigler Cc: Frederic Weisbecker Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Jason Baron Cc: K.Prasad Cc: Lai Jiangshan Cc: Li Zefan Cc: Peter Zijlstra Cc: Srikar Dronamraju Cc: Steven Rostedt Cc: Tom Zanussi LKML-Reference: <20090910235306.22412.31613.stgit@dhcp-100-2-132.bos.redhat.com> Signed-off-by: Frederic Weisbecker --- arch/x86/kernel/ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index a33a17d5d5c8..caffb6809452 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c @@ -150,7 +150,7 @@ static const int arg_offs_table[] = { unsigned long regs_get_argument_nth(struct pt_regs *regs, unsigned int n) { if (n < ARRAY_SIZE(arg_offs_table)) - return *((unsigned long *)regs + arg_offs_table[n]); + return *(unsigned long *)((char *)regs + arg_offs_table[n]); else { /* * The typical case: arg n is on the stack. 
-- cgit v1.2.3 From b8a4754147d61f5359a765a3afd3eb03012aa052 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Thu, 30 Jul 2009 11:10:02 +0200 Subject: x86, msr: Unify rdmsr_on_cpus/wrmsr_on_cpus Since rdmsr_on_cpus and wrmsr_on_cpus are almost identical, unify them into a common __rwmsr_on_cpus helper thus avoiding code duplication. While at it, convert cpumask_t's to const struct cpumask *. Signed-off-by: Borislav Petkov Signed-off-by: H. Peter Anvin Signed-off-by: Ingo Molnar --- arch/x86/include/asm/msr.h | 4 ++-- arch/x86/lib/msr.c | 46 +++++++++++++++++++--------------------------- 2 files changed, 21 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 7e2b6ba962ff..9a00219b331a 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -247,8 +247,8 @@ do { \ #ifdef CONFIG_SMP int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); -void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); -void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs); +void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); +void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs); int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h); int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h); int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]); diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c index 33a1e3ca22d8..41628b104b9e 100644 --- a/arch/x86/lib/msr.c +++ b/arch/x86/lib/msr.c @@ -71,14 +71,9 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h) } EXPORT_SYMBOL(wrmsr_on_cpu); -/* rdmsr on a bunch of CPUs - * - * @mask: which CPUs - * @msr_no: which MSR - * @msrs: array of MSR values - * - */ -void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) +static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no, + struct msr *msrs, + void (*msr_func) (void *info)) { struct msr_info rv; int this_cpu; @@ -92,11 +87,23 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) this_cpu = get_cpu(); if (cpumask_test_cpu(this_cpu, mask)) - __rdmsr_on_cpu(&rv); + msr_func(&rv); - smp_call_function_many(mask, __rdmsr_on_cpu, &rv, 1); + smp_call_function_many(mask, msr_func, &rv, 1); put_cpu(); } + +/* rdmsr on a bunch of CPUs + * + * @mask: which CPUs + * @msr_no: which MSR + * @msrs: array of MSR values + * + */ +void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) +{ + __rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu); +} EXPORT_SYMBOL(rdmsr_on_cpus); /* @@ -107,24 +114,9 @@ EXPORT_SYMBOL(rdmsr_on_cpus); * @msrs: array of MSR values * */ -void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs) +void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) { - struct msr_info rv; - int this_cpu; - - memset(&rv, 0, sizeof(rv)); - - rv.off = cpumask_first(mask); - rv.msrs = msrs; - rv.msr_no = msr_no; - - this_cpu = get_cpu(); - - if (cpumask_test_cpu(this_cpu, mask)) - __wrmsr_on_cpu(&rv); - - smp_call_function_many(mask, __wrmsr_on_cpu, &rv, 1); - put_cpu(); + __rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu); } EXPORT_SYMBOL(wrmsr_on_cpus); -- cgit v1.2.3 From 8bb014895547eeeb9aa61a654f24e41e15919304 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Wed, 16 Sep 2009 19:38:53 +0100 Subject: ASoC: Add S3C64xx IIS CDCLK source selection CDCLK 
can either be an output generated by the CPU, intended for use as the CODEC master clock, or an input (probably from the CODEC) providing a master clock for the IIS block. Signed-off-by: Mark Brown --- arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h index 07659dad1748..abf2fbc2eb2f 100644 --- a/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h +++ b/arch/arm/plat-s3c/include/plat/regs-s3c2412-iis.h @@ -67,6 +67,8 @@ #define S3C2412_IISMOD_BCLK_MASK (3 << 1) #define S3C2412_IISMOD_8BIT (1 << 0) +#define S3C64XX_IISMOD_CDCLKCON (1 << 12) + #define S3C2412_IISPSR_PSREN (1 << 15) #define S3C2412_IISFIC_TXFLUSH (1 << 15) -- cgit v1.2.3 From c1558b55c55620f54f0f78cb8295947a2a3f6de2 Mon Sep 17 00:00:00 2001 From: Alessandro Rubini Date: Sun, 20 Sep 2009 23:28:24 +0200 Subject: ARM Nomadik: use new OneNand name and pdata This enables the onenand with the new platform device name. Moreover, it fixes a related typo that doesn't deserve a separate patch. Signed-off-by: Alessandro Rubini Acked-by: Andrea Gallo Signed-off-by: David Woodhouse --- arch/arm/mach-nomadik/board-nhk8815.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c index 6bfd537d5afb..781b915819c8 100644 --- a/arch/arm/mach-nomadik/board-nhk8815.c +++ b/arch/arm/mach-nomadik/board-nhk8815.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -142,7 +143,7 @@ static struct mtd_partition nhk8815_onenand_partitions[] = { } }; -static struct flash_platform_data nhk8815_onenand_data = { +static struct onenand_platform_data nhk8815_onenand_data = { .parts = nhk8815_onenand_partitions, .nr_parts = ARRAY_SIZE(nhk8815_onenand_partitions), }; @@ -156,7 +157,7 @@ static struct resource nhk8815_onenand_resource[] = { }; static struct platform_device nhk8815_onenand_device = { - .name = "onenand", + .name = "onenand-flash", .id = -1, .dev = { .platform_data = &nhk8815_onenand_data, @@ -167,10 +168,10 @@ static struct platform_device nhk8815_onenand_device = { static void __init nhk8815_onenand_init(void) { -#ifdef CONFIG_ONENAND +#ifdef CONFIG_MTD_ONENAND /* Set up SMCS0 for OneNand */ - writel(0x000030db, FSMC_BCR0); - writel(0x02100551, FSMC_BTR0); + writel(0x000030db, FSMC_BCR(0)); + writel(0x02100551, FSMC_BTR(0)); #endif } -- cgit v1.2.3 From 9f0cf4adb6aa0bfccf675c938124e68f7f06349d Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Sat, 26 Sep 2009 14:33:01 +0200 Subject: x86: Use __builtin_object_size() to validate the buffer size for copy_from_user() gcc (4.x) supports the __builtin_object_size() builtin, which reports the size of an object that a pointer point to, when known at compile time. If the buffer size is not known at compile time, a constant -1 is returned. This patch uses this feature to add a sanity check to copy_from_user(); if the target buffer is known to be smaller than the copy size, the copy is aborted and a WARNing is emitted in memory debug mode. These extra checks compile away when the object size is not known, or if both the buffer size and the copy length are constants. 
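A rough user-space illustration of the builtin this relies on (sketch only, not the kernel code; compile with optimization so gcc can track the pointer):

    #include <stdio.h>
    #include <stdlib.h>

    /* __builtin_object_size(ptr, 0) folds to the object size when gcc can
     * prove it at compile time, and to (size_t)-1 when it cannot, which is
     * why the copy_from_user() wrappers in the hunks below treat sz == -1
     * as "size unknown, allow the copy". */
    static char fixed_buf[16];

    int main(void)
    {
            char *unknown = malloc(rand() & 0xff);

            printf("%zu\n", __builtin_object_size(fixed_buf, 0)); /* 16 */
            printf("%zu\n", __builtin_object_size(unknown, 0));   /* (size_t)-1 */

            free(unknown);
            return 0;
    }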
Signed-off-by: Arjan van de Ven LKML-Reference: <20090926143301.2c396b94@infradead.org> Signed-off-by: Ingo Molnar --- arch/x86/include/asm/uaccess_32.h | 19 ++++++++++++++++++- arch/x86/include/asm/uaccess_64.h | 19 ++++++++++++++++++- arch/x86/kernel/x8664_ksyms_64.c | 2 +- arch/x86/lib/copy_user_64.S | 4 ++-- arch/x86/lib/usercopy_32.c | 4 ++-- 5 files changed, 41 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 632fb44b4cb5..582d6aef7417 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -187,9 +187,26 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n); -unsigned long __must_check copy_from_user(void *to, +unsigned long __must_check _copy_from_user(void *to, const void __user *from, unsigned long n); + +static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, + unsigned long n) +{ + int sz = __compiletime_object_size(to); + int ret = -EFAULT; + + if (likely(sz == -1 || sz >= n)) + ret = _copy_from_user(to, from, n); +#ifdef CONFIG_DEBUG_VM + else + WARN(1, "Buffer overflow detected!\n"); +#endif + return ret; +} + long __must_check strncpy_from_user(char *dst, const char __user *src, long count); long __must_check __strncpy_from_user(char *dst, diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index db24b215fc50..ce6fec7ce38d 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -21,10 +21,27 @@ copy_user_generic(void *to, const void *from, unsigned len); __must_check unsigned long copy_to_user(void __user *to, const void *from, unsigned len); __must_check unsigned long -copy_from_user(void *to, const void __user *from, unsigned len); +_copy_from_user(void *to, const void __user *from, unsigned len); __must_check unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len); +static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, + unsigned long n) +{ + int sz = __compiletime_object_size(to); + int ret = -EFAULT; + + if (likely(sz == -1 || sz >= n)) + ret = _copy_from_user(to, from, n); +#ifdef CONFIG_DEBUG_VM + else + WARN(1, "Buffer overflow detected!\n"); +#endif + return ret; +} + + static __always_inline __must_check int __copy_from_user(void *dst, const void __user *src, unsigned size) { diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index 3909e3ba5ce3..a0cdd8cc1d67 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c @@ -30,7 +30,7 @@ EXPORT_SYMBOL(__put_user_8); EXPORT_SYMBOL(copy_user_generic); EXPORT_SYMBOL(__copy_user_nocache); -EXPORT_SYMBOL(copy_from_user); +EXPORT_SYMBOL(_copy_from_user); EXPORT_SYMBOL(copy_to_user); EXPORT_SYMBOL(__copy_from_user_inatomic); diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index 6ba0f7bb85ea..4be3c415b3e9 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -78,7 +78,7 @@ ENTRY(copy_to_user) ENDPROC(copy_to_user) /* Standard copy_from_user with segment limit checking */ -ENTRY(copy_from_user) +ENTRY(_copy_from_user) CFI_STARTPROC GET_THREAD_INFO(%rax) movq %rsi,%rcx @@ -88,7 +88,7 @@ ENTRY(copy_from_user) jae bad_from_user ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string CFI_ENDPROC 
-ENDPROC(copy_from_user) +ENDPROC(_copy_from_user) ENTRY(copy_user_generic) CFI_STARTPROC diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 1f118d462acc..8498684e45b0 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -874,7 +874,7 @@ EXPORT_SYMBOL(copy_to_user); * data to the requested size using zero bytes. */ unsigned long -copy_from_user(void *to, const void __user *from, unsigned long n) +_copy_from_user(void *to, const void __user *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) n = __copy_from_user(to, from, n); @@ -882,4 +882,4 @@ copy_from_user(void *to, const void __user *from, unsigned long n) memset(to, 0, n); return n; } -EXPORT_SYMBOL(copy_from_user); +EXPORT_SYMBOL(_copy_from_user); -- cgit v1.2.3 From 2ce4da2efcaca0dcbfed7a1f24177f18e75e0e89 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 26 Sep 2009 20:42:10 -0700 Subject: sparc: Support HW cache events. First supported chip for HW cache events is Ultra-IIIi. Signed-off-by: David S. Miller --- arch/sparc/kernel/perf_event.c | 145 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 139 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2d6a1b10c81d..48375f694673 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -68,8 +68,19 @@ struct perf_event_map { #define PIC_LOWER 0x02 }; +#define C(x) PERF_COUNT_HW_CACHE_##x + +#define CACHE_OP_UNSUPPORTED 0xfffe +#define CACHE_OP_NONSENSE 0xffff + +typedef struct perf_event_map cache_map_t + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + struct sparc_pmu { const struct perf_event_map *(*event_map)(int); + const cache_map_t *cache_map; int max_events; int upper_shift; int lower_shift; @@ -92,8 +103,96 @@ static const struct perf_event_map *ultra3i_event_map(int event_id) return &ultra3i_perfmon_event_map[event_id]; } +static const cache_map_t ultra3i_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, + [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, + [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, 
+[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + static const struct sparc_pmu ultra3i_pmu = { .event_map = ultra3i_event_map, + .cache_map = &ultra3i_cache_map, .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), .upper_shift = 11, .lower_shift = 4, @@ -375,6 +474,37 @@ void perf_event_release_pmc(void) } } +static const struct perf_event_map *sparc_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct perf_event_map *pmap; + + if (!sparc_pmu->cache_map) + return ERR_PTR(-ENOENT); + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); + + if (pmap->encoding == CACHE_OP_UNSUPPORTED) + return ERR_PTR(-ENOENT); + + if (pmap->encoding == CACHE_OP_NONSENSE) + return ERR_PTR(-EINVAL); + + return pmap; +} + static void hw_perf_event_destroy(struct perf_event *event) { perf_event_release_pmc(); @@ -390,12 +520,17 @@ static int __hw_perf_event_init(struct perf_event *event) if (atomic_read(&nmi_active) < 0) return -ENODEV; - if (attr->type != PERF_TYPE_HARDWARE) + if (attr->type == PERF_TYPE_HARDWARE) { + if (attr->config >= sparc_pmu->max_events) + return -EINVAL; + pmap = sparc_pmu->event_map(attr->config); + } else if (attr->type == PERF_TYPE_HW_CACHE) { + pmap = sparc_map_cache_event(attr->config); + if (IS_ERR(pmap)) + return PTR_ERR(pmap); + } else return -EOPNOTSUPP; - if (attr->config >= sparc_pmu->max_events) - return -EINVAL; - perf_event_grab_pmc(); event->destroy = hw_perf_event_destroy; @@ -417,8 +552,6 @@ static int __hw_perf_event_init(struct perf_event *event) atomic64_set(&hwc->period_left, hwc->sample_period); } - pmap = sparc_pmu->event_map(attr->config); - enc = pmap->encoding; if (pmap->pic_mask & PIC_UPPER) { hwc->idx = PIC_UPPER_INDEX; -- cgit v1.2.3 From 28e8f9bead060aafc630a4256d23e2a55fb8b97d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 26 Sep 2009 20:54:22 -0700 Subject: sparc: Support all ultra3 and ultra4 derivatives. For the generic events we support, all of these chips have the same encodings as ultra3i. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 48375f694673..8abdc4d1baa5 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -91,19 +91,19 @@ struct sparc_pmu { int lower_nop; }; -static const struct perf_event_map ultra3i_perfmon_event_map[] = { +static const struct perf_event_map ultra3_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, }; -static const struct perf_event_map *ultra3i_event_map(int event_id) +static const struct perf_event_map *ultra3_event_map(int event_id) { - return &ultra3i_perfmon_event_map[event_id]; + return &ultra3_perfmon_event_map[event_id]; } -static const cache_map_t ultra3i_cache_map = { +static const cache_map_t ultra3_cache_map = { [C(L1D)] = { [C(OP_READ)] = { [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, @@ -190,10 +190,10 @@ static const cache_map_t ultra3i_cache_map = { }, }; -static const struct sparc_pmu ultra3i_pmu = { - .event_map = ultra3i_event_map, - .cache_map = &ultra3i_cache_map, - .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), +static const struct sparc_pmu ultra3_pmu = { + .event_map = ultra3_event_map, + .cache_map = &ultra3_cache_map, + .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), .upper_shift = 11, .lower_shift = 4, .event_mask = 0x3f, @@ -658,8 +658,11 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { static bool __init supported_pmu(void) { - if (!strcmp(sparc_pmu_type, "ultra3i")) { - sparc_pmu = &ultra3i_pmu; + if (!strcmp(sparc_pmu_type, "ultra3") || + !strcmp(sparc_pmu_type, "ultra3+") || + !strcmp(sparc_pmu_type, "ultra3i") || + !strcmp(sparc_pmu_type, "ultra4+")) { + sparc_pmu = &ultra3_pmu; return true; } if (!strcmp(sparc_pmu_type, "niagara2")) { -- cgit v1.2.3 From d0b86480f5b33f4a86d7c106706d6e0dcd1935ce Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 26 Sep 2009 21:04:16 -0700 Subject: sparc: Add Niagara2 HW cache event support. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 88 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 8abdc4d1baa5..6f01e04cc323 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -215,8 +215,96 @@ static const struct perf_event_map *niagara2_event_map(int event_id) return &niagara2_perfmon_event_map[event_id]; } +static const cache_map_t niagara2_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, + [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + static const struct sparc_pmu niagara2_pmu = { .event_map = niagara2_event_map, + .cache_map = &niagara2_cache_map, .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), .upper_shift = 19, .lower_shift = 6, -- cgit v1.2.3 From 7eebda60d57a0862a410f45122c73b8bbe6e260c Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sat, 26 Sep 2009 21:23:41 -0700 Subject: sparc: Niagara1 perf event support. This chip is extremely limited, and many of the events supported are approximations at best. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 119 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 6f01e04cc323..9541b456c3ee 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -201,6 +201,121 @@ static const struct sparc_pmu ultra3_pmu = { .lower_nop = 0x14, }; +/* Niagara1 is very limited. The upper PIC is hard-locked to count + * only instructions, so it is free running which creates all kinds of + * problems. Some hardware designs make one wonder if the creastor + * even looked at how this stuff gets used by software. + */ +static const struct perf_event_map niagara1_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, + [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, +}; + +static const struct perf_event_map *niagara1_event_map(int event_id) +{ + return &niagara1_perfmon_event_map[event_id]; +} + +static const cache_map_t niagara1_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, + [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +static const struct sparc_pmu niagara1_pmu = { + .event_map = niagara1_event_map, 
+ .cache_map = &niagara1_cache_map, + .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), + .upper_shift = 0, + .lower_shift = 4, + .event_mask = 0x7, + .upper_nop = 0x0, + .lower_nop = 0x0, +}; + static const struct perf_event_map niagara2_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, @@ -753,6 +868,10 @@ static bool __init supported_pmu(void) sparc_pmu = &ultra3_pmu; return true; } + if (!strcmp(sparc_pmu_type, "niagara")) { + sparc_pmu = &niagara1_pmu; + return true; + } if (!strcmp(sparc_pmu_type, "niagara2")) { sparc_pmu = &niagara2_pmu; return true; -- cgit v1.2.3 From d6b58772dc39262629708e5f3c30ef06de290894 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 26 Apr 2009 23:53:34 -0400 Subject: parisc: tracehook_report_syscall This makes parisc use the standard tracehook_report_syscall_entry and tracehook_report_syscall_exit hooks in . To do this, we need to access current->thread.regs, and to know whether we're entering or exiting the syscall, so add this to syscall_trace. Signed-off-by: Kyle McMartin --- arch/parisc/kernel/ptrace.c | 23 +++++++++++------------ arch/parisc/kernel/syscall.S | 4 +++- 2 files changed, 14 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 927db3668b6f..2118ed02e886 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -264,21 +265,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, #endif -void syscall_trace(void) +void syscall_trace(int why) { + struct pt_regs *regs = ¤t->thread.regs; + if (!test_thread_flag(TIF_SYSCALL_TRACE)) return; - if (!(current->ptrace & PT_PTRACED)) - return; - ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) - ? 0x80 : 0)); /* - * this isn't the same as continuing with a signal, but it will do - * for normal use. strace only continues with a signal if the - * stopping signal is not SIGTRAP. -brl + * Report the system call for tracing. Entry tracing can + * decide to abort the call. We handle that by setting an + * invalid syscall number (-1) to force an ENOSYS error. 
*/ - if (current->exit_code) { - send_sig(current->exit_code, current, 1); - current->exit_code = 0; - } + if (why) + tracehook_report_syscall_exit(regs, 0); + else if (tracehook_report_syscall_entry(regs)) + regs->gr[20] = -1; /* force ENOSYS */ } diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 59fc1a43ec3e..5698668166cd 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -288,6 +288,7 @@ tracesys: STREG %r18,PT_GR18(%r2) /* Finished saving things for the debugger */ + ldi 0,%r26 /* syscall entry */ ldil L%syscall_trace,%r1 ldil L%tracesys_next,%r2 be R%syscall_trace(%sr7,%r1) @@ -336,6 +337,7 @@ tracesys_exit: #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif + ldi 1,%r26 /* syscall exit */ bl syscall_trace, %r2 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ @@ -358,7 +360,7 @@ tracesys_sigexit: ldo -16(%r30),%r29 /* Reference param save area */ #endif bl syscall_trace, %r2 - nop + ldi 1,%r26 /* syscall exit */ ldil L%syscall_exit_rfi,%r1 be,n R%syscall_exit_rfi(%sr7,%r1) -- cgit v1.2.3 From ecf02de5a1491592d1b68d8095ff62ea3aaee67e Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Mon, 27 Apr 2009 00:29:53 -0400 Subject: parisc: tracehook_signal_handler This makes parisc call the standard tracehook_signal_handler hook in after setting up a signal handler. Signed-off-by: Kyle McMartin --- arch/parisc/kernel/signal.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 8eb3c63c407a..c5f3d823d42b 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -468,6 +469,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigaddset(¤t->blocked,sig); recalc_sigpending(); spin_unlock_irq(¤t->sighand->siglock); + + tracehook_signal_handler(sig, info, ka, regs, 0); + return 1; } -- cgit v1.2.3 From 5837d42f8ae9874fd93928c6784b2224f0edd4c9 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 5 Jul 2009 14:39:58 -0400 Subject: parisc: add missing TI_TASK macro in syscall.S LDREG 0(%r1),%r1 really wants to be accessing thread_info.task, instead of hardcoding the 0. Signed-off-by: Kyle McMartin --- arch/parisc/kernel/syscall.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 5698668166cd..07de1bbb69a5 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -355,7 +355,7 @@ tracesys_exit: tracesys_sigexit: ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ - LDREG 0(%r1), %r1 + LDREG TI_TASK(%r1), %r1 #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif -- cgit v1.2.3 From 2798af1abb5d560b926fd07ec58c5dcc6d3484d8 Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 5 Jul 2009 14:36:16 -0400 Subject: parisc: split syscall_trace into two halves Instead of fiddling with gr[20], restructure code to return whether or not to -ENOSYS. (Also do a bit of fiddling to let them take pt_regs directly instead of re-computing it.) 
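Why returning -1L is enough can be sketched generically (hypothetical dispatcher, not the actual parisc code in the diff below):

    #include <errno.h>

    /* Sketch only: the syscall dispatcher bounds-checks the number it is
     * handed, so an out-of-range value such as the -1L returned by entry
     * tracing falls through to the ENOSYS path, aborting the call. */
    static long dispatch_sketch(long nr, unsigned int nr_syscalls)
    {
            if (nr < 0 || nr >= nr_syscalls)
                    return -ENOSYS;
            /* ... otherwise index the syscall table and call the handler ... */
            return 0;
    }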
Signed-off-by: Kyle McMartin --- arch/parisc/kernel/ptrace.c | 25 ++++++++++++------------- arch/parisc/kernel/syscall.S | 22 +++++++++++++--------- 2 files changed, 25 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2118ed02e886..08f6d2cbf0e0 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -264,20 +264,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } #endif +long do_syscall_trace_enter(struct pt_regs *regs) +{ + if (test_thread_flag(TIF_SYSCALL_TRACE) && + tracehook_report_syscall_entry(regs)) + return -1L; + + return regs->gr[20]; +} -void syscall_trace(int why) +void do_syscall_trace_exit(struct pt_regs *regs) { - struct pt_regs *regs = ¤t->thread.regs; + int stepping = !!(current->ptrace & (PT_SINGLESTEP|PT_BLOCKSTEP)); - if (!test_thread_flag(TIF_SYSCALL_TRACE)) - return; - /* - * Report the system call for tracing. Entry tracing can - * decide to abort the call. We handle that by setting an - * invalid syscall number (-1) to force an ENOSYS error. - */ - if (why) - tracehook_report_syscall_exit(regs, 0); - else if (tracehook_report_syscall_entry(regs)) - regs->gr[20] = -1; /* force ENOSYS */ + if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) + tracehook_report_syscall_exit(regs, stepping); } diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 07de1bbb69a5..f5f96021caa0 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -288,19 +288,23 @@ tracesys: STREG %r18,PT_GR18(%r2) /* Finished saving things for the debugger */ - ldi 0,%r26 /* syscall entry */ - ldil L%syscall_trace,%r1 + copy %r2,%r26 + ldil L%do_syscall_trace_enter,%r1 ldil L%tracesys_next,%r2 - be R%syscall_trace(%sr7,%r1) + be R%do_syscall_trace_enter(%sr7,%r1) ldo R%tracesys_next(%r2),%r2 -tracesys_next: +tracesys_next: + /* do_syscall_trace_enter either returned the syscallno, or -1L, + * so we skip restoring the PT_GR20 below, since we pulled it from + * task->thread.regs.gr[20] above. + */ + copy %ret0,%r20 ldil L%sys_call_table,%r1 ldo R%sys_call_table(%r1), %r19 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG TI_TASK(%r1), %r1 - LDREG TASK_PT_GR20(%r1), %r20 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ LDREG TASK_PT_GR25(%r1), %r25 LDREG TASK_PT_GR24(%r1), %r24 @@ -337,8 +341,8 @@ tracesys_exit: #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif - ldi 1,%r26 /* syscall exit */ - bl syscall_trace, %r2 + ldo TASK_REGS(%r1),%r26 + bl do_syscall_trace_exit,%r2 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG TI_TASK(%r1), %r1 @@ -359,8 +363,8 @@ tracesys_sigexit: #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif - bl syscall_trace, %r2 - ldi 1,%r26 /* syscall exit */ + bl do_syscall_trace_exit,%r2 + ldo TASK_REGS(%r1),%r26 ldil L%syscall_exit_rfi,%r1 be,n R%syscall_exit_rfi(%sr7,%r1) -- cgit v1.2.3 From ecd3d4bc06e48357d12e730482577c756a9f2dbc Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 27 Sep 2009 23:03:02 -0400 Subject: parisc: stop using task->ptrace for {single,block}step flags task->ptrace flags belong to generic code, so instead thief some TIF_ bits to use. Somewhat risky conversion of code to test TASK_FLAGS instead of TASK_PTRACE in assembly, but it looks alright in the end. 
Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/thread_info.h | 4 ++++ arch/parisc/kernel/asm-offsets.c | 4 ++-- arch/parisc/kernel/entry.S | 21 +++++++++++---------- arch/parisc/kernel/ptrace.c | 14 ++++++++------ 4 files changed, 25 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index ac775a76bff7..0399548c5234 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -60,6 +60,8 @@ struct thread_info { #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ #define TIF_FREEZE 7 /* is freezing for suspend */ #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ +#define TIF_SINGLESTEP 9 /* single stepping? */ +#define TIF_BLOCKSTEP 10 /* branch stepping? */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) @@ -69,6 +71,8 @@ struct thread_info { #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_FREEZE (1 << TIF_FREEZE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) +#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 699cf8ef2118..fcd3c707bf12 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -270,8 +270,8 @@ int main(void) DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); BLANK(); - DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT); - DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT); + DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); + DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); BLANK(); DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 8c4712b74dc1..3a44f7f704fa 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -2047,12 +2047,13 @@ syscall_do_signal: b,n syscall_check_sig syscall_restore: - /* Are we being ptraced? */ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 - ldw TASK_PTRACE(%r1), %r19 - bb,< %r19,31,syscall_restore_rfi - nop + /* Are we being ptraced? 
*/ + ldw TASK_FLAGS(%r1),%r19 + ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 + and,COND(=) %r19,%r2,%r0 + b,n syscall_restore_rfi ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ rest_fp %r19 @@ -2113,16 +2114,16 @@ syscall_restore_rfi: ldi 0x0b,%r20 /* Create new PSW */ depi -1,13,1,%r20 /* C, Q, D, and I bits */ - /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are - * set in include/linux/ptrace.h and converted to PA bitmap + /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are + * set in thread_info.h and converted to PA bitmap * numbers in asm-offsets.c */ - /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ - extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 + /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */ + extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0 depi -1,27,1,%r20 /* R bit */ - /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ - extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 + /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */ + extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 depi -1,7,1,%r20 /* T bit */ STREG %r20,TASK_PT_PSW(%r1) diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 08f6d2cbf0e0..c4f49e45129d 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -36,7 +36,8 @@ */ void ptrace_disable(struct task_struct *task) { - task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); + clear_tsk_thread_flag(task, TIF_SINGLESTEP); + clear_tsk_thread_flag(task, TIF_BLOCKSTEP); /* make sure the trap bits are not set */ pa_psw(task)->r = 0; @@ -56,8 +57,8 @@ void user_disable_single_step(struct task_struct *task) void user_enable_single_step(struct task_struct *task) { - task->ptrace &= ~PT_BLOCKSTEP; - task->ptrace |= PT_SINGLESTEP; + clear_tsk_thread_flag(task, TIF_BLOCKSTEP); + set_tsk_thread_flag(task, TIF_SINGLESTEP); if (pa_psw(task)->n) { struct siginfo si; @@ -99,8 +100,8 @@ void user_enable_single_step(struct task_struct *task) void user_enable_block_step(struct task_struct *task) { - task->ptrace &= ~PT_SINGLESTEP; - task->ptrace |= PT_BLOCKSTEP; + clear_tsk_thread_flag(task, TIF_SINGLESTEP); + set_tsk_thread_flag(task, TIF_BLOCKSTEP); /* Enable taken branch trap. */ pa_psw(task)->r = 0; @@ -275,7 +276,8 @@ long do_syscall_trace_enter(struct pt_regs *regs) void do_syscall_trace_exit(struct pt_regs *regs) { - int stepping = !!(current->ptrace & (PT_SINGLESTEP|PT_BLOCKSTEP)); + int stepping = test_thread_flag(TIF_SINGLESTEP) || + test_thread_flag(TIF_BLOCKSTEP); if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, stepping); -- cgit v1.2.3 From 40e03b6857051ea61ebdb4a53e65485acfb1d98e Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 5 Jul 2009 15:59:56 -0400 Subject: parisc: add skeleton syscall.h Needed for lib/syscall.c when HAVE_ARCH_TRACEHOOK. Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/ptrace.h | 5 ++++- arch/parisc/include/asm/syscall.h | 40 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 arch/parisc/include/asm/syscall.h (limited to 'arch') diff --git a/arch/parisc/include/asm/ptrace.h b/arch/parisc/include/asm/ptrace.h index 302f68dc889c..aead40b16dd8 100644 --- a/arch/parisc/include/asm/ptrace.h +++ b/arch/parisc/include/asm/ptrace.h @@ -59,8 +59,11 @@ void user_enable_block_step(struct task_struct *task); #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) #define user_space(regs) (((regs)->iasq[1] != 0) ? 
1 : 0) #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) +#define user_stack_pointer(regs) ((regs)->gr[30]) unsigned long profile_pc(struct pt_regs *); extern void show_regs(struct pt_regs *); -#endif + + +#endif /* __KERNEL__ */ #endif diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h new file mode 100644 index 000000000000..8bdfd2c8c39f --- /dev/null +++ b/arch/parisc/include/asm/syscall.h @@ -0,0 +1,40 @@ +/* syscall.h */ + +#ifndef _ASM_PARISC_SYSCALL_H_ +#define _ASM_PARISC_SYSCALL_H_ + +#include +#include + +static inline long syscall_get_nr(struct task_struct *tsk, + struct pt_regs *regs) +{ + return regs->gr[20]; +} + +static inline void syscall_get_arguments(struct task_struct *tsk, + struct pt_regs *regs, unsigned int i, + unsigned int n, unsigned long *args) +{ + BUG_ON(i); + + switch (n) { + case 6: + args[5] = regs->gr[21]; + case 5: + args[4] = regs->gr[22]; + case 4: + args[3] = regs->gr[23]; + case 3: + args[2] = regs->gr[24]; + case 2: + args[1] = regs->gr[25]; + case 1: + args[0] = regs->gr[26]; + break; + default: + BUG(); + } +} + +#endif /*_ASM_PARISC_SYSCALL_H_*/ -- cgit v1.2.3 From 81bf550d9cdfe0325eb1504b06c9f6511b442c1a Mon Sep 17 00:00:00 2001 From: Kyle McMartin Date: Sun, 27 Sep 2009 23:04:47 -0400 Subject: parisc: HAVE_ARCH_TRACEHOOK Let 'er rip. Signed-off-by: Kyle McMartin --- arch/parisc/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index f388dc68f605..524d9352f17e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -18,6 +18,7 @@ config PARISC select BUG select HAVE_PERF_EVENTS select GENERIC_ATOMIC64 if !64BIT + select HAVE_ARCH_TRACEHOOK help The PA-RISC microprocessor is designed by Hewlett-Packard and used in many of their workstations & servers (HP9000 700 and 800 series, -- cgit v1.2.3 From 530e949cc99ec6afba5207cbcd5f45c8cc8accc6 Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Tue, 22 Sep 2009 13:35:32 +0000 Subject: parisc: includecheck fix: signal.c fix the following 'make includecheck' warning: arch/parisc/kernel/signal.c: linux/compat.h is included more than once. Signed-off-by: Jaswinder Singh Rajput Signed-off-by: Kyle McMartin --- arch/parisc/kernel/signal.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index c5f3d823d42b..e8467e4aa8d1 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -35,7 +35,6 @@ #include #ifdef CONFIG_COMPAT -#include #include "signal32.h" #endif -- cgit v1.2.3 From 4255f0d2a132fb38dbe5b5ad74e27ba472507415 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sun, 27 Sep 2009 23:26:01 -0400 Subject: parisc: rename parisc's vmalloc_start to parisc_vmalloc_start building kernel 2.6.32(pre), gives this compiler warning: /linus-linux-2.6/mm/vmalloc.c: In function 'pcpu_get_vm_areas': /linus-linux-2.6/mm/vmalloc.c:2104: warning: 'vmalloc_start' is used uninitialized in this function The reason is, that the code in mm/vmalloc defines a local variable called vmalloc_start, which is already defined as global variable in parisc's code. To avoid this kind of problems in future, I suggest to rename the parisc variable to parisc_vmalloc_start. 
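The mechanism behind the warning can be reduced to a few lines (hypothetical reduction of the two files involved, not the real code):

    /* arch header (cf. the fixmap.h hunk below): the macro expands to a
     * bare identifier, so whichever "vmalloc_start" is in scope gets used. */
    void *vmalloc_start;                                  /* arch-wide global */
    #define VMALLOC_START ((unsigned long)vmalloc_start)

    void pcpu_like_function(void)
    {
            /* Expands to:
             *   unsigned long vmalloc_start = (unsigned long)vmalloc_start;
             * i.e. the local is initialized from itself, which is the
             * "used uninitialized" gcc complains about.  Renaming the arch
             * global to parisc_vmalloc_start removes the collision. */
            unsigned long vmalloc_start = VMALLOC_START;

            (void)vmalloc_start;
    }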
Signed-off-by: Helge Deller Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/fixmap.h | 4 ++-- arch/parisc/mm/init.c | 11 ++++++----- 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index de3fe3a18229..6fec4d4a1a18 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h @@ -21,9 +21,9 @@ #define KERNEL_MAP_END (TMPALIAS_MAP_START) #ifndef __ASSEMBLY__ -extern void *vmalloc_start; +extern void *parisc_vmalloc_start; #define PCXL_DMA_MAP_SIZE (8*1024*1024) -#define VMALLOC_START ((unsigned long)vmalloc_start) +#define VMALLOC_START ((unsigned long)parisc_vmalloc_start) #define VMALLOC_END (KERNEL_MAP_END) #endif /*__ASSEMBLY__*/ diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index d5aca31fddbb..13b6e3e59b99 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -434,8 +434,8 @@ void mark_rodata_ro(void) #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ & ~(VM_MAP_OFFSET-1))) -void *vmalloc_start __read_mostly; -EXPORT_SYMBOL(vmalloc_start); +void *parisc_vmalloc_start __read_mostly; +EXPORT_SYMBOL(parisc_vmalloc_start); #ifdef CONFIG_PA11 unsigned long pcxl_dma_start __read_mostly; @@ -496,13 +496,14 @@ void __init mem_init(void) #ifdef CONFIG_PA11 if (hppa_dma_ops == &pcxl_dma_ops) { pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); - vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); + parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + + PCXL_DMA_MAP_SIZE); } else { pcxl_dma_start = 0; - vmalloc_start = SET_MAP_OFFSET(MAP_START); + parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); } #else - vmalloc_start = SET_MAP_OFFSET(MAP_START); + parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); #endif printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", -- cgit v1.2.3 From fe579c69c6d437009460feeb4d748c2f2020a5f2 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 4 Aug 2009 22:27:07 +0200 Subject: parisc: correct use of SHF_ALLOC SHF_ALLOC is suitable for testing against the sh_flags field, not the sh_type field. Signed-off-by: Julia Lawall Signed-off-by: Kyle McMartin --- arch/parisc/kernel/module.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 61ee0eec4e69..212074653df7 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr, * ourselves */ for (i = 1; i < hdr->e_shnum; i++) { if(sechdrs[i].sh_type == SHT_SYMTAB - && (sechdrs[i].sh_type & SHF_ALLOC)) { + && (sechdrs[i].sh_flags & SHF_ALLOC)) { int strindex = sechdrs[i].sh_link; /* FIXME: AWFUL HACK * The cast is to drop the const from -- cgit v1.2.3 From f32ed3954c5e365907738b1206e849b6bbe9bcef Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Sun, 27 Sep 2009 17:25:08 -0400 Subject: parisc: Make THREAD_SIZE available to assembly files and linker scripts. 
Signed-off-by: Tim Abbott Acked-by: Helge Deller Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/thread_info.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 0399548c5234..7ecc1039cfed 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -32,6 +32,11 @@ struct thread_info { #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) +/* how to get the thread information struct from C */ +#define current_thread_info() ((struct thread_info *)mfctl(30)) + +#endif /* !__ASSEMBLY */ + /* thread information allocation */ #define THREAD_SIZE_ORDER 2 @@ -40,11 +45,6 @@ struct thread_info { #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) -/* how to get the thread information struct from C */ -#define current_thread_info() ((struct thread_info *)mfctl(30)) - -#endif /* !__ASSEMBLY */ - #define PREEMPT_ACTIVE_BIT 28 #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) -- cgit v1.2.3 From 01552f765cae873d0ea3cca1e64e41dfd62659e6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 27 Sep 2009 20:43:07 -0700 Subject: sparc64: Add initial perf event conflict resolution and checks. Cribbed from powerpc code, as usual. :-) Currently it is only used to validate that all counters have the same user/kernel/hv attributes. Signed-off-by: David S. Miller --- arch/sparc/kernel/perf_event.c | 82 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 77 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 9541b456c3ee..919952498155 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -713,12 +713,66 @@ static void hw_perf_event_destroy(struct perf_event *event) perf_event_release_pmc(); } +static int check_excludes(struct perf_event **evts, int n_prev, int n_new) +{ + int eu = 0, ek = 0, eh = 0; + struct perf_event *event; + int i, n, first; + + n = n_prev + n_new; + if (n <= 1) + return 0; + + first = 1; + for (i = 0; i < n; i++) { + event = evts[i]; + if (first) { + eu = event->attr.exclude_user; + ek = event->attr.exclude_kernel; + eh = event->attr.exclude_hv; + first = 0; + } else if (event->attr.exclude_user != eu || + event->attr.exclude_kernel != ek || + event->attr.exclude_hv != eh) { + return -EAGAIN; + } + } + + return 0; +} + +static int collect_events(struct perf_event *group, int max_count, + struct perf_event *evts[], u64 *events) +{ + struct perf_event *event; + int n = 0; + + if (!is_software_event(group)) { + if (n >= max_count) + return -1; + evts[n] = group; + events[n++] = group->hw.config; + } + list_for_each_entry(event, &group->sibling_list, group_entry) { + if (!is_software_event(event) && + event->state != PERF_EVENT_STATE_OFF) { + if (n >= max_count) + return -1; + evts[n] = event; + events[n++] = event->hw.config; + } + } + return n; +} + static int __hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; + struct perf_event *evts[MAX_HWEVENTS]; struct hw_perf_event *hwc = &event->hw; const struct perf_event_map *pmap; - u64 enc; + u64 enc, events[MAX_HWEVENTS]; + int n; if (atomic_read(&nmi_active) < 0) return -ENODEV; @@ -734,9 +788,6 @@ static int __hw_perf_event_init(struct perf_event *event) } else return -EOPNOTSUPP; - perf_event_grab_pmc(); - 
event->destroy = hw_perf_event_destroy; - /* We save the enable bits in the config_base. So to * turn off sampling just write 'config', and to enable * things write 'config | config_base'. @@ -749,13 +800,34 @@ static int __hw_perf_event_init(struct perf_event *event) if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; + enc = pmap->encoding; + + n = 0; + if (event->group_leader != event) { + n = collect_events(event->group_leader, + perf_max_events - 1, + evts, events); + if (n < 0) + return -EINVAL; + } + events[n] = enc; + evts[n] = event; + + if (check_excludes(evts, n, 1)) + return -EINVAL; + + /* Try to do all error checking before this point, as unwinding + * state after grabbing the PMC is difficult. + */ + perf_event_grab_pmc(); + event->destroy = hw_perf_event_destroy; + if (!hwc->sample_period) { hwc->sample_period = MAX_PERIOD; hwc->last_period = hwc->sample_period; atomic64_set(&hwc->period_left, hwc->sample_period); } - enc = pmap->encoding; if (pmap->pic_mask & PIC_UPPER) { hwc->idx = PIC_UPPER_INDEX; enc <<= sparc_pmu->upper_shift; -- cgit v1.2.3 From 9609bfec6d869bc0d82ccfb909d5e72b7002d813 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 4 Aug 2009 14:54:41 +0200 Subject: parisc: convert to asm-generic/hardirq.h Signed-off-by: Christoph Hellwig Signed-off-by: Kyle McMartin --- arch/parisc/include/asm/hardirq.h | 20 +------------------- arch/parisc/kernel/irq.c | 5 ----- 2 files changed, 1 insertion(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index ce93133d5112..0d68184a76cb 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h @@ -1,29 +1,11 @@ /* hardirq.h: PA-RISC hard IRQ support. * * Copyright (C) 2001 Matthew Wilcox - * - * The locking is really quite interesting. There's a cpu-local - * count of how many interrupts are being handled, and a global - * lock. An interrupt can only be serviced if the global lock - * is free. You can't be sure no more interrupts are being - * serviced until you've acquired the lock and then checked - * all the per-cpu interrupt counts are all zero. It's a specialised - * br_lock, and that's exactly how Sparc does it. We don't because - * it's more locking for us. This way is lock-free in the interrupt path. */ #ifndef _PARISC_HARDIRQ_H #define _PARISC_HARDIRQ_H -#include -#include - -typedef struct { - unsigned long __softirq_pending; /* set_bit is used on this */ -} ____cacheline_aligned irq_cpustat_t; - -#include /* Standard mappings for irq_cpustat_t above */ - -void ack_bad_irq(unsigned int irq); +#include #endif /* _PARISC_HARDIRQ_H */ diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 330f536a9324..2e7610cb33d5 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -423,8 +423,3 @@ void __init init_IRQ(void) set_eiem(cpu_eiem); /* EIEM : enable all external intr */ } - -void ack_bad_irq(unsigned int irq) -{ - printk(KERN_WARNING "unexpected IRQ %d\n", irq); -} -- cgit v1.2.3 From 8cf06fc9bd660c28afea4115fd91a2cc4978eb54 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Mon, 28 Sep 2009 00:39:47 +0200 Subject: parisc: Fix linker script breakage. 
Signed-off-by: Helge Deller Signed-off-by: Kyle McMartin --- arch/parisc/kernel/vmlinux.lds.S | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 775be2791bc2..fda4baa059b5 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -28,6 +28,7 @@ #include #include #include +#include /* ld script to make hppa Linux kernel */ #ifndef CONFIG_64BIT @@ -134,6 +135,15 @@ SECTIONS __init_begin = .; INIT_TEXT_SECTION(16384) INIT_DATA_SECTION(16) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + .exit.data : + { + EXIT_DATA + } PERCPU(PAGE_SIZE) . = ALIGN(PAGE_SIZE); -- cgit v1.2.3 From 48ff3e04ffd5e1b578462eb1958f15ca122c7fbf Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 28 Sep 2009 15:04:04 +0900 Subject: sh: Handle ioport_map() cases for >= P1SEG addresses. This fixes up the case where certain drivers already do their own remapping and subsequently attempt to use the PIO calls for I/O. In this case there is no additional remapping that needs to be done, and the address can be cast into the cookie directly. Signed-off-by: Paul Mundt --- arch/sh/kernel/io_generic.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/io_generic.c b/arch/sh/kernel/io_generic.c index 4ff507239286..b8fa6524760a 100644 --- a/arch/sh/kernel/io_generic.c +++ b/arch/sh/kernel/io_generic.c @@ -147,6 +147,9 @@ void generic_outsl(unsigned long port, const void *src, unsigned long count) void __iomem *generic_ioport_map(unsigned long addr, unsigned int size) { + if (PXSEG(addr) >= P1SEG) + return (void __iomem *)addr; + return (void __iomem *)(addr + generic_io_base); } -- cgit v1.2.3 From d44ee12ad61ff7aa7a6344560bd430cb72fcbc27 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 28 Sep 2009 15:05:41 +0900 Subject: sh: Set the default I/O port base to P2SEG. This bumps up the default I/O base to P2SEG, which allows legacy probing to bail out gracefully rather than oopsing. Platforms that have a real PIO offset still need to fix this up on their own, although most platforms are content with P2SEG already. The previous change to teach ioport_map() about >= P1SEG offsets in combination with this patch allows both the already remapped and the legacy address probing to pass through and succeed. Fixes up an oops with i8042 on the sh7785lcr board. Signed-off-by: Paul Mundt --- arch/sh/kernel/machvec.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c index 548f6607fd0f..cbce639b108a 100644 --- a/arch/sh/kernel/machvec.c +++ b/arch/sh/kernel/machvec.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -133,4 +134,6 @@ void __init sh_mv_setup(void) if (!sh_mv.mv_nr_irqs) sh_mv.mv_nr_irqs = NR_IRQS; + + __set_io_port_base(P2SEG); } -- cgit v1.2.3 From 421f7a5dbdd1720c718eed187c46cf202529cf7d Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 26 Sep 2009 19:44:05 +0000 Subject: sh: Don't enable interrupts in the page fault path There's already code in do_page_fault() to conditionally enable interrupts, so we don't need to unconditionally enable them before calling it. This fixes a lockdep warning where we called trace_hardirqs_off() but with irqs still enabled.
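As a rough illustration of the pattern the fix relies on (the helper name below is hypothetical, not the actual arch/sh fault code): the C handler only turns interrupts back on when the interrupted context had them enabled, so the assembly entry path must not force them on with sti.

	/* Hypothetical sketch: re-enable IRQs in the C fault handler only if
	 * the faulting context had them enabled (e.g. by inspecting the saved
	 * regs->sr); the low-level entry code leaves the IRQ state untouched. */
	if (irqs_enabled_in_saved_sr(regs))	/* hypothetical helper */
		local_irq_enable();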
Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 0151933e5253..bb407ef0b91e 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -152,8 +152,6 @@ call_do_page_fault: mov.l 1f, r0 mov.l @r0, r6 - sti - mov.l 3f, r0 mov.l 4f, r1 mov r15, r4 -- cgit v1.2.3 From fe1dbfd3f992d5432d7463a5bd6c2fc96d3eccd8 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Mon, 28 Sep 2009 15:22:10 +0900 Subject: sh: magicpanelr2 and dreamcast can use the generic I/O base. There is now no need for the magicpanelr2 and dreamcast platforms to set their own I/O port bas values, given that the generic machvec code now sets this to P2SEG for everyone. Signed-off-by: Paul Mundt --- arch/sh/boards/board-magicpanelr2.c | 2 -- arch/sh/boards/mach-dreamcast/setup.c | 2 -- 2 files changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/board-magicpanelr2.c b/arch/sh/boards/board-magicpanelr2.c index 0a37c8bfc959..99ffc5f1c0dd 100644 --- a/arch/sh/boards/board-magicpanelr2.c +++ b/arch/sh/boards/board-magicpanelr2.c @@ -205,8 +205,6 @@ static void __init setup_port_multiplexing(void) static void __init mpr2_setup(char **cmdline_p) { - __set_io_port_base(0xa0000000); - /* set Pin Select Register A: * /PCC_CD1, /PCC_CD2, PCC_BVD1, PCC_BVD2, * /IOIS16, IRQ4, IRQ5, USB1d_SUSPEND diff --git a/arch/sh/boards/mach-dreamcast/setup.c b/arch/sh/boards/mach-dreamcast/setup.c index ebe99227d4e6..a4b7402d6176 100644 --- a/arch/sh/boards/mach-dreamcast/setup.c +++ b/arch/sh/boards/mach-dreamcast/setup.c @@ -42,8 +42,6 @@ static void __init dreamcast_setup(char **cmdline_p) /* Acknowledge any previous events */ /* XXX */ - __set_io_port_base(0xa0000000); - /* Assign all virtual IRQs to the System ASIC int. handler */ for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++) set_irq_chip_and_handler(i, &systemasic_int, -- cgit v1.2.3 From 7f8b7170cdda6645ed4fec4b2d415bf4cebd7839 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 27 Sep 2009 16:50:05 +0100 Subject: ARM: Fix section mismatch warning in Integrator pci_v3 WARNING: vmlinux.o(.text+0xc9d4): Section mismatch in reference from the function pci_v3_scan_bus() to the function .devinit.text:pci_scan_bus_parented() The function pci_v3_scan_bus() references the function __devinit pci_scan_bus_parented(). This is often because pci_v3_scan_bus lacks a __devinit annotation or the annotation of pci_scan_bus_parented is wrong. 
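For context, a minimal sketch of the annotation pattern involved (made-up function names, not the Integrator code itself): a caller that only runs at boot can itself be placed in the init sections, which makes its reference to __devinit code legitimate, since both caller and callee are then discarded together.

	#include <linux/init.h>

	static int __devinit example_probe(void)	/* may be discarded after boot */
	{
		return 0;
	}

	static int __init example_setup(void)		/* boot-time only, so the reference is fine */
	{
		return example_probe();
	}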
Signed-off-by: Russell King Acked-by: Sam Ravnborg --- arch/arm/mach-integrator/pci_v3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index f1d72b225450..901cc205015e 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c @@ -486,7 +486,7 @@ int __init pci_v3_setup(int nr, struct pci_sys_data *sys) return ret; } -struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *sys) +struct pci_bus * __init pci_v3_scan_bus(int nr, struct pci_sys_data *sys) { return pci_scan_bus(sys->busnr, &pci_v3_ops, sys); } -- cgit v1.2.3 From fac28e6df9b5fdfa0cf6b99eae022609dd1108d5 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 27 Sep 2009 17:32:47 +0100 Subject: ARM: Fix SA11x0 clocksource warning 8e19608 missed updating SA11x0, and thus: arch/arm/mach-sa1100/time.c:88: warning: initialization from incompatible pointer type Signed-off-by: Russell King --- arch/arm/mach-sa1100/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c index 95d92e8e56a8..b9cbb56d6e9d 100644 --- a/arch/arm/mach-sa1100/time.c +++ b/arch/arm/mach-sa1100/time.c @@ -77,7 +77,7 @@ static struct clock_event_device ckevt_sa1100_osmr0 = { .set_mode = sa1100_osmr0_set_mode, }; -static cycle_t sa1100_read_oscr(void) +static cycle_t sa1100_read_oscr(struct clocksource *s) { return OSCR; } -- cgit v1.2.3 From d80ade7b323152672bf66e74ec11c324332f6d1e Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 27 Sep 2009 17:40:03 +0100 Subject: ARM: Fix warning: #warning syscall migrate_pages not implemented We're not implementing this syscall (we're not NUMA) so we might as well silence this warning. Signed-off-by: Russell King --- arch/arm/include/asm/unistd.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 89f7eade20af..7020217fc49f 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -456,6 +456,7 @@ * Unimplemented (or alternatively implemented) syscalls */ #define __IGNORE_fadvise64_64 1 +#define __IGNORE_migrate_pages 1 #endif /* __KERNEL__ */ #endif /* __ASM_ARM_UNISTD_H */ -- cgit v1.2.3 From 041d785f807c8e92e11cc1bd2b3438f7823e63d2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 27 Sep 2009 17:40:42 +0100 Subject: ARM: Fix warning: unused variable 'highmem' Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4426ee67ceca..ce551ec2cb23 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -726,6 +726,8 @@ static void __init sanity_check_meminfo(void) bank->size = VMALLOC_MIN - __va(bank->start); } #else + bank->highmem = highmem; + /* * Check whether this memory bank would entirely overlap * the vmalloc area. -- cgit v1.2.3 From d5fc79cca2ec8d70200aee3c01cfc95fa35892ea Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Sat, 26 Sep 2009 20:20:06 +0530 Subject: ARM: Remove unused CONFIG SA1100_H3XXX Removed unused CONFIG SA1100_H3XXX from Kconfig and defconfig Reported-by: Robert P. J. 
Day Signed-off-by: Jaswinder Singh Rajput Acked-by: Dmitry Artamonow Cc: Philipp Zabel Cc: Kristoffer Ericson Signed-off-by: Russell King --- arch/arm/configs/h3600_defconfig | 1 - arch/arm/mach-sa1100/Kconfig | 5 ----- 2 files changed, 6 deletions(-) (limited to 'arch') diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig index 1502957db2c3..f6aed7747d4d 100644 --- a/arch/arm/configs/h3600_defconfig +++ b/arch/arm/configs/h3600_defconfig @@ -90,7 +90,6 @@ CONFIG_ARCH_SA1100=y # CONFIG_SA1100_COLLIE is not set # CONFIG_SA1100_H3100 is not set CONFIG_SA1100_H3600=y -CONFIG_SA1100_H3XXX=y # CONFIG_SA1100_BADGE4 is not set # CONFIG_SA1100_JORNADA720 is not set # CONFIG_SA1100_HACKKIT is not set diff --git a/arch/arm/mach-sa1100/Kconfig b/arch/arm/mach-sa1100/Kconfig index 81ffff7ed498..4e5c07f4e456 100644 --- a/arch/arm/mach-sa1100/Kconfig +++ b/arch/arm/mach-sa1100/Kconfig @@ -71,11 +71,6 @@ config SA1100_H3600 -config SA1100_H3XXX - bool - depends on SA1100_H3100 || SA1100_H3600 - default y - config SA1100_BADGE4 bool "HP Labs BadgePAD 4" select SA1111 -- cgit v1.2.3 From 9a0f6b4646a0acaf7f06fd2e02f10f303fe85d8f Mon Sep 17 00:00:00 2001 From: Jaswinder Singh Rajput Date: Tue, 22 Sep 2009 18:37:01 +0530 Subject: ARM: includecheck fix: mach-davinci, board-dm365-evm.c fix the following 'make includecheck' warning: arch/arm/mach-davinci/board-dm365-evm.c: mach/common.h is included more than once. Signed-off-by: Jaswinder Singh Rajput Acked-by: Kevin Hilman Signed-off-by: Russell King --- arch/arm/mach-davinci/board-dm365-evm.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index a1d5e7dac741..52dd8046b305 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From e616c591405c168f6dc3dfd1221e105adfe49b8d Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 27 Sep 2009 20:55:43 +0100 Subject: ARM: Don't allow highmem on SMP platforms without h/w TLB ops broadcast We suffer an unfortunate combination of "features" which makes highmem support on platforms without hardware TLB maintainence broadcast difficult: - we need kmap_high_get() support for DMA cache coherence - this requires kmap_high() to take a spinlock with IRQs disabled - kmap_high() occasionally calls flush_all_zero_pkmaps() to clear out old mappings - flush_all_zero_pkmaps() calls flush_tlb_kernel_range(), which on s/w IPI'd systems eventually calls smp_call_function_many() - smp_call_function_many() must not be called with IRQs disabled: WARNING: at kernel/smp.c:380 smp_call_function_many+0xc4/0x240() Modules linked in: Backtrace: [] (dump_backtrace+0x0/0x108) from [] (dump_stack+0x18/0x1c) r6:c007cd18 r5:c02ff228 r4:0000017c [] (dump_stack+0x0/0x1c) from [] (warn_slowpath_common+0x50/0x80) [] (warn_slowpath_common+0x0/0x80) from [] (warn_slowpath_null+0x18/0x1c) r7:00000003 r6:00000001 r5:c1ff4000 r4:c035fa34 [] (warn_slowpath_null+0x0/0x1c) from [] (smp_call_function_many+0xc4/0x240) [] (smp_call_function_many+0x0/0x240) from [] (smp_call_function+0x2c/0x38) [] (smp_call_function+0x0/0x38) from [] (on_each_cpu+0x1c/0x38) [] (on_each_cpu+0x0/0x38) from [] (flush_tlb_kernel_range+0x50/0x58) r6:00000001 r5:00000800 r4:c05f3590 [] (flush_tlb_kernel_range+0x0/0x58) from [] (flush_all_zero_pkmaps+0xc0/0xe8) [] (flush_all_zero_pkmaps+0x0/0xe8) from [] (kmap_high+0x8c/0x1e0) [] 
(kmap_high+0x0/0x1e0) from [] (kmap+0x44/0x5c) [] (kmap+0x0/0x5c) from [] (cramfs_readpage+0x3c/0x194) [] (cramfs_readpage+0x0/0x194) from [] (__do_page_cache_readahead+0x1f0/0x290) [] (__do_page_cache_readahead+0x0/0x290) from [] (ra_submit+0x30/0x38) [] (ra_submit+0x0/0x38) from [] (filemap_fault+0x3dc/0x438) r4:c1819988 [] (filemap_fault+0x0/0x438) from [] (__do_fault+0x58/0x43c) [] (__do_fault+0x0/0x43c) from [] (handle_mm_fault+0x104/0x318) [] (handle_mm_fault+0x0/0x318) from [] (do_page_fault+0x188/0x1e4) [] (do_page_fault+0x0/0x1e4) from [] (do_translation_fault+0x7c/0x84) [] (do_translation_fault+0x0/0x84) from [] (do_DataAbort+0x40/0xa4) r8:c1ff5e20 r7:c0340120 r6:00000805 r5:c1ff5e54 r4:c03400d0 [] (do_DataAbort+0x0/0xa4) from [] (__dabt_svc+0x4c/0x60) ... So we disable highmem support on these systems. Signed-off-by: Russell King --- arch/arm/include/asm/smp_plat.h | 16 ++++++++++++++++ arch/arm/kernel/smp.c | 7 +------ arch/arm/mm/mmu.c | 37 +++++++++++++++++++++++++++++++++---- 3 files changed, 50 insertions(+), 10 deletions(-) create mode 100644 arch/arm/include/asm/smp_plat.h (limited to 'arch') diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h new file mode 100644 index 000000000000..59303e200845 --- /dev/null +++ b/arch/arm/include/asm/smp_plat.h @@ -0,0 +1,16 @@ +/* + * ARM specific SMP header, this contains our implementation + * details. + */ +#ifndef __ASMARM_SMP_PLAT_H +#define __ASMARM_SMP_PLAT_H + +#include + +/* all SMP configurations have the extended CPUID registers */ +static inline int tlb_ops_need_broadcast(void) +{ + return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; +} + +#endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index e0d32770bb3d..9d015ee5747a 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -36,6 +36,7 @@ #include #include #include +#include /* * as from 2.5, kernels no longer have an init_tasks structure @@ -586,12 +587,6 @@ struct tlb_args { unsigned long ta_end; }; -/* all SMP configurations have the extended CPUID registers */ -static inline int tlb_ops_need_broadcast(void) -{ - return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2; -} - static inline void ipi_flush_tlb_all(void *ignored) { local_flush_tlb_all(); diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ce551ec2cb23..02243eeccf50 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -709,10 +710,6 @@ static void __init sanity_check_meminfo(void) if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " "ignoring high memory\n"); - } else if (cache_is_vipt_aliasing()) { - printk(KERN_CRIT "HIGHMEM is not yet supported " - "with VIPT aliasing cache, " - "ignoring high memory\n"); } else { memmove(bank + 1, bank, (meminfo.nr_banks - i) * sizeof(*bank)); @@ -756,6 +753,38 @@ static void __init sanity_check_meminfo(void) #endif j++; } +#ifdef CONFIG_HIGHMEM + if (highmem) { + const char *reason = NULL; + + if (cache_is_vipt_aliasing()) { + /* + * Interactions between kmap and other mappings + * make highmem support with aliasing VIPT caches + * rather difficult. 
+ */ + reason = "with VIPT aliasing cache"; +#ifdef CONFIG_SMP + } else if (tlb_ops_need_broadcast()) { + /* + * kmap_high needs to occasionally flush TLB entries, + * however, if the TLB entries need to be broadcast + * we may deadlock: + * kmap_high(irqs off)->flush_all_zero_pkmaps-> + * flush_tlb_kernel_range->smp_call_function_many + * (must not be called with irqs off) + */ + reason = "without hardware TLB ops broadcasting"; +#endif + } + if (reason) { + printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", + reason); + while (j > 0 && meminfo.bank[j - 1].highmem) + j--; + } + } +#endif meminfo.nr_banks = j; } -- cgit v1.2.3 From 90140c30a7b8c77e8872a389d48678d78e58789f Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 27 Sep 2009 21:04:48 +0100 Subject: ARM: Fix __cpuexit section mismatch warnings Fix: WARNING: vmlinux.o(.text+0x247c): Section mismatch in reference from the function cpu_idle() to the function .cpuexit.text:cpu_die() The function cpu_idle() references a function in an exit section. Often the function cpu_die() has valid usage outside the exit section and the fix is to remove the __cpuexit annotation of cpu_die. WARNING: vmlinux.o(.cpuexit.text+0x3c): Section mismatch in reference from the function cpu_die() to the function .cpuinit.text:secondary_start_kernel() The function __cpuexit cpu_die() references a function __cpuinit secondary_start_kernel(). This is often seen when error handling in the exit function uses functionality in the init path. The fix is often to remove the __cpuinit annotation of secondary_start_kernel() so it may be used outside an init section. Sam says: > The annotation of cpu_die() is wrong. > To be annotated __cpuexit the function shall: > - be used in exit context and only in exit context with HOTPLUG_CPU=n > - be used outside exit context with HOTPLUG_CPU=y So, this also means __cpu_disable(), __cpu_die() and twd_timer_stop() are also wrong. However, removing __cpuexit from cpu_die() creates: WARNING: vmlinux.o(.text+0x6834): Section mismatch in reference from the function cpu_die() to the function .cpuinit.text:secondary_start_kernel() The function cpu_die() references the function __cpuinit secondary_start_kernel(). This is often because cpu_die lacks a __cpuinit annotation or the annotation of secondary_start_kernel is wrong. so fix this using __ref. Signed-off-by: Russell King Acked-by: Sam Ravnborg --- arch/arm/kernel/smp.c | 6 +++--- arch/arm/kernel/smp_twd.c | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 9d015ee5747a..57162af53dc9 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -154,7 +154,7 @@ int __cpuinit __cpu_up(unsigned int cpu) /* * __cpu_disable runs on the processor to be shutdown. */ -int __cpuexit __cpu_disable(void) +int __cpu_disable(void) { unsigned int cpu = smp_processor_id(); struct task_struct *p; @@ -201,7 +201,7 @@ int __cpuexit __cpu_disable(void) * called on the thread which is asking for a CPU to be shutdown - * waits until shutdown has completed, or it is timed out. */ -void __cpuexit __cpu_die(unsigned int cpu) +void __cpu_die(unsigned int cpu) { if (!platform_cpu_kill(cpu)) printk("CPU%u: unable to kill\n", cpu); @@ -215,7 +215,7 @@ void __cpuexit __cpu_die(unsigned int cpu) * of the other hotplug-cpu capable cores, so presumably coming * out of idle fixes this. 
*/ -void __cpuexit cpu_die(void) +void __ref cpu_die(void) { unsigned int cpu = smp_processor_id(); diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index d8c88c633c6f..a73a34dccf2a 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -166,10 +166,12 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk) clockevents_register_device(clk); } +#ifdef CONFIG_HOTPLUG_CPU /* * take a local timer down */ -void __cpuexit twd_timer_stop(void) +void twd_timer_stop(void) { __raw_writel(0, twd_base + TWD_TIMER_CONTROL); } +#endif -- cgit v1.2.3 From aa45ee8fc0ee87c1711b5fe8eb3556d06530c39e Mon Sep 17 00:00:00 2001 From: Russell King Date: Mon, 28 Sep 2009 11:41:51 +0100 Subject: ARM: Ensure do_cache_op takes mmap_sem do_cache_op() uses find_vma() to validate its arguments without holding any locking. This means that the VMA could vanish beneath us. Fix this by taking a read lock on mmap_sem. Signed-off-by: Russell King --- arch/arm/kernel/traps.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 57eb0f6f6005..467b69ed1021 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -418,12 +418,14 @@ static int bad_syscall(int n, struct pt_regs *regs) static inline void do_cache_op(unsigned long start, unsigned long end, int flags) { + struct mm_struct *mm = current->active_mm; struct vm_area_struct *vma; if (end < start || flags) return; - vma = find_vma(current->active_mm, start); + down_read(&mm->mmap_sem); + vma = find_vma(mm, start); if (vma && vma->vm_start < end) { if (start < vma->vm_start) start = vma->vm_start; @@ -432,6 +434,7 @@ do_cache_op(unsigned long start, unsigned long end, int flags) flush_cache_user_range(vma, start, end); } + up_read(&mm->mmap_sem); } /* -- cgit v1.2.3 From 1b6b9d62475bdea00b366cb80b0fadba446f176d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 28 Sep 2009 14:39:58 -0700 Subject: sparc64: Increase vmalloc size to fix percpu regressions. Since we now use the embedding percpu allocator we have to make the vmalloc area at least as large as the stretch can be between nodes. Besides some minor asm adjustments, this turned out to be pretty trivial. Signed-off-by: David S. Miller --- arch/sparc/include/asm/pgtable_64.h | 4 ++-- arch/sparc/kernel/ktlb.S | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 0ff92fa22064..f3cb790fa2ae 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -41,8 +41,8 @@ #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) #define VMALLOC_START _AC(0x0000000100000000,UL) -#define VMALLOC_END _AC(0x0000000200000000,UL) -#define VMEMMAP_BASE _AC(0x0000000200000000,UL) +#define VMALLOC_END _AC(0x0000010000000000,UL) +#define VMEMMAP_BASE _AC(0x0000010000000000,UL) #define vmemmap ((struct page *)VMEMMAP_BASE) diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index 3ea6e8cde8c5..1d361477d7d6 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear: #ifdef CONFIG_SPARSEMEM_VMEMMAP /* Do not use the TSB for vmemmap. 
*/ - mov (VMEMMAP_BASE >> 24), %g5 - sllx %g5, 24, %g5 + mov (VMEMMAP_BASE >> 40), %g5 + sllx %g5, 40, %g5 cmp %g4,%g5 bgeu,pn %xcc, kvmap_vmemmap nop @@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss: sethi %hi(MODULES_VADDR), %g5 cmp %g4, %g5 blu,pn %xcc, kvmap_dtlb_longpath - mov (VMALLOC_END >> 24), %g5 - sllx %g5, 24, %g5 + mov (VMALLOC_END >> 40), %g5 + sllx %g5, 40, %g5 cmp %g4, %g5 bgeu,pn %xcc, kvmap_dtlb_longpath nop -- cgit v1.2.3 From ff60fab71bb3b4fdbf8caf57ff3739ffd0887396 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Mon, 28 Sep 2009 14:21:22 +0200 Subject: x86: Use __builtin_memset and __builtin_memcpy for memset/memcpy GCC provides reasonable memset/memcpy functions itself, with __builtin_memset and __builtin_memcpy. For the "unknown" cases, it'll fall back to our current existing functions, but for fixed size versions it'll inline something smart. Quite often that will be the same as we have now, but sometimes it can do something smarter (for example, if the code then sets the first member of a struct, it can do a shorter memset). In addition, and this is more important, gcc knows which registers and such are not clobbered (while for our asm version it pretty much acts like a compiler barrier), so for various cases it can avoid reloading values. The effect on codesize is shown below on my typical laptop .config: text data bss dec hex filename 5605675 2041100 6525148 14171923 d83f13 vmlinux.before 5595849 2041668 6525148 14162665 d81ae9 vmlinux.after Due to some not-so-good behavior in the gcc 3.x series, this change is only done for GCC 4.x and above. Signed-off-by: Arjan van de Ven LKML-Reference: <20090928142122.6fc57e9c@infradead.org> Signed-off-by: H. Peter Anvin --- arch/x86/include/asm/string_32.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index ae907e617181..3d3e8353ee5c 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -177,10 +177,15 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) */ #ifndef CONFIG_KMEMCHECK + +#if (__GNUC__ >= 4) +#define memcpy(t, f, n) __builtin_memcpy(t, f, n) +#else #define memcpy(t, f, n) \ (__builtin_constant_p((n)) \ ? __constant_memcpy((t), (f), (n)) \ : __memcpy((t), (f), (n))) +#endif #else /* * kmemcheck becomes very happy if we use the REP instructions unconditionally, @@ -316,11 +321,15 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, : __memset_generic((s), (c), (count))) #define __HAVE_ARCH_MEMSET +#if (__GNUC__ >= 4) +#define memset(s, c, count) __builtin_memset(s, c, count) +#else #define memset(s, c, count) \ (__builtin_constant_p(c) \ ? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \ (count)) \ : __memset((s), (c), (count))) +#endif /* * find the first occurrence of byte 'c', or 1 past the area if none -- cgit v1.2.3 From a72a8a5f2ea32074e98803d4b15d0e093c5a9e4d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 28 Sep 2009 17:35:20 -0700 Subject: sparc64: Add a basic conflict engine in preparation for multi-counter support. The hardware counter ->event_base state records and encoding of the "struct perf_event_map" entry used for the event. We use this to make sure that when we have more than 1 event, both can be scheduled into the hardware at the same time. As usual, structure of code is largely cribbed from powerpc. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 69 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 64 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 919952498155..2b7743466ae4 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -68,6 +68,17 @@ struct perf_event_map { #define PIC_LOWER 0x02 }; +static unsigned long perf_event_encode(const struct perf_event_map *pmap) +{ + return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; +} + +static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) +{ + *msk = val & 0xff; + *enc = val >> 16; +} + #define C(x) PERF_COUNT_HW_CACHE_##x #define CACHE_OP_UNSUPPORTED 0xfffe @@ -713,6 +724,48 @@ static void hw_perf_event_destroy(struct perf_event *event) perf_event_release_pmc(); } +/* Make sure all events can be scheduled into the hardware at + * the same time. This is simplified by the fact that we only + * need to support 2 simultaneous HW events. + */ +static int sparc_check_constraints(unsigned long *events, int n_ev) +{ + if (n_ev <= perf_max_events) { + u8 msk1, msk2; + u16 dummy; + + if (n_ev == 1) + return 0; + BUG_ON(n_ev != 2); + perf_event_decode(events[0], &dummy, &msk1); + perf_event_decode(events[1], &dummy, &msk2); + + /* If both events can go on any counter, OK. */ + if (msk1 == (PIC_UPPER | PIC_LOWER) && + msk2 == (PIC_UPPER | PIC_LOWER)) + return 0; + + /* If one event is limited to a specific counter, + * and the other can go on both, OK. + */ + if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && + msk2 == (PIC_UPPER | PIC_LOWER)) + return 0; + if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) && + msk1 == (PIC_UPPER | PIC_LOWER)) + return 0; + + /* If the events are fixed to different counters, OK. */ + if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) || + (msk1 == PIC_LOWER && msk2 == PIC_UPPER)) + return 0; + + /* Otherwise, there is a conflict. 
*/ + } + + return -1; +} + static int check_excludes(struct perf_event **evts, int n_prev, int n_new) { int eu = 0, ek = 0, eh = 0; @@ -742,7 +795,7 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new) } static int collect_events(struct perf_event *group, int max_count, - struct perf_event *evts[], u64 *events) + struct perf_event *evts[], unsigned long *events) { struct perf_event *event; int n = 0; @@ -751,7 +804,7 @@ static int collect_events(struct perf_event *group, int max_count, if (n >= max_count) return -1; evts[n] = group; - events[n++] = group->hw.config; + events[n++] = group->hw.event_base; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && @@ -759,7 +812,7 @@ static int collect_events(struct perf_event *group, int max_count, if (n >= max_count) return -1; evts[n] = event; - events[n++] = event->hw.config; + events[n++] = event->hw.event_base; } } return n; @@ -770,8 +823,9 @@ static int __hw_perf_event_init(struct perf_event *event) struct perf_event_attr *attr = &event->attr; struct perf_event *evts[MAX_HWEVENTS]; struct hw_perf_event *hwc = &event->hw; + unsigned long events[MAX_HWEVENTS]; const struct perf_event_map *pmap; - u64 enc, events[MAX_HWEVENTS]; + u64 enc; int n; if (atomic_read(&nmi_active) < 0) @@ -800,6 +854,8 @@ static int __hw_perf_event_init(struct perf_event *event) if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; + hwc->event_base = perf_event_encode(pmap); + enc = pmap->encoding; n = 0; @@ -810,12 +866,15 @@ static int __hw_perf_event_init(struct perf_event *event) if (n < 0) return -EINVAL; } - events[n] = enc; + events[n] = hwc->event_base; evts[n] = event; if (check_excludes(evts, n, 1)) return -EINVAL; + if (sparc_check_constraints(events, n + 1)) + return -EINVAL; + /* Try to do all error checking before this point, as unwinding * state after grabbing the PMC is difficult. */ -- cgit v1.2.3 From d29862f03575cdfa8819f78b0f3f78eec3b44629 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 28 Sep 2009 17:37:12 -0700 Subject: sparc64: Minor coding style fixups in perf code. These got introduced during the counter --> event tree-wide renaming. Signed-off-by: David S. 
Miller --- arch/sparc/kernel/perf_event.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 2b7743466ae4..03b041c4d95c 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -464,8 +464,7 @@ static u64 nop_for_index(int idx) sparc_pmu->lower_nop, idx); } -static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, - int idx) +static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, int idx) { u64 val, mask = mask_for_index(idx); @@ -473,8 +472,7 @@ static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, pcr_ops->write((val & ~mask) | hwc->config); } -static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, - int idx) +static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, int idx) { u64 mask = mask_for_index(idx); u64 nop = nop_for_index(idx); @@ -555,7 +553,7 @@ static void write_pmc(int idx, u64 val) } static int sparc_perf_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, int idx) + struct hw_perf_event *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); s64 period = hwc->sample_period; @@ -607,7 +605,7 @@ static int sparc_pmu_enable(struct perf_event *event) } static u64 sparc_perf_event_update(struct perf_event *event, - struct hw_perf_event *hwc, int idx) + struct hw_perf_event *hwc, int idx) { int shift = 64 - 32; u64 prev_raw_count, new_raw_count; @@ -939,7 +937,7 @@ void perf_event_print_debug(void) } static int __kprobes perf_event_nmi_handler(struct notifier_block *self, - unsigned long cmd, void *__args) + unsigned long cmd, void *__args) { struct die_args *args = __args; struct perf_sample_data data; -- cgit v1.2.3 From 6e804251d119bbd5522d76bdb0f48f5c9a7abf51 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 29 Sep 2009 15:10:23 -0700 Subject: sparc64: Fix comment typo in perf_event.c Signed-off-by: David S. Miller --- arch/sparc/kernel/perf_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 03b041c4d95c..32fc974bf8b5 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -214,7 +214,7 @@ static const struct sparc_pmu ultra3_pmu = { /* Niagara1 is very limited. The upper PIC is hard-locked to count * only instructions, so it is free running which creates all kinds of - * problems. Some hardware designs make one wonder if the creastor + * problems. Some hardware designs make one wonder if the creator * even looked at how this stuff gets used by software. */ static const struct perf_event_map niagara1_perfmon_event_map[] = { -- cgit v1.2.3 From cd15e8c3313f2831d2d6c7951a93eb5de9621e5a Mon Sep 17 00:00:00 2001 From: Lennart Sorensen Date: Thu, 17 Sep 2009 11:49:41 -0400 Subject: m68knommu: show KiB rather than pages in "Freeing initrd memory:" message Fix "Freeing initrd memory:" message m68knommu to show kilobytes as claimed rather than number of pages. 
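A quick sketch of the arithmetic, assuming the usual 4 KiB pages (illustrative only, mirroring the one-line change below): the freed-page count has to be scaled by PAGE_SIZE / 1024 before being printed as "k", so 256 freed pages should be reported as 1024k, not 256k.

	/* Illustrative only: convert a freed-page count to KiB for the printk. */
	int freed_kib = pages * (PAGE_SIZE / 1024);	/* 256 pages * 4 = 1024k */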
Signed-off-by: Lennart Sorensen Signed-off-by: Greg Ungerer --- arch/m68knommu/mm/init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/m68knommu/mm/init.c b/arch/m68knommu/mm/init.c index b1703c67a4f1..f3236d0b522d 100644 --- a/arch/m68knommu/mm/init.c +++ b/arch/m68knommu/mm/init.c @@ -162,7 +162,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) totalram_pages++; pages++; } - printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages); + printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024)); } #endif -- cgit v1.2.3 From 63335138334e5ae40bf485a07b46be59397e50da Mon Sep 17 00:00:00 2001 From: Huang Weiyi Date: Fri, 18 Sep 2009 06:18:56 +0800 Subject: m68knommu: remove duplicated #include Signed-off-by: Huang Weiyi Signed-off-by: Greg Ungerer --- arch/m68knommu/platform/5206e/config.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c index 0f41ba82a3b5..942397984c66 100644 --- a/arch/m68knommu/platform/5206e/config.c +++ b/arch/m68knommu/platform/5206e/config.c @@ -17,7 +17,6 @@ #include #include #include -#include /***************************************************************************/ -- cgit v1.2.3 From c84b564e82777b00ab48178a88bc533ba62b9922 Mon Sep 17 00:00:00 2001 From: Greg Ungerer Date: Sun, 20 Sep 2009 21:32:32 +1000 Subject: m68knommu: fix rename of pt_regs offset defines breakage Commit f159ee782990aacb5494738c98f13a2aa61dbb4a ("locking, m68k/asm-offsets: Rename pt_regs offset defines") breaks the m68knommu entry code that relies on these define names. Fix the files to match the new define names. Signed-off-by: Greg Ungerer --- arch/m68knommu/kernel/asm-offsets.c | 28 ++++++++++++++-------------- arch/m68knommu/kernel/entry.S | 6 +++--- arch/m68knommu/platform/68328/entry.S | 32 ++++++++++++++++---------------- arch/m68knommu/platform/68360/entry.S | 16 ++++++++-------- arch/m68knommu/platform/coldfire/entry.S | 20 ++++++++++---------- 5 files changed, 51 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c index 594ee0e657fe..9a8876f715d8 100644 --- a/arch/m68knommu/kernel/asm-offsets.c +++ b/arch/m68knommu/kernel/asm-offsets.c @@ -45,25 +45,25 @@ int main(void) DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); /* offsets into the pt_regs */ - DEFINE(PT_D0, offsetof(struct pt_regs, d0)); - DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); - DEFINE(PT_D1, offsetof(struct pt_regs, d1)); - DEFINE(PT_D2, offsetof(struct pt_regs, d2)); - DEFINE(PT_D3, offsetof(struct pt_regs, d3)); - DEFINE(PT_D4, offsetof(struct pt_regs, d4)); - DEFINE(PT_D5, offsetof(struct pt_regs, d5)); - DEFINE(PT_A0, offsetof(struct pt_regs, a0)); - DEFINE(PT_A1, offsetof(struct pt_regs, a1)); - DEFINE(PT_A2, offsetof(struct pt_regs, a2)); - DEFINE(PT_PC, offsetof(struct pt_regs, pc)); - DEFINE(PT_SR, offsetof(struct pt_regs, sr)); + DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); + DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0)); + DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); + DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2)); + DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3)); + DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4)); + DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5)); + DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0)); + DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1)); + 
DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2)); + DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr)); #ifdef CONFIG_COLDFIRE /* bitfields are a bit difficult */ - DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2); + DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2); #else /* bitfields are a bit difficult */ - DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); + DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4); #endif /* signal defines */ diff --git a/arch/m68knommu/kernel/entry.S b/arch/m68knommu/kernel/entry.S index f56faa5c9cd9..56043ade3941 100644 --- a/arch/m68knommu/kernel/entry.S +++ b/arch/m68knommu/kernel/entry.S @@ -46,7 +46,7 @@ ENTRY(buserr) SAVE_ALL moveq #-1,%d0 - movel %d0,%sp@(PT_ORIG_D0) + movel %d0,%sp@(PT_OFF_ORIG_D0) movel %sp,%sp@- /* stack frame pointer argument */ jsr buserr_c addql #4,%sp @@ -55,7 +55,7 @@ ENTRY(buserr) ENTRY(trap) SAVE_ALL moveq #-1,%d0 - movel %d0,%sp@(PT_ORIG_D0) + movel %d0,%sp@(PT_OFF_ORIG_D0) movel %sp,%sp@- /* stack frame pointer argument */ jsr trap_c addql #4,%sp @@ -67,7 +67,7 @@ ENTRY(trap) ENTRY(dbginterrupt) SAVE_ALL moveq #-1,%d0 - movel %d0,%sp@(PT_ORIG_D0) + movel %d0,%sp@(PT_OFF_ORIG_D0) movel %sp,%sp@- /* stack frame pointer argument */ jsr dbginterrupt_c addql #4,%sp diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S index b1aef72f3baf..9d80d2c42866 100644 --- a/arch/m68knommu/platform/68328/entry.S +++ b/arch/m68knommu/platform/68328/entry.S @@ -39,17 +39,17 @@ .globl inthandler7 badsys: - movel #-ENOSYS,%sp@(PT_D0) + movel #-ENOSYS,%sp@(PT_OFF_D0) jra ret_from_exception do_trace: - movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ + movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ subql #4,%sp SAVE_SWITCH_STACK jbsr syscall_trace RESTORE_SWITCH_STACK addql #4,%sp - movel %sp@(PT_ORIG_D0),%d1 + movel %sp@(PT_OFF_ORIG_D0),%d1 movel #-ENOSYS,%d0 cmpl #NR_syscalls,%d1 jcc 1f @@ -57,7 +57,7 @@ do_trace: lea sys_call_table, %a0 jbsr %a0@(%d1) -1: movel %d0,%sp@(PT_D0) /* save the return value */ +1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ subql #4,%sp /* dummy return address */ SAVE_SWITCH_STACK jbsr syscall_trace @@ -75,7 +75,7 @@ ENTRY(system_call) jbsr set_esp0 addql #4,%sp - movel %sp@(PT_ORIG_D0),%d0 + movel %sp@(PT_OFF_ORIG_D0),%d0 movel %sp,%d1 /* get thread_info pointer */ andl #-THREAD_SIZE,%d1 @@ -88,10 +88,10 @@ ENTRY(system_call) lea sys_call_table,%a0 movel %a0@(%d0), %a0 jbsr %a0@ - movel %d0,%sp@(PT_D0) /* save the return value*/ + movel %d0,%sp@(PT_OFF_D0) /* save the return value*/ ret_from_exception: - btst #5,%sp@(PT_SR) /* check if returning to kernel*/ + btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/ jeq Luser_return /* if so, skip resched, signals*/ Lkernel_return: @@ -133,7 +133,7 @@ Lreturn: */ inthandler1: SAVE_ALL - movew %sp@(PT_VECTOR), %d0 + movew %sp@(PT_OFF_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -144,7 +144,7 @@ inthandler1: inthandler2: SAVE_ALL - movew %sp@(PT_VECTOR), %d0 + movew %sp@(PT_OFF_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -155,7 +155,7 @@ inthandler2: inthandler3: SAVE_ALL - movew %sp@(PT_VECTOR), %d0 + movew %sp@(PT_OFF_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -166,7 +166,7 @@ inthandler3: inthandler4: SAVE_ALL - movew %sp@(PT_VECTOR), %d0 + movew %sp@(PT_OFF_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -177,7 +177,7 @@ inthandler4: inthandler5: SAVE_ALL - movew %sp@(PT_VECTOR), %d0 + movew %sp@(PT_OFF_VECTOR), %d0 and #0x3ff, 
%d0 movel %sp,%sp@- @@ -188,7 +188,7 @@ inthandler5: inthandler6: SAVE_ALL - movew %sp@(PT_VECTOR), %d0 + movew %sp@(PT_OFF_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -199,7 +199,7 @@ inthandler6: inthandler7: SAVE_ALL - movew %sp@(PT_VECTOR), %d0 + movew %sp@(PT_OFF_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -210,7 +210,7 @@ inthandler7: inthandler: SAVE_ALL - movew %sp@(PT_VECTOR), %d0 + movew %sp@(PT_OFF_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -224,7 +224,7 @@ ret_from_interrupt: 2: RESTORE_ALL 1: - moveb %sp@(PT_SR), %d0 + moveb %sp@(PT_OFF_SR), %d0 and #7, %d0 jhi 2b diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S index 55dfefe38642..6d3460a39cac 100644 --- a/arch/m68knommu/platform/68360/entry.S +++ b/arch/m68knommu/platform/68360/entry.S @@ -35,17 +35,17 @@ .globl inthandler badsys: - movel #-ENOSYS,%sp@(PT_D0) + movel #-ENOSYS,%sp@(PT_OFF_D0) jra ret_from_exception do_trace: - movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ + movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ subql #4,%sp SAVE_SWITCH_STACK jbsr syscall_trace RESTORE_SWITCH_STACK addql #4,%sp - movel %sp@(PT_ORIG_D0),%d1 + movel %sp@(PT_OFF_ORIG_D0),%d1 movel #-ENOSYS,%d0 cmpl #NR_syscalls,%d1 jcc 1f @@ -53,7 +53,7 @@ do_trace: lea sys_call_table, %a0 jbsr %a0@(%d1) -1: movel %d0,%sp@(PT_D0) /* save the return value */ +1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ subql #4,%sp /* dummy return address */ SAVE_SWITCH_STACK jbsr syscall_trace @@ -79,10 +79,10 @@ ENTRY(system_call) lea sys_call_table,%a0 movel %a0@(%d0), %a0 jbsr %a0@ - movel %d0,%sp@(PT_D0) /* save the return value*/ + movel %d0,%sp@(PT_OFF_D0) /* save the return value*/ ret_from_exception: - btst #5,%sp@(PT_SR) /* check if returning to kernel*/ + btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/ jeq Luser_return /* if so, skip resched, signals*/ Lkernel_return: @@ -124,7 +124,7 @@ Lreturn: */ inthandler: SAVE_ALL - movew %sp@(PT_VECTOR), %d0 + movew %sp@(PT_OFF_VECTOR), %d0 and.l #0x3ff, %d0 lsr.l #0x02, %d0 @@ -139,7 +139,7 @@ ret_from_interrupt: 2: RESTORE_ALL 1: - moveb %sp@(PT_SR), %d0 + moveb %sp@(PT_OFF_SR), %d0 and #7, %d0 jhi 2b /* check if we need to do software interrupts */ diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S index 3b471c0da24a..dd7d591f70ea 100644 --- a/arch/m68knommu/platform/coldfire/entry.S +++ b/arch/m68knommu/platform/coldfire/entry.S @@ -81,11 +81,11 @@ ENTRY(system_call) movel %d3,%a0 jbsr %a0@ - movel %d0,%sp@(PT_D0) /* save the return value */ + movel %d0,%sp@(PT_OFF_D0) /* save the return value */ jra ret_from_exception 1: - movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_D0 */ - movel %d2,PT_D0(%sp) /* on syscall entry */ + movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_OFF_D0 */ + movel %d2,PT_OFF_D0(%sp) /* on syscall entry */ subql #4,%sp SAVE_SWITCH_STACK jbsr syscall_trace @@ -93,7 +93,7 @@ ENTRY(system_call) addql #4,%sp movel %d3,%a0 jbsr %a0@ - movel %d0,%sp@(PT_D0) /* save the return value */ + movel %d0,%sp@(PT_OFF_D0) /* save the return value */ subql #4,%sp /* dummy return address */ SAVE_SWITCH_STACK jbsr syscall_trace @@ -104,7 +104,7 @@ ret_from_signal: ret_from_exception: move #0x2700,%sr /* disable intrs */ - btst #5,%sp@(PT_SR) /* check if returning to kernel */ + btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */ jeq Luser_return /* if so, skip resched, signals */ #ifdef CONFIG_PREEMPT @@ -142,8 +142,8 @@ Luser_return: Lreturn: move #0x2700,%sr /* disable 
intrs */ movel sw_usp,%a0 /* get usp */ - movel %sp@(PT_PC),%a0@- /* copy exception program counter */ - movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ + movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */ + movel %sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */ moveml %sp@,%d1-%d5/%a0-%a2 lea %sp@(32),%sp /* space for 8 regs */ movel %sp@+,%d0 @@ -181,9 +181,9 @@ Lsignal_return: ENTRY(inthandler) SAVE_ALL moveq #-1,%d0 - movel %d0,%sp@(PT_ORIG_D0) + movel %d0,%sp@(PT_OFF_ORIG_D0) - movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */ + movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */ andl #0x03fc,%d0 /* mask out vector only */ movel %sp,%sp@- /* push regs arg */ @@ -203,7 +203,7 @@ ENTRY(inthandler) ENTRY(fasthandler) SAVE_LOCAL - movew %sp@(PT_FORMATVEC),%d0 + movew %sp@(PT_OFF_FORMATVEC),%d0 andl #0x03fc,%d0 /* mask out vector only */ movel %sp,%sp@- /* push regs arg */ -- cgit v1.2.3 From 8810e0553fec6ff0a0db1431e388de39e2a2a512 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Mon, 28 Sep 2009 08:21:41 +0000 Subject: sh: mach-ecovec24: Add TouchScreen support Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/boards/mach-ecovec24/setup.c | 42 ++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'arch') diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 5f9881e16e2f..52912a6149c7 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include