author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-09 13:12:47 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-12-09 13:12:47 -0800
commit    | b64bb1d758163814687eb3b84d74e56f04d0c9d1 (patch)
tree      | 59f1db8b718e98d13c6cf9d3486221cfff6e7eef /arch/arm64/kernel
parent    | 50569687e9c688a8688982805be6d8e3c8879042 (diff)
parent    | eb8a653137b7e74f7cdc01f814eb9d094a65aed9 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
"Here's the usual mixed bag of arm64 updates, also including some
related EFI changes (Acked by Matt) and the MMU gather range cleanup
(Acked by you).
Changes include:
- support for alternative instruction patching from Andre
- seccomp from Akashi
- some AArch32 instruction emulation, required by the Android folks
- optimisations for exception entry/exit code, cmpxchg, pcpu atomics
- mmu_gather range calculations moved into core code
- EFI updates from Ard, including long-awaited SMBIOS support
- /proc/cpuinfo fixes to align with the format used by arch/arm/
- a few non-critical fixes across the architecture"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (70 commits)
arm64: remove the unnecessary arm64_swiotlb_init()
arm64: add module support for alternatives fixups
arm64: perf: Prevent wraparound during overflow
arm64/include/asm: Fixed a warning about 'struct pt_regs'
arm64: Provide a namespace to NCAPS
arm64: bpf: lift restriction on last instruction
arm64: Implement support for read-mostly sections
arm64: compat: align cacheflush syscall with arch/arm
arm64: add seccomp support
arm64: add SIGSYS siginfo for compat task
arm64: add seccomp syscall for compat task
asm-generic: add generic seccomp.h for secure computing mode 1
arm64: ptrace: allow tracer to skip a system call
arm64: ptrace: add NT_ARM_SYSTEM_CALL regset
arm64: Move some head.text functions to executable section
arm64: jump labels: NOP out NOP -> NOP replacement
arm64: add support to dump the kernel page tables
arm64: Add FIX_HOLE to permanent fixed addresses
arm64: alternatives: fix pr_fmt string for consistency
arm64: vmlinux.lds.S: don't discard .exit.* sections at link-time
...
Diffstat (limited to 'arch/arm64/kernel')
27 files changed, 1539 insertions(+), 404 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5bd029b43644..eaa77ed7766a 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -5,6 +5,7 @@
 CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
 AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+CFLAGS_armv8_deprecated.o := -I$(src)
 
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_insn.o = -pg
@@ -15,10 +16,11 @@
 arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
 		entry-fpsimd.o process.o ptrace.o setup.o signal.o \
 		sys.o stacktrace.o time.o traps.o io.o vdso.o \
 		hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \
-		cpuinfo.o
+		cpuinfo.o cpu_errata.o alternative.o
 
 arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
-		sys_compat.o
+		sys_compat.o \
+		../../arm/kernel/opcodes.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
 arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
@@ -31,6 +33,7 @@
 arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 arm64-obj-$(CONFIG_KGDB) += kgdb.o
 arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o
 arm64-obj-$(CONFIG_PCI) += pci.o
+arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o
 
 obj-y += $(arm64-obj-y) vdso/
 obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
new file mode 100644
index 000000000000..ad7821d64a1d
--- /dev/null
+++ b/arch/arm64/kernel/alternative.c
@@ -0,0 +1,85 @@
+/*
+ * alternative runtime patching
+ * inspired by the x86 version
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "alternatives: " fmt
+
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+#include <linux/stop_machine.h>
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
+struct alt_region {
+	struct alt_instr *begin;
+	struct alt_instr *end;
+};
+
+static int __apply_alternatives(void *alt_region)
+{
+	struct alt_instr *alt;
+	struct alt_region *region = alt_region;
+	u8 *origptr, *replptr;
+
+	for (alt = region->begin; alt < region->end; alt++) {
+		if (!cpus_have_cap(alt->cpufeature))
+			continue;
+
+		BUG_ON(alt->alt_len > alt->orig_len);
+
+		pr_info_once("patching kernel code\n");
+
+		origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
+		replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
+		memcpy(origptr, replptr, alt->alt_len);
+		flush_icache_range((uintptr_t)origptr,
+				   (uintptr_t)(origptr + alt->alt_len));
+	}
+
+	return 0;
+}
+
+void apply_alternatives_all(void)
+{
+	struct alt_region region = {
+		.begin	= __alt_instructions,
+		.end	= __alt_instructions_end,
+	};
+
+	/* better not try code patching on a live SMP system */
+	stop_machine(__apply_alternatives, &region, NULL);
+}
+
+void apply_alternatives(void *start, size_t length)
+{
+	struct alt_region region = {
+		.begin	= start,
+		.end	= start + length,
+	};
+
+	__apply_alternatives(&region);
+}
+
+void free_alternatives_memory(void)
+{
+	free_reserved_area(__alt_instructions, __alt_instructions_end,
+			   0, "alternatives");
+}
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
new file mode 100644
index 000000000000..c363671d7509
--- /dev/null
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -0,0 +1,553 @@
+/*
+ * Copyright (C) 2014 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
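Note on the patching scheme: __apply_alternatives() above stores no absolute pointers. Both the original and the replacement sequence are located by adding a self-relative offset to the address of the offset field itself, so .altinstructions entries stay valid wherever the image is loaded. A minimal user-space sketch of that scheme (the struct layout here is illustrative, not the kernel ABI from asm/alternative.h):

#include <stdint.h>
#include <string.h>

struct alt_instr_model {
	int32_t  orig_offset;	/* self-relative offset to original insns */
	int32_t  alt_offset;	/* self-relative offset to replacement */
	uint16_t cpufeature;	/* capability gating this alternative */
	uint8_t  orig_len;	/* length of the original sequence */
	uint8_t  alt_len;	/* length of the replacement (<= orig_len) */
};

/* Resolve a pointer stored as an offset relative to its own field. */
static uint8_t *self_relative(int32_t *field)
{
	return (uint8_t *)field + *field;
}

static void apply_one(struct alt_instr_model *alt)
{
	uint8_t *origptr = self_relative(&alt->orig_offset);
	uint8_t *replptr = self_relative(&alt->alt_offset);

	memcpy(origptr, replptr, alt->alt_len);
	/* the kernel then flushes the icache over origptr..+alt_len */
}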
+ */ + +#include <linux/cpu.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/perf_event.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/sysctl.h> + +#include <asm/insn.h> +#include <asm/opcodes.h> +#include <asm/system_misc.h> +#include <asm/traps.h> +#include <asm/uaccess.h> + +#define CREATE_TRACE_POINTS +#include "trace-events-emulation.h" + +/* + * The runtime support for deprecated instruction support can be in one of + * following three states - + * + * 0 = undef + * 1 = emulate (software emulation) + * 2 = hw (supported in hardware) + */ +enum insn_emulation_mode { + INSN_UNDEF, + INSN_EMULATE, + INSN_HW, +}; + +enum legacy_insn_status { + INSN_DEPRECATED, + INSN_OBSOLETE, +}; + +struct insn_emulation_ops { + const char *name; + enum legacy_insn_status status; + struct undef_hook *hooks; + int (*set_hw_mode)(bool enable); +}; + +struct insn_emulation { + struct list_head node; + struct insn_emulation_ops *ops; + int current_mode; + int min; + int max; +}; + +static LIST_HEAD(insn_emulation); +static int nr_insn_emulated; +static DEFINE_RAW_SPINLOCK(insn_emulation_lock); + +static void register_emulation_hooks(struct insn_emulation_ops *ops) +{ + struct undef_hook *hook; + + BUG_ON(!ops->hooks); + + for (hook = ops->hooks; hook->instr_mask; hook++) + register_undef_hook(hook); + + pr_notice("Registered %s emulation handler\n", ops->name); +} + +static void remove_emulation_hooks(struct insn_emulation_ops *ops) +{ + struct undef_hook *hook; + + BUG_ON(!ops->hooks); + + for (hook = ops->hooks; hook->instr_mask; hook++) + unregister_undef_hook(hook); + + pr_notice("Removed %s emulation handler\n", ops->name); +} + +static int update_insn_emulation_mode(struct insn_emulation *insn, + enum insn_emulation_mode prev) +{ + int ret = 0; + + switch (prev) { + case INSN_UNDEF: /* Nothing to be done */ + break; + case INSN_EMULATE: + remove_emulation_hooks(insn->ops); + break; + case INSN_HW: + if (insn->ops->set_hw_mode) { + insn->ops->set_hw_mode(false); + pr_notice("Disabled %s support\n", insn->ops->name); + } + break; + } + + switch (insn->current_mode) { + case INSN_UNDEF: + break; + case INSN_EMULATE: + register_emulation_hooks(insn->ops); + break; + case INSN_HW: + if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(true)) + pr_notice("Enabled %s support\n", insn->ops->name); + else + ret = -EINVAL; + break; + } + + return ret; +} + +static void register_insn_emulation(struct insn_emulation_ops *ops) +{ + unsigned long flags; + struct insn_emulation *insn; + + insn = kzalloc(sizeof(*insn), GFP_KERNEL); + insn->ops = ops; + insn->min = INSN_UNDEF; + + switch (ops->status) { + case INSN_DEPRECATED: + insn->current_mode = INSN_EMULATE; + insn->max = INSN_HW; + break; + case INSN_OBSOLETE: + insn->current_mode = INSN_UNDEF; + insn->max = INSN_EMULATE; + break; + } + + raw_spin_lock_irqsave(&insn_emulation_lock, flags); + list_add(&insn->node, &insn_emulation); + nr_insn_emulated++; + raw_spin_unlock_irqrestore(&insn_emulation_lock, flags); + + /* Register any handlers if required */ + update_insn_emulation_mode(insn, INSN_UNDEF); +} + +static int emulation_proc_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret = 0; + struct insn_emulation *insn = (struct insn_emulation *) table->data; + enum insn_emulation_mode prev_mode = insn->current_mode; + + table->data = &insn->current_mode; + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + + if (ret || !write || prev_mode == 
insn->current_mode) + goto ret; + + ret = update_insn_emulation_mode(insn, prev_mode); + if (ret) { + /* Mode change failed, revert to previous mode. */ + insn->current_mode = prev_mode; + update_insn_emulation_mode(insn, INSN_UNDEF); + } +ret: + table->data = insn; + return ret; +} + +static struct ctl_table ctl_abi[] = { + { + .procname = "abi", + .mode = 0555, + }, + { } +}; + +static void register_insn_emulation_sysctl(struct ctl_table *table) +{ + unsigned long flags; + int i = 0; + struct insn_emulation *insn; + struct ctl_table *insns_sysctl, *sysctl; + + insns_sysctl = kzalloc(sizeof(*sysctl) * (nr_insn_emulated + 1), + GFP_KERNEL); + + raw_spin_lock_irqsave(&insn_emulation_lock, flags); + list_for_each_entry(insn, &insn_emulation, node) { + sysctl = &insns_sysctl[i]; + + sysctl->mode = 0644; + sysctl->maxlen = sizeof(int); + + sysctl->procname = insn->ops->name; + sysctl->data = insn; + sysctl->extra1 = &insn->min; + sysctl->extra2 = &insn->max; + sysctl->proc_handler = emulation_proc_handler; + i++; + } + raw_spin_unlock_irqrestore(&insn_emulation_lock, flags); + + table->child = insns_sysctl; + register_sysctl_table(table); +} + +/* + * Implement emulation of the SWP/SWPB instructions using load-exclusive and + * store-exclusive. + * + * Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>] + * Where: Rt = destination + * Rt2 = source + * Rn = address + */ + +/* + * Error-checking SWP macros implemented using ldxr{b}/stxr{b} + */ +#define __user_swpX_asm(data, addr, res, temp, B) \ + __asm__ __volatile__( \ + " mov %w2, %w1\n" \ + "0: ldxr"B" %w1, [%3]\n" \ + "1: stxr"B" %w0, %w2, [%3]\n" \ + " cbz %w0, 2f\n" \ + " mov %w0, %w4\n" \ + "2:\n" \ + " .pushsection .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: mov %w0, %w5\n" \ + " b 2b\n" \ + " .popsection" \ + " .pushsection __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .quad 0b, 3b\n" \ + " .quad 1b, 3b\n" \ + " .popsection" \ + : "=&r" (res), "+r" (data), "=&r" (temp) \ + : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \ + : "memory") + +#define __user_swp_asm(data, addr, res, temp) \ + __user_swpX_asm(data, addr, res, temp, "") +#define __user_swpb_asm(data, addr, res, temp) \ + __user_swpX_asm(data, addr, res, temp, "b") + +/* + * Bit 22 of the instruction encoding distinguishes between + * the SWP and SWPB variants (bit set means SWPB). + */ +#define TYPE_SWPB (1 << 22) + +/* + * Set up process info to signal segmentation fault - called on access error. 
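The sysctl path earlier in this file (emulation_proc_handler() driving update_insn_emulation_mode()) follows a try-then-revert pattern: accept the new mode, attempt the transition, and restore the old mode if the switch fails. A stand-alone sketch of that state machine, with update_mode() as a toy stand-in for update_insn_emulation_mode():

enum insn_emulation_mode_model { MODE_UNDEF, MODE_EMULATE, MODE_HW };

/* toy stand-in: tear down 'prev', install 'next'; pretend hardware
 * support is unavailable so MODE_HW always fails */
static int update_mode(int next, int prev)
{
	(void)prev;
	return next == MODE_HW ? -1 : 0;
}

static int set_emulation_mode(int *current_mode, int requested)
{
	int prev = *current_mode;
	int ret;

	if (requested == prev)		/* no transition needed */
		return 0;

	*current_mode = requested;
	ret = update_mode(requested, prev);
	if (ret) {
		/* revert, re-installing the old mode from scratch */
		*current_mode = prev;
		update_mode(prev, MODE_UNDEF);
	}
	return ret;
}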
+ */
+static void set_segfault(struct pt_regs *regs, unsigned long addr)
+{
+	siginfo_t info;
+
+	down_read(&current->mm->mmap_sem);
+	if (find_vma(current->mm, addr) == NULL)
+		info.si_code = SEGV_MAPERR;
+	else
+		info.si_code = SEGV_ACCERR;
+	up_read(&current->mm->mmap_sem);
+
+	info.si_signo = SIGSEGV;
+	info.si_errno = 0;
+	info.si_addr  = (void *) instruction_pointer(regs);
+
+	pr_debug("SWP{B} emulation: access caused memory abort!\n");
+	arm64_notify_die("Illegal memory access", regs, &info, 0);
+}
+
+static int emulate_swpX(unsigned int address, unsigned int *data,
+			unsigned int type)
+{
+	unsigned int res = 0;
+
+	if ((type != TYPE_SWPB) && (address & 0x3)) {
+		/* SWP to unaligned address not permitted */
+		pr_debug("SWP instruction on unaligned pointer!\n");
+		return -EFAULT;
+	}
+
+	while (1) {
+		unsigned long temp;
+
+		if (type == TYPE_SWPB)
+			__user_swpb_asm(*data, address, res, temp);
+		else
+			__user_swp_asm(*data, address, res, temp);
+
+		if (likely(res != -EAGAIN) || signal_pending(current))
+			break;
+
+		cond_resched();
+	}
+
+	return res;
+}
+
+/*
+ * swp_handler logs the id of calling process, dissects the instruction, sanity
+ * checks the memory location, calls emulate_swpX for the actual operation and
+ * deals with fixup/error handling before returning
+ */
+static int swp_handler(struct pt_regs *regs, u32 instr)
+{
+	u32 destreg, data, type, address = 0;
+	int rn, rt2, res = 0;
+
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+	type = instr & TYPE_SWPB;
+
+	switch (arm_check_condition(instr, regs->pstate)) {
+	case ARM_OPCODE_CONDTEST_PASS:
+		break;
+	case ARM_OPCODE_CONDTEST_FAIL:
+		/* Condition failed - return to next instruction */
+		goto ret;
+	case ARM_OPCODE_CONDTEST_UNCOND:
+		/* If unconditional encoding - not a SWP, undef */
+		return -EFAULT;
+	default:
+		return -EINVAL;
+	}
+
+	rn = aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET);
+	rt2 = aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET);
+
+	address = (u32)regs->user_regs.regs[rn];
+	data	= (u32)regs->user_regs.regs[rt2];
+	destreg = aarch32_insn_extract_reg_num(instr, A32_RT_OFFSET);
+
+	pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
+		rn, address, destreg,
+		aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);
+
+	/* Check access in reasonable access range for both SWP and SWPB */
+	if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+		pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
+			address);
+		goto fault;
+	}
+
+	res = emulate_swpX(address, &data, type);
+	if (res == -EFAULT)
+		goto fault;
+	else if (res == 0)
+		regs->user_regs.regs[destreg] = data;
+
+ret:
+	if (type == TYPE_SWPB)
+		trace_instruction_emulation("swpb", regs->pc);
+	else
+		trace_instruction_emulation("swp", regs->pc);
+
+	pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
+			current->comm, (unsigned long)current->pid, regs->pc);
+
+	regs->pc += 4;
+	return 0;
+
+fault:
+	set_segfault(regs, address);
+
+	return 0;
+}
+
+/*
+ * Only emulate SWP/SWPB executed in ARM state/User mode.
+ * The kernel must be SWP free and SWP{B} does not exist in Thumb.
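emulate_swpX() above retries the exclusive-monitor sequence until the store-exclusive succeeds, rescheduling between -EAGAIN attempts. The same load-linked/store-conditional shape can be modelled in user space with C11 atomics, where a failed compare-exchange plays the role of a failed stxr:

#include <stdatomic.h>
#include <stdint.h>

/* Atomically swap *addr with 'data' and return the old value. */
static uint32_t swp_model(_Atomic uint32_t *addr, uint32_t data)
{
	uint32_t old = atomic_load_explicit(addr, memory_order_relaxed);

	/* loop until our store wins, as the cbz-on-status loop does */
	while (!atomic_compare_exchange_weak_explicit(addr, &old, data,
						      memory_order_seq_cst,
						      memory_order_relaxed))
		;	/* 'old' is refreshed on each failed attempt */

	return old;	/* the value that lands in Rt */
}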
+ */ +static struct undef_hook swp_hooks[] = { + { + .instr_mask = 0x0fb00ff0, + .instr_val = 0x01000090, + .pstate_mask = COMPAT_PSR_MODE_MASK, + .pstate_val = COMPAT_PSR_MODE_USR, + .fn = swp_handler + }, + { } +}; + +static struct insn_emulation_ops swp_ops = { + .name = "swp", + .status = INSN_OBSOLETE, + .hooks = swp_hooks, + .set_hw_mode = NULL, +}; + +static int cp15barrier_handler(struct pt_regs *regs, u32 instr) +{ + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); + + switch (arm_check_condition(instr, regs->pstate)) { + case ARM_OPCODE_CONDTEST_PASS: + break; + case ARM_OPCODE_CONDTEST_FAIL: + /* Condition failed - return to next instruction */ + goto ret; + case ARM_OPCODE_CONDTEST_UNCOND: + /* If unconditional encoding - not a barrier instruction */ + return -EFAULT; + default: + return -EINVAL; + } + + switch (aarch32_insn_mcr_extract_crm(instr)) { + case 10: + /* + * dmb - mcr p15, 0, Rt, c7, c10, 5 + * dsb - mcr p15, 0, Rt, c7, c10, 4 + */ + if (aarch32_insn_mcr_extract_opc2(instr) == 5) { + dmb(sy); + trace_instruction_emulation( + "mcr p15, 0, Rt, c7, c10, 5 ; dmb", regs->pc); + } else { + dsb(sy); + trace_instruction_emulation( + "mcr p15, 0, Rt, c7, c10, 4 ; dsb", regs->pc); + } + break; + case 5: + /* + * isb - mcr p15, 0, Rt, c7, c5, 4 + * + * Taking an exception or returning from one acts as an + * instruction barrier. So no explicit barrier needed here. + */ + trace_instruction_emulation( + "mcr p15, 0, Rt, c7, c5, 4 ; isb", regs->pc); + break; + } + +ret: + pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n", + current->comm, (unsigned long)current->pid, regs->pc); + + regs->pc += 4; + return 0; +} + +#define SCTLR_EL1_CP15BEN (1 << 5) + +static inline void config_sctlr_el1(u32 clear, u32 set) +{ + u32 val; + + asm volatile("mrs %0, sctlr_el1" : "=r" (val)); + val &= ~clear; + val |= set; + asm volatile("msr sctlr_el1, %0" : : "r" (val)); +} + +static void enable_cp15_ben(void *info) +{ + config_sctlr_el1(0, SCTLR_EL1_CP15BEN); +} + +static void disable_cp15_ben(void *info) +{ + config_sctlr_el1(SCTLR_EL1_CP15BEN, 0); +} + +static int cpu_hotplug_notify(struct notifier_block *b, + unsigned long action, void *hcpu) +{ + switch (action) { + case CPU_STARTING: + case CPU_STARTING_FROZEN: + enable_cp15_ben(NULL); + return NOTIFY_DONE; + case CPU_DYING: + case CPU_DYING_FROZEN: + disable_cp15_ben(NULL); + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static struct notifier_block cpu_hotplug_notifier = { + .notifier_call = cpu_hotplug_notify, +}; + +static int cp15_barrier_set_hw_mode(bool enable) +{ + if (enable) { + register_cpu_notifier(&cpu_hotplug_notifier); + on_each_cpu(enable_cp15_ben, NULL, true); + } else { + unregister_cpu_notifier(&cpu_hotplug_notifier); + on_each_cpu(disable_cp15_ben, NULL, true); + } + + return true; +} + +static struct undef_hook cp15_barrier_hooks[] = { + { + .instr_mask = 0x0fff0fdf, + .instr_val = 0x0e070f9a, + .pstate_mask = COMPAT_PSR_MODE_MASK, + .pstate_val = COMPAT_PSR_MODE_USR, + .fn = cp15barrier_handler, + }, + { + .instr_mask = 0x0fff0fff, + .instr_val = 0x0e070f95, + .pstate_mask = COMPAT_PSR_MODE_MASK, + .pstate_val = COMPAT_PSR_MODE_USR, + .fn = cp15barrier_handler, + }, + { } +}; + +static struct insn_emulation_ops cp15_barrier_ops = { + .name = "cp15_barrier", + .status = INSN_DEPRECATED, + .hooks = cp15_barrier_hooks, + .set_hw_mode = cp15_barrier_set_hw_mode, +}; + +/* + * Invoked as late_initcall, since not needed before init spawned. 
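config_sctlr_el1() above is a plain read-modify-write on SCTLR_EL1: read the register, clear the requested bits, set the requested bits, write it back. The pure part of that helper, using the CP15BEN bit from this patch as the example (constant renamed to avoid clashing with the kernel's):

#include <stdint.h>

#define SCTLR_EL1_CP15BEN_BIT	(1u << 5)

/* The kernel brackets this between mrs and msr on SCTLR_EL1. */
static uint32_t sctlr_update(uint32_t val, uint32_t clear, uint32_t set)
{
	val &= ~clear;
	val |= set;
	return val;
}

/* enable:  sctlr_update(val, 0, SCTLR_EL1_CP15BEN_BIT)
 * disable: sctlr_update(val, SCTLR_EL1_CP15BEN_BIT, 0) */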
+ */ +static int __init armv8_deprecated_init(void) +{ + if (IS_ENABLED(CONFIG_SWP_EMULATION)) + register_insn_emulation(&swp_ops); + + if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION)) + register_insn_emulation(&cp15_barrier_ops); + + register_insn_emulation_sysctl(ctl_abi); + + return 0; +} + +late_initcall(armv8_deprecated_init); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c new file mode 100644 index 000000000000..fa62637e63a8 --- /dev/null +++ b/arch/arm64/kernel/cpu_errata.c @@ -0,0 +1,111 @@ +/* + * Contains CPU specific errata definitions + * + * Copyright (C) 2014 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define pr_fmt(fmt) "alternatives: " fmt + +#include <linux/types.h> +#include <asm/cpu.h> +#include <asm/cputype.h> +#include <asm/cpufeature.h> + +#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) +#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) + +/* + * Add a struct or another datatype to the union below if you need + * different means to detect an affected CPU. + */ +struct arm64_cpu_capabilities { + const char *desc; + u16 capability; + bool (*is_affected)(struct arm64_cpu_capabilities *); + union { + struct { + u32 midr_model; + u32 midr_range_min, midr_range_max; + }; + }; +}; + +#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ + MIDR_ARCHITECTURE_MASK) + +static bool __maybe_unused +is_affected_midr_range(struct arm64_cpu_capabilities *entry) +{ + u32 midr = read_cpuid_id(); + + if ((midr & CPU_MODEL_MASK) != entry->midr_model) + return false; + + midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK; + + return (midr >= entry->midr_range_min && midr <= entry->midr_range_max); +} + +#define MIDR_RANGE(model, min, max) \ + .is_affected = is_affected_midr_range, \ + .midr_model = model, \ + .midr_range_min = min, \ + .midr_range_max = max + +struct arm64_cpu_capabilities arm64_errata[] = { +#if defined(CONFIG_ARM64_ERRATUM_826319) || \ + defined(CONFIG_ARM64_ERRATUM_827319) || \ + defined(CONFIG_ARM64_ERRATUM_824069) + { + /* Cortex-A53 r0p[012] */ + .desc = "ARM errata 826319, 827319, 824069", + .capability = ARM64_WORKAROUND_CLEAN_CACHE, + MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_819472 + { + /* Cortex-A53 r0p[01] */ + .desc = "ARM errata 819472", + .capability = ARM64_WORKAROUND_CLEAN_CACHE, + MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_832075 + { + /* Cortex-A57 r0p0 - r1p2 */ + .desc = "ARM erratum 832075", + .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12), + }, +#endif + { + } +}; + +void check_local_cpu_errata(void) +{ + struct arm64_cpu_capabilities *cpus = arm64_errata; + int i; + + for (i = 0; cpus[i].desc; i++) { + if (!cpus[i].is_affected(&cpus[i])) + continue; + + if (!cpus_have_cap(cpus[i].capability)) + pr_info("enabling workaround for %s\n", cpus[i].desc); + 
cpus_set_cap(cpus[i].capability);
+	}
+}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 504fdaa8367e..57b641747534 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -18,6 +18,7 @@
 #include <asm/cachetype.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
+#include <asm/cpufeature.h>
 
 #include <linux/bitops.h>
 #include <linux/bug.h>
@@ -111,6 +112,15 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	diff |= CHECK(cntfrq, boot, cur, cpu);
 
 	/*
+	 * The kernel uses self-hosted debug features and expects CPUs to
+	 * support identical debug features. We presently need CTX_CMPs, WRPs,
+	 * and BRPs to be identical.
+	 * ID_AA64DFR1 is currently RES0.
+	 */
+	diff |= CHECK(id_aa64dfr0, boot, cur, cpu);
+	diff |= CHECK(id_aa64dfr1, boot, cur, cpu);
+
+	/*
 	 * Even in big.LITTLE, processors should be identical instruction-set
 	 * wise.
 	 */
@@ -143,7 +153,12 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	diff |= CHECK(id_isar3, boot, cur, cpu);
 	diff |= CHECK(id_isar4, boot, cur, cpu);
 	diff |= CHECK(id_isar5, boot, cur, cpu);
-	diff |= CHECK(id_mmfr0, boot, cur, cpu);
+	/*
+	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+	 * ACTLR formats could differ across CPUs and therefore would have to
+	 * be trapped for virtualization anyway.
+	 */
+	diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu);
 	diff |= CHECK(id_mmfr1, boot, cur, cpu);
 	diff |= CHECK(id_mmfr2, boot, cur, cpu);
 	diff |= CHECK(id_mmfr3, boot, cur, cpu);
@@ -155,7 +170,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	 * pretend to support them.
 	 */
 	WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
-			"Unsupported CPU feature variation.");
+			"Unsupported CPU feature variation.\n");
 }
 
 static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
@@ -165,6 +180,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_dczid = read_cpuid(DCZID_EL0);
 	info->reg_midr = read_cpuid_id();
 
+	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
+	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
 	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
 	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
 	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
@@ -186,6 +203,8 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
 
 	cpuinfo_detect_icache_policy(info);
+
+	check_local_cpu_errata();
 }
 
 void cpuinfo_store_cpu(void)
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index d18a44940968..8ce9b0577442 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -61,7 +61,8 @@ ENTRY(efi_stub_entry)
 	 */
 	mov	x20, x0		// DTB address
 	ldr	x0, [sp, #16]	// relocated _text address
-	mov	x21, x0
+	ldr	x21, =stext_offset
+	add	x21, x0, x21
 	/*
 	 * Calculate size of the kernel Image (same for original and copy).
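is_affected_midr_range() in cpu_errata.c above matches an erratum entry in two steps: the implementer/architecture/part-number fields must match exactly, then the variant and revision bits are compared as an inclusive range. A stand-alone model, taking the masks as parameters since the exact MIDR_* constants live in <asm/cputype.h>:

#include <stdbool.h>
#include <stdint.h>

static bool midr_in_range(uint32_t midr,
			  uint32_t model_mask, uint32_t model,
			  uint32_t rev_var_mask,
			  uint32_t min, uint32_t max)
{
	if ((midr & model_mask) != model)	/* wrong CPU model */
		return false;

	midr &= rev_var_mask;			/* keep variant+revision */
	return midr >= min && midr <= max;	/* inclusive range */
}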
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 95c49ebc660d..6fac253bc783 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -11,6 +11,7 @@ * */ +#include <linux/dmi.h> #include <linux/efi.h> #include <linux/export.h> #include <linux/memblock.h> @@ -112,8 +113,6 @@ static int __init uefi_init(void) efi.systab->hdr.revision & 0xffff, vendor); retval = efi_config_init(NULL); - if (retval == 0) - set_bit(EFI_CONFIG_TABLES, &efi.flags); out: early_memunmap(efi.systab, sizeof(efi_system_table_t)); @@ -125,17 +124,17 @@ out: */ static __init int is_reserve_region(efi_memory_desc_t *md) { - if (!is_normal_ram(md)) + switch (md->type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_CONVENTIONAL_MEMORY: return 0; - - if (md->attribute & EFI_MEMORY_RUNTIME) - return 1; - - if (md->type == EFI_ACPI_RECLAIM_MEMORY || - md->type == EFI_RESERVED_TYPE) - return 1; - - return 0; + default: + break; + } + return is_normal_ram(md); } static __init void reserve_regions(void) @@ -471,3 +470,17 @@ err_unmap: return -1; } early_initcall(arm64_enter_virtual_mode); + +static int __init arm64_dmi_init(void) +{ + /* + * On arm64, DMI depends on UEFI, and dmi_scan_machine() needs to + * be called early because dmi_id_init(), which is an arch_initcall + * itself, depends on dmi_scan_machine() having been called already. + */ + dmi_scan_machine(); + if (dmi_available) + dmi_set_dump_stack_arch_desc(); + return 0; +} +core_initcall(arm64_dmi_init); diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 38e704e597f7..08cafc518b9a 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -98,8 +98,8 @@ ENTRY(_mcount) mcount_enter - ldr x0, =ftrace_trace_function - ldr x2, [x0] + adrp x0, ftrace_trace_function + ldr x2, [x0, #:lo12:ftrace_trace_function] adr x0, ftrace_stub cmp x0, x2 // if (ftrace_trace_function b.eq skip_ftrace_call // != ftrace_stub) { @@ -115,14 +115,15 @@ skip_ftrace_call: // return; mcount_exit // return; // } skip_ftrace_call: - ldr x1, =ftrace_graph_return - ldr x2, [x1] // if ((ftrace_graph_return - cmp x0, x2 // != ftrace_stub) - b.ne ftrace_graph_caller - - ldr x1, =ftrace_graph_entry // || (ftrace_graph_entry - ldr x2, [x1] // != ftrace_graph_entry_stub)) - ldr x0, =ftrace_graph_entry_stub + adrp x1, ftrace_graph_return + ldr x2, [x1, #:lo12:ftrace_graph_return] + cmp x0, x2 // if ((ftrace_graph_return + b.ne ftrace_graph_caller // != ftrace_stub) + + adrp x1, ftrace_graph_entry // || (ftrace_graph_entry + adrp x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub)) + ldr x2, [x1, #:lo12:ftrace_graph_entry] + add x0, x0, #:lo12:ftrace_graph_entry_stub cmp x0, x2 b.ne ftrace_graph_caller // ftrace_graph_caller(); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 726b910fe6ec..fd4fa374e5d2 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -64,25 +64,26 @@ #define BAD_ERROR 3 .macro kernel_entry, el, regsize = 64 - sub sp, sp, #S_FRAME_SIZE - S_LR // room for LR, SP, SPSR, ELR + sub sp, sp, #S_FRAME_SIZE .if \regsize == 32 mov w0, w0 // zero upper 32 bits of x0 .endif - push x28, x29 - push x26, x27 - push x24, x25 - push x22, x23 - push x20, x21 - push x18, x19 - push x16, x17 - push x14, x15 - push x12, x13 - push x10, x11 - push x8, x9 - push x6, x7 - push x4, x5 - push x2, x3 - push x0, x1 + stp x0, x1, [sp, #16 * 0] + stp x2, x3, [sp, #16 * 1] + stp x4, x5, [sp, #16 
* 2] + stp x6, x7, [sp, #16 * 3] + stp x8, x9, [sp, #16 * 4] + stp x10, x11, [sp, #16 * 5] + stp x12, x13, [sp, #16 * 6] + stp x14, x15, [sp, #16 * 7] + stp x16, x17, [sp, #16 * 8] + stp x18, x19, [sp, #16 * 9] + stp x20, x21, [sp, #16 * 10] + stp x22, x23, [sp, #16 * 11] + stp x24, x25, [sp, #16 * 12] + stp x26, x27, [sp, #16 * 13] + stp x28, x29, [sp, #16 * 14] + .if \el == 0 mrs x21, sp_el0 get_thread_info tsk // Ensure MDSCR_EL1.SS is clear, @@ -118,33 +119,31 @@ .if \el == 0 ct_user_enter ldr x23, [sp, #S_SP] // load return stack pointer + msr sp_el0, x23 .endif + msr elr_el1, x21 // set up the return data + msr spsr_el1, x22 .if \ret ldr x1, [sp, #S_X1] // preserve x0 (syscall return) - add sp, sp, S_X2 .else - pop x0, x1 + ldp x0, x1, [sp, #16 * 0] .endif - pop x2, x3 // load the rest of the registers - pop x4, x5 - pop x6, x7 - pop x8, x9 - msr elr_el1, x21 // set up the return data - msr spsr_el1, x22 - .if \el == 0 - msr sp_el0, x23 - .endif - pop x10, x11 - pop x12, x13 - pop x14, x15 - pop x16, x17 - pop x18, x19 - pop x20, x21 - pop x22, x23 - pop x24, x25 - pop x26, x27 - pop x28, x29 - ldr lr, [sp], #S_FRAME_SIZE - S_LR // load LR and restore SP + ldp x2, x3, [sp, #16 * 1] + ldp x4, x5, [sp, #16 * 2] + ldp x6, x7, [sp, #16 * 3] + ldp x8, x9, [sp, #16 * 4] + ldp x10, x11, [sp, #16 * 5] + ldp x12, x13, [sp, #16 * 6] + ldp x14, x15, [sp, #16 * 7] + ldp x16, x17, [sp, #16 * 8] + ldp x18, x19, [sp, #16 * 9] + ldp x20, x21, [sp, #16 * 10] + ldp x22, x23, [sp, #16 * 11] + ldp x24, x25, [sp, #16 * 12] + ldp x26, x27, [sp, #16 * 13] + ldp x28, x29, [sp, #16 * 14] + ldr lr, [sp, #S_LR] + add sp, sp, #S_FRAME_SIZE // restore sp eret // return to kernel .endm @@ -168,7 +167,8 @@ tsk .req x28 // current thread_info * Interrupt handling. */ .macro irq_handler - ldr x1, handle_arch_irq + adrp x1, handle_arch_irq + ldr x1, [x1, #:lo12:handle_arch_irq] mov x0, sp blr x1 .endm @@ -455,8 +455,8 @@ el0_da: bic x0, x26, #(0xff << 56) mov x1, x25 mov x2, sp - adr lr, ret_to_user - b do_mem_abort + bl do_mem_abort + b ret_to_user el0_ia: /* * Instruction abort handling @@ -468,8 +468,8 @@ el0_ia: mov x0, x26 orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts mov x2, sp - adr lr, ret_to_user - b do_mem_abort + bl do_mem_abort + b ret_to_user el0_fpsimd_acc: /* * Floating Point or Advanced SIMD access @@ -478,8 +478,8 @@ el0_fpsimd_acc: ct_user_exit mov x0, x25 mov x1, sp - adr lr, ret_to_user - b do_fpsimd_acc + bl do_fpsimd_acc + b ret_to_user el0_fpsimd_exc: /* * Floating Point or Advanced SIMD exception @@ -488,8 +488,8 @@ el0_fpsimd_exc: ct_user_exit mov x0, x25 mov x1, sp - adr lr, ret_to_user - b do_fpsimd_exc + bl do_fpsimd_exc + b ret_to_user el0_sp_pc: /* * Stack or PC alignment exception handling @@ -500,8 +500,8 @@ el0_sp_pc: mov x0, x26 mov x1, x25 mov x2, sp - adr lr, ret_to_user - b do_sp_pc_abort + bl do_sp_pc_abort + b ret_to_user el0_undef: /* * Undefined instruction @@ -510,8 +510,8 @@ el0_undef: enable_dbg_and_irq ct_user_exit mov x0, sp - adr lr, ret_to_user - b do_undefinstr + bl do_undefinstr + b ret_to_user el0_dbg: /* * Debug exception handling @@ -530,8 +530,8 @@ el0_inv: mov x0, sp mov x1, #BAD_SYNC mrs x2, esr_el1 - adr lr, ret_to_user - b bad_mode + bl bad_mode + b ret_to_user ENDPROC(el0_sync) .align 6 @@ -653,14 +653,15 @@ el0_svc_naked: // compat entry point ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks tst x16, #_TIF_SYSCALL_WORK b.ne __sys_trace - adr lr, ret_fast_syscall // return address cmp scno, sc_nr // check upper syscall limit b.hs 
ni_sys ldr x16, [stbl, scno, lsl #3] // address in the syscall table - br x16 // call sys_* routine + blr x16 // call sys_* routine + b ret_fast_syscall ni_sys: mov x0, sp - b do_ni_syscall + bl do_ni_syscall + b ret_fast_syscall ENDPROC(el0_svc) /* @@ -668,26 +669,38 @@ ENDPROC(el0_svc) * switches, and waiting for our parent to respond. */ __sys_trace: - mov x0, sp + mov w0, #-1 // set default errno for + cmp scno, x0 // user-issued syscall(-1) + b.ne 1f + mov x0, #-ENOSYS + str x0, [sp, #S_X0] +1: mov x0, sp bl syscall_trace_enter - adr lr, __sys_trace_return // return address + cmp w0, #-1 // skip the syscall? + b.eq __sys_trace_return_skipped uxtw scno, w0 // syscall number (possibly new) mov x1, sp // pointer to regs cmp scno, sc_nr // check upper syscall limit - b.hs ni_sys + b.hs __ni_sys_trace ldp x0, x1, [sp] // restore the syscall args ldp x2, x3, [sp, #S_X2] ldp x4, x5, [sp, #S_X4] ldp x6, x7, [sp, #S_X6] ldr x16, [stbl, scno, lsl #3] // address in the syscall table - br x16 // call sys_* routine + blr x16 // call sys_* routine __sys_trace_return: - str x0, [sp] // save returned x0 + str x0, [sp, #S_X0] // save returned x0 +__sys_trace_return_skipped: mov x0, sp bl syscall_trace_exit b ret_to_user +__ni_sys_trace: + mov x0, sp + bl do_ni_syscall + b __sys_trace_return + /* * Special system call wrappers. */ @@ -695,6 +708,3 @@ ENTRY(sys_rt_sigreturn_wrapper) mov x0, sp b sys_rt_sigreturn ENDPROC(sys_rt_sigreturn_wrapper) - -ENTRY(handle_arch_irq) - .quad 0 diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 0a6e4f924df8..8ce88e08c030 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -132,6 +132,8 @@ efi_head: #endif #ifdef CONFIG_EFI + .globl stext_offset + .set stext_offset, stext - efi_head .align 3 pe_header: .ascii "PE" @@ -155,12 +157,12 @@ optional_header: .long 0 // SizeOfInitializedData .long 0 // SizeOfUninitializedData .long efi_stub_entry - efi_head // AddressOfEntryPoint - .long stext - efi_head // BaseOfCode + .long stext_offset // BaseOfCode extra_header_fields: .quad 0 // ImageBase - .long 0x20 // SectionAlignment - .long 0x8 // FileAlignment + .long 0x1000 // SectionAlignment + .long PECOFF_FILE_ALIGNMENT // FileAlignment .short 0 // MajorOperatingSystemVersion .short 0 // MinorOperatingSystemVersion .short 0 // MajorImageVersion @@ -172,7 +174,7 @@ extra_header_fields: .long _end - efi_head // SizeOfImage // Everything before the kernel image is considered part of the header - .long stext - efi_head // SizeOfHeaders + .long stext_offset // SizeOfHeaders .long 0 // CheckSum .short 0xa // Subsystem (EFI application) .short 0 // DllCharacteristics @@ -217,16 +219,24 @@ section_table: .byte 0 .byte 0 // end of 0 padding of section name .long _end - stext // VirtualSize - .long stext - efi_head // VirtualAddress + .long stext_offset // VirtualAddress .long _edata - stext // SizeOfRawData - .long stext - efi_head // PointerToRawData + .long stext_offset // PointerToRawData .long 0 // PointerToRelocations (0 for executables) .long 0 // PointerToLineNumbers (0 for executables) .short 0 // NumberOfRelocations (0 for executables) .short 0 // NumberOfLineNumbers (0 for executables) .long 0xe0500020 // Characteristics (section flags) - .align 5 + + /* + * EFI will load stext onwards at the 4k section alignment + * described in the PE/COFF header. 
To ensure that instruction + * sequences using an adrp and a :lo12: immediate will function + * correctly at this alignment, we must ensure that stext is + * placed at a 4k boundary in the Image to begin with. + */ + .align 12 #endif ENTRY(stext) @@ -238,7 +248,13 @@ ENTRY(stext) mov x0, x22 bl lookup_processor_type mov x23, x0 // x23=current cpu_table - cbz x23, __error_p // invalid processor (x23=0)? + /* + * __error_p may end up out of range for cbz if text areas are + * aligned up to section sizes. + */ + cbnz x23, 1f // invalid processor (x23=0)? + b __error_p +1: bl __vet_fdt bl __create_page_tables // x25=TTBR0, x26=TTBR1 /* @@ -250,13 +266,214 @@ ENTRY(stext) */ ldr x27, __switch_data // address to jump to after // MMU has been enabled - adr lr, __enable_mmu // return (PIC) address + adrp lr, __enable_mmu // return (PIC) address + add lr, lr, #:lo12:__enable_mmu ldr x12, [x23, #CPU_INFO_SETUP] add x12, x12, x28 // __virt_to_phys br x12 // initialise processor ENDPROC(stext) /* + * Determine validity of the x21 FDT pointer. + * The dtb must be 8-byte aligned and live in the first 512M of memory. + */ +__vet_fdt: + tst x21, #0x7 + b.ne 1f + cmp x21, x24 + b.lt 1f + mov x0, #(1 << 29) + add x0, x0, x24 + cmp x21, x0 + b.ge 1f + ret +1: + mov x21, #0 + ret +ENDPROC(__vet_fdt) +/* + * Macro to create a table entry to the next page. + * + * tbl: page table address + * virt: virtual address + * shift: #imm page table shift + * ptrs: #imm pointers per table page + * + * Preserves: virt + * Corrupts: tmp1, tmp2 + * Returns: tbl -> next level table page address + */ + .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 + lsr \tmp1, \virt, #\shift + and \tmp1, \tmp1, #\ptrs - 1 // table index + add \tmp2, \tbl, #PAGE_SIZE + orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type + str \tmp2, [\tbl, \tmp1, lsl #3] + add \tbl, \tbl, #PAGE_SIZE // next level table page + .endm + +/* + * Macro to populate the PGD (and possibily PUD) for the corresponding + * block entry in the next level (tbl) for the given virtual address. + * + * Preserves: tbl, next, virt + * Corrupts: tmp1, tmp2 + */ + .macro create_pgd_entry, tbl, virt, tmp1, tmp2 + create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 +#if SWAPPER_PGTABLE_LEVELS == 3 + create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 +#endif + .endm + +/* + * Macro to populate block entries in the page table for the start..end + * virtual range (inclusive). + * + * Preserves: tbl, flags + * Corrupts: phys, start, end, pstate + */ + .macro create_block_map, tbl, flags, phys, start, end + lsr \phys, \phys, #BLOCK_SHIFT + lsr \start, \start, #BLOCK_SHIFT + and \start, \start, #PTRS_PER_PTE - 1 // table index + orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry + lsr \end, \end, #BLOCK_SHIFT + and \end, \end, #PTRS_PER_PTE - 1 // table end index +9999: str \phys, [\tbl, \start, lsl #3] // store the entry + add \start, \start, #1 // next entry + add \phys, \phys, #BLOCK_SIZE // next block + cmp \start, \end + b.ls 9999b + .endm + +/* + * Setup the initial page tables. We only setup the barest amount which is + * required to get the kernel running. 
The following sections are required: + * - identity mapping to enable the MMU (low address, TTBR0) + * - first few MB of the kernel linear mapping to jump to once the MMU has + * been enabled, including the FDT blob (TTBR1) + * - pgd entry for fixed mappings (TTBR1) + */ +__create_page_tables: + pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses + mov x27, lr + + /* + * Invalidate the idmap and swapper page tables to avoid potential + * dirty cache lines being evicted. + */ + mov x0, x25 + add x1, x26, #SWAPPER_DIR_SIZE + bl __inval_cache_range + + /* + * Clear the idmap and swapper page tables. + */ + mov x0, x25 + add x6, x26, #SWAPPER_DIR_SIZE +1: stp xzr, xzr, [x0], #16 + stp xzr, xzr, [x0], #16 + stp xzr, xzr, [x0], #16 + stp xzr, xzr, [x0], #16 + cmp x0, x6 + b.lo 1b + + ldr x7, =MM_MMUFLAGS + + /* + * Create the identity mapping. + */ + mov x0, x25 // idmap_pg_dir + ldr x3, =KERNEL_START + add x3, x3, x28 // __pa(KERNEL_START) + create_pgd_entry x0, x3, x5, x6 + ldr x6, =KERNEL_END + mov x5, x3 // __pa(KERNEL_START) + add x6, x6, x28 // __pa(KERNEL_END) + create_block_map x0, x7, x3, x5, x6 + + /* + * Map the kernel image (starting with PHYS_OFFSET). + */ + mov x0, x26 // swapper_pg_dir + mov x5, #PAGE_OFFSET + create_pgd_entry x0, x5, x3, x6 + ldr x6, =KERNEL_END + mov x3, x24 // phys offset + create_block_map x0, x7, x3, x5, x6 + + /* + * Map the FDT blob (maximum 2MB; must be within 512MB of + * PHYS_OFFSET). + */ + mov x3, x21 // FDT phys address + and x3, x3, #~((1 << 21) - 1) // 2MB aligned + mov x6, #PAGE_OFFSET + sub x5, x3, x24 // subtract PHYS_OFFSET + tst x5, #~((1 << 29) - 1) // within 512MB? + csel x21, xzr, x21, ne // zero the FDT pointer + b.ne 1f + add x5, x5, x6 // __va(FDT blob) + add x6, x5, #1 << 21 // 2MB for the FDT blob + sub x6, x6, #1 // inclusive range + create_block_map x0, x7, x3, x5, x6 +1: + /* + * Since the page tables have been populated with non-cacheable + * accesses (MMU disabled), invalidate the idmap and swapper page + * tables again to remove any speculatively loaded cache lines. + */ + mov x0, x25 + add x1, x26, #SWAPPER_DIR_SIZE + bl __inval_cache_range + + mov lr, x27 + ret +ENDPROC(__create_page_tables) + .ltorg + + .align 3 + .type __switch_data, %object +__switch_data: + .quad __mmap_switched + .quad __bss_start // x6 + .quad __bss_stop // x7 + .quad processor_id // x4 + .quad __fdt_pointer // x5 + .quad memstart_addr // x6 + .quad init_thread_union + THREAD_START_SP // sp + +/* + * The following fragment of code is executed with the MMU on in MMU mode, and + * uses absolute addresses; this is not position independent. + */ +__mmap_switched: + adr x3, __switch_data + 8 + + ldp x6, x7, [x3], #16 +1: cmp x6, x7 + b.hs 2f + str xzr, [x6], #8 // Clear BSS + b 1b +2: + ldp x4, x5, [x3], #16 + ldr x6, [x3], #8 + ldr x16, [x3] + mov sp, x16 + str x22, [x4] // Save processor ID + str x21, [x5] // Save FDT pointer + str x24, [x6] // Save PHYS_OFFSET + mov x29, #0 + b start_kernel +ENDPROC(__mmap_switched) + +/* + * end early head section, begin head code that is also used for + * hotplug and needs to have the same protections as the text region + */ + .section ".text","ax" +/* * If we're fortunate enough to boot at EL2, ensure that the world is * sane before dropping to EL1. 
* @@ -331,7 +548,8 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems msr vttbr_el2, xzr /* Hypervisor stub */ - adr x0, __hyp_stub_vectors + adrp x0, __hyp_stub_vectors + add x0, x0, #:lo12:__hyp_stub_vectors msr vbar_el2, x0 /* spsr */ @@ -492,183 +710,6 @@ ENDPROC(__calc_phys_offset) .quad PAGE_OFFSET /* - * Macro to create a table entry to the next page. - * - * tbl: page table address - * virt: virtual address - * shift: #imm page table shift - * ptrs: #imm pointers per table page - * - * Preserves: virt - * Corrupts: tmp1, tmp2 - * Returns: tbl -> next level table page address - */ - .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 - lsr \tmp1, \virt, #\shift - and \tmp1, \tmp1, #\ptrs - 1 // table index - add \tmp2, \tbl, #PAGE_SIZE - orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type - str \tmp2, [\tbl, \tmp1, lsl #3] - add \tbl, \tbl, #PAGE_SIZE // next level table page - .endm - -/* - * Macro to populate the PGD (and possibily PUD) for the corresponding - * block entry in the next level (tbl) for the given virtual address. - * - * Preserves: tbl, next, virt - * Corrupts: tmp1, tmp2 - */ - .macro create_pgd_entry, tbl, virt, tmp1, tmp2 - create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 -#if SWAPPER_PGTABLE_LEVELS == 3 - create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 -#endif - .endm - -/* - * Macro to populate block entries in the page table for the start..end - * virtual range (inclusive). - * - * Preserves: tbl, flags - * Corrupts: phys, start, end, pstate - */ - .macro create_block_map, tbl, flags, phys, start, end - lsr \phys, \phys, #BLOCK_SHIFT - lsr \start, \start, #BLOCK_SHIFT - and \start, \start, #PTRS_PER_PTE - 1 // table index - orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry - lsr \end, \end, #BLOCK_SHIFT - and \end, \end, #PTRS_PER_PTE - 1 // table end index -9999: str \phys, [\tbl, \start, lsl #3] // store the entry - add \start, \start, #1 // next entry - add \phys, \phys, #BLOCK_SIZE // next block - cmp \start, \end - b.ls 9999b - .endm - -/* - * Setup the initial page tables. We only setup the barest amount which is - * required to get the kernel running. The following sections are required: - * - identity mapping to enable the MMU (low address, TTBR0) - * - first few MB of the kernel linear mapping to jump to once the MMU has - * been enabled, including the FDT blob (TTBR1) - * - pgd entry for fixed mappings (TTBR1) - */ -__create_page_tables: - pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses - mov x27, lr - - /* - * Invalidate the idmap and swapper page tables to avoid potential - * dirty cache lines being evicted. - */ - mov x0, x25 - add x1, x26, #SWAPPER_DIR_SIZE - bl __inval_cache_range - - /* - * Clear the idmap and swapper page tables. - */ - mov x0, x25 - add x6, x26, #SWAPPER_DIR_SIZE -1: stp xzr, xzr, [x0], #16 - stp xzr, xzr, [x0], #16 - stp xzr, xzr, [x0], #16 - stp xzr, xzr, [x0], #16 - cmp x0, x6 - b.lo 1b - - ldr x7, =MM_MMUFLAGS - - /* - * Create the identity mapping. - */ - mov x0, x25 // idmap_pg_dir - ldr x3, =KERNEL_START - add x3, x3, x28 // __pa(KERNEL_START) - create_pgd_entry x0, x3, x5, x6 - ldr x6, =KERNEL_END - mov x5, x3 // __pa(KERNEL_START) - add x6, x6, x28 // __pa(KERNEL_END) - create_block_map x0, x7, x3, x5, x6 - - /* - * Map the kernel image (starting with PHYS_OFFSET). 
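The create_block_map macro (moved earlier in head.S by this patch) fills consecutive block entries covering an inclusive start..end virtual range. Its index arithmetic in C, assuming for illustration a 4KiB granule with 2MiB blocks (BLOCK_SHIFT = 21) and 512-entry tables:

#include <stdint.h>

#define MODEL_BLOCK_SHIFT	21	/* illustrative: 2MiB blocks */
#define MODEL_BLOCK_SIZE	(UINT64_C(1) << MODEL_BLOCK_SHIFT)
#define MODEL_PTRS_PER_PTE	512

static void block_map(uint64_t *tbl, uint64_t flags, uint64_t phys,
		      uint64_t start, uint64_t end)
{
	uint64_t idx  = (start >> MODEL_BLOCK_SHIFT) & (MODEL_PTRS_PER_PTE - 1);
	uint64_t last = (end   >> MODEL_BLOCK_SHIFT) & (MODEL_PTRS_PER_PTE - 1);
	/* block-align the output address, then merge in the attributes */
	uint64_t entry = flags | ((phys >> MODEL_BLOCK_SHIFT) << MODEL_BLOCK_SHIFT);

	for (; idx <= last; idx++, entry += MODEL_BLOCK_SIZE)
		tbl[idx] = entry;	/* one block entry per iteration */
}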
- */ - mov x0, x26 // swapper_pg_dir - mov x5, #PAGE_OFFSET - create_pgd_entry x0, x5, x3, x6 - ldr x6, =KERNEL_END - mov x3, x24 // phys offset - create_block_map x0, x7, x3, x5, x6 - - /* - * Map the FDT blob (maximum 2MB; must be within 512MB of - * PHYS_OFFSET). - */ - mov x3, x21 // FDT phys address - and x3, x3, #~((1 << 21) - 1) // 2MB aligned - mov x6, #PAGE_OFFSET - sub x5, x3, x24 // subtract PHYS_OFFSET - tst x5, #~((1 << 29) - 1) // within 512MB? - csel x21, xzr, x21, ne // zero the FDT pointer - b.ne 1f - add x5, x5, x6 // __va(FDT blob) - add x6, x5, #1 << 21 // 2MB for the FDT blob - sub x6, x6, #1 // inclusive range - create_block_map x0, x7, x3, x5, x6 -1: - /* - * Since the page tables have been populated with non-cacheable - * accesses (MMU disabled), invalidate the idmap and swapper page - * tables again to remove any speculatively loaded cache lines. - */ - mov x0, x25 - add x1, x26, #SWAPPER_DIR_SIZE - bl __inval_cache_range - - mov lr, x27 - ret -ENDPROC(__create_page_tables) - .ltorg - - .align 3 - .type __switch_data, %object -__switch_data: - .quad __mmap_switched - .quad __bss_start // x6 - .quad __bss_stop // x7 - .quad processor_id // x4 - .quad __fdt_pointer // x5 - .quad memstart_addr // x6 - .quad init_thread_union + THREAD_START_SP // sp - -/* - * The following fragment of code is executed with the MMU on in MMU mode, and - * uses absolute addresses; this is not position independent. - */ -__mmap_switched: - adr x3, __switch_data + 8 - - ldp x6, x7, [x3], #16 -1: cmp x6, x7 - b.hs 2f - str xzr, [x6], #8 // Clear BSS - b 1b -2: - ldp x4, x5, [x3], #16 - ldr x6, [x3], #8 - ldr x16, [x3] - mov sp, x16 - str x22, [x4] // Save processor ID - str x21, [x5] // Save FDT pointer - str x24, [x6] // Save PHYS_OFFSET - mov x29, #0 - b start_kernel -ENDPROC(__mmap_switched) - -/* * Exception handling. Something went wrong and we can't proceed. We ought to * tell the user, but since we don't have any guarantee that we're even * running on the right architecture, we do virtually nothing. @@ -715,22 +756,3 @@ __lookup_processor_type_data: .quad . .quad cpu_table .size __lookup_processor_type_data, . - __lookup_processor_type_data - -/* - * Determine validity of the x21 FDT pointer. - * The dtb must be 8-byte aligned and live in the first 512M of memory. - */ -__vet_fdt: - tst x21, #0x7 - b.ne 1f - cmp x21, x24 - b.lt 1f - mov x0, #(1 << 29) - add x0, x0, x24 - cmp x21, x0 - b.ge 1f - ret -1: - mov x21, #0 - ret -ENDPROC(__vet_fdt) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 8cd27fedc8b6..7e9327a0986d 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -960,3 +960,29 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst, return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); } + +bool aarch32_insn_is_wide(u32 insn) +{ + return insn >= 0xe800; +} + +/* + * Macros/defines for extracting register numbers from instruction. 
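The aarch32_insn_* helpers added to insn.c just below all use the same mask-and-shift pattern to pull a fixed-width field out of an instruction word. The general form, with the 4-bit register fields as the example:

#include <stdint.h>

/* Extract a 'width'-bit field starting at bit 'offset'; AArch32
 * register numbers (Rn/Rt/Rt2) are 4-bit fields. */
static uint32_t insn_field(uint32_t insn, unsigned int offset,
			   unsigned int width)
{
	return (insn >> offset) & ((1u << width) - 1);
}

/* e.g. insn_field(instr, rn_offset, 4) mirrors
 * aarch32_insn_extract_reg_num(instr, A32_RN_OFFSET); the offset
 * values themselves live in <asm/insn.h>. */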
+ */ +u32 aarch32_insn_extract_reg_num(u32 insn, int offset) +{ + return (insn & (0xf << offset)) >> offset; +} + +#define OPC2_MASK 0x7 +#define OPC2_OFFSET 5 +u32 aarch32_insn_mcr_extract_opc2(u32 insn) +{ + return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET; +} + +#define CRM_MASK 0xf +u32 aarch32_insn_mcr_extract_crm(u32 insn) +{ + return insn & CRM_MASK; +} diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c index 7d37ead4d199..354be2a872ae 100644 --- a/arch/arm64/kernel/io.c +++ b/arch/arm64/kernel/io.c @@ -25,12 +25,26 @@ */ void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) { - unsigned char *t = to; - while (count) { + while (count && (!IS_ALIGNED((unsigned long)from, 8) || + !IS_ALIGNED((unsigned long)to, 8))) { + *(u8 *)to = __raw_readb(from); + from++; + to++; count--; - *t = readb(from); - t++; + } + + while (count >= 8) { + *(u64 *)to = __raw_readq(from); + from += 8; + to += 8; + count -= 8; + } + + while (count) { + *(u8 *)to = __raw_readb(from); from++; + to++; + count--; } } EXPORT_SYMBOL(__memcpy_fromio); @@ -40,12 +54,26 @@ EXPORT_SYMBOL(__memcpy_fromio); */ void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) { - const unsigned char *f = from; - while (count) { + while (count && (!IS_ALIGNED((unsigned long)to, 8) || + !IS_ALIGNED((unsigned long)from, 8))) { + __raw_writeb(*(volatile u8 *)from, to); + from++; + to++; count--; - writeb(*f, to); - f++; + } + + while (count >= 8) { + __raw_writeq(*(volatile u64 *)from, to); + from += 8; + to += 8; + count -= 8; + } + + while (count) { + __raw_writeb(*(volatile u8 *)from, to); + from++; to++; + count--; } } EXPORT_SYMBOL(__memcpy_toio); @@ -55,10 +83,28 @@ EXPORT_SYMBOL(__memcpy_toio); */ void __memset_io(volatile void __iomem *dst, int c, size_t count) { - while (count) { + u64 qc = (u8)c; + + qc |= qc << 8; + qc |= qc << 16; + qc |= qc << 32; + + while (count && !IS_ALIGNED((unsigned long)dst, 8)) { + __raw_writeb(c, dst); + dst++; count--; - writeb(c, dst); + } + + while (count >= 8) { + __raw_writeq(qc, dst); + dst += 8; + count -= 8; + } + + while (count) { + __raw_writeb(c, dst); dst++; + count--; } } EXPORT_SYMBOL(__memset_io); diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 071a6ec13bd8..240b75c0e94f 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -40,6 +40,8 @@ int arch_show_interrupts(struct seq_file *p, int prec) return 0; } +void (*handle_arch_irq)(struct pt_regs *) = NULL; + void __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) { if (handle_arch_irq) diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c index 263a166291fb..4f1fec7a46db 100644 --- a/arch/arm64/kernel/jump_label.c +++ b/arch/arm64/kernel/jump_label.c @@ -22,9 +22,8 @@ #ifdef HAVE_JUMP_LABEL -static void __arch_jump_label_transform(struct jump_entry *entry, - enum jump_label_type type, - bool is_static) +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) { void *addr = (void *)entry->code; u32 insn; @@ -37,22 +36,18 @@ static void __arch_jump_label_transform(struct jump_entry *entry, insn = aarch64_insn_gen_nop(); } - if (is_static) - aarch64_insn_patch_text_nosync(addr, insn); - else - aarch64_insn_patch_text(&addr, &insn, 1); -} - -void arch_jump_label_transform(struct jump_entry *entry, - enum jump_label_type type) -{ - __arch_jump_label_transform(entry, type, false); + aarch64_insn_patch_text(&addr, &insn, 1); } void arch_jump_label_transform_static(struct 
jump_entry *entry,
					    enum jump_label_type type)
 {
-	__arch_jump_label_transform(entry, type, true);
+	/*
+	 * We use the architected A64 NOP in arch_static_branch, so there's no
+	 * need to patch an identical A64 NOP over the top of it here. The core
+	 * will call arch_jump_label_transform from a module notifier if the
+	 * NOP needs to be replaced by a branch.
+	 */
 }
 
 #endif	/* HAVE_JUMP_LABEL */
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 1eb1cc955139..fd027b101de5 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -26,6 +26,7 @@
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
 #include <asm/insn.h>
+#include <asm/sections.h>
 
 #define	AARCH64_INSN_IMM_MOVNZ		AARCH64_INSN_IMM_MAX
 #define	AARCH64_INSN_IMM_MOVK		AARCH64_INSN_IMM_16
@@ -394,3 +395,20 @@ overflow:
 	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
 	return -ENOEXEC;
 }
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *me)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
+			apply_alternatives((void *)s->sh_addr, s->sh_size);
+			return 0;
+		}
+	}
+
+	return 0;
+}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index aa29ecb4f800..25a5308744b1 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -169,8 +169,14 @@ armpmu_event_set_period(struct perf_event *event,
 		ret = 1;
 	}
 
-	if (left > (s64)armpmu->max_period)
-		left = armpmu->max_period;
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
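This comment introduces the clamp applied just below: by never programming more than max_period/2, the counter cannot wrap past the value that was just written even if the overflow interrupt is delayed. The clamp and the counter seed it feeds, in C:

#include <stdint.h>

/* 'left' is the number of events until the desired overflow; the
 * counter counts up and overflows at 2^n, so it is seeded with -left. */
static uint64_t seed_counter(int64_t left, uint64_t max_period)
{
	if (left > (int64_t)(max_period >> 1))
		left = max_period >> 1;		/* headroom for IRQ latency */

	return (uint64_t)-left;			/* value programmed */
}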
+ */ + if (left > (armpmu->max_period >> 1)) + left = armpmu->max_period >> 1; local64_set(&hwc->prev_count, (u64)-left); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 8a4ae8e73213..d882b833dbdb 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -27,6 +27,7 @@ #include <linux/smp.h> #include <linux/ptrace.h> #include <linux/user.h> +#include <linux/seccomp.h> #include <linux/security.h> #include <linux/init.h> #include <linux/signal.h> @@ -551,6 +552,32 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, return ret; } +static int system_call_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + int syscallno = task_pt_regs(target)->syscallno; + + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, + &syscallno, 0, -1); +} + +static int system_call_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int syscallno, ret; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1); + if (ret) + return ret; + + task_pt_regs(target)->syscallno = syscallno; + return ret; +} + enum aarch64_regset { REGSET_GPR, REGSET_FPR, @@ -559,6 +586,7 @@ enum aarch64_regset { REGSET_HW_BREAK, REGSET_HW_WATCH, #endif + REGSET_SYSTEM_CALL, }; static const struct user_regset aarch64_regsets[] = { @@ -608,6 +636,14 @@ static const struct user_regset aarch64_regsets[] = { .set = hw_break_set, }, #endif + [REGSET_SYSTEM_CALL] = { + .core_note_type = NT_ARM_SYSTEM_CALL, + .n = 1, + .size = sizeof(int), + .align = sizeof(int), + .get = system_call_get, + .set = system_call_set, + }, }; static const struct user_regset_view user_aarch64_view = { @@ -1114,6 +1150,10 @@ static void tracehook_report_syscall(struct pt_regs *regs, asmlinkage int syscall_trace_enter(struct pt_regs *regs) { + /* Do the secure computing check first; failures should be fast. */ + if (secure_computing() == -1) + return -1; + if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 2437196cc5d4..b80991166754 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -43,12 +43,14 @@ #include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/efi.h> +#include <linux/personality.h> #include <asm/fixmap.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/elf.h> #include <asm/cputable.h> +#include <asm/cpufeature.h> #include <asm/cpu_ops.h> #include <asm/sections.h> #include <asm/setup.h> @@ -72,13 +74,15 @@ EXPORT_SYMBOL_GPL(elf_hwcap); COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ - COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV) + COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\ + COMPAT_HWCAP_LPAE) unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; unsigned int compat_elf_hwcap2 __read_mostly; #endif +DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); + static const char *cpu_name; -static const char *machine_name; phys_addr_t __fdt_pointer __initdata; /* @@ -116,12 +120,16 @@ void __init early_print(const char *str, ...) 
void __init smp_setup_processor_id(void) { + u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; + cpu_logical_map(0) = mpidr; + /* * clear __my_cpu_offset on boot CPU to avoid hang caused by * using percpu variable early, for example, lockdep will * access percpu variable inside lock_release */ set_my_cpu_offset(0); + pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr); } bool arch_match_cpu_phys_id(int cpu, u64 phys_id) @@ -311,7 +319,7 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) cpu_relax(); } - machine_name = of_flat_dt_get_machine_name(); + dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name()); } /* @@ -376,6 +384,7 @@ void __init setup_arch(char **cmdline_p) *cmdline_p = boot_command_line; + early_fixmap_init(); early_ioremap_init(); parse_early_param(); @@ -398,7 +407,6 @@ void __init setup_arch(char **cmdline_p) psci_init(); - cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; cpu_read_bootcpu_ops(); #ifdef CONFIG_SMP smp_init_cpus(); @@ -447,14 +455,50 @@ static const char *hwcap_str[] = { NULL }; +#ifdef CONFIG_COMPAT +static const char *compat_hwcap_str[] = { + "swp", + "half", + "thumb", + "26bit", + "fastmult", + "fpa", + "vfp", + "edsp", + "java", + "iwmmxt", + "crunch", + "thumbee", + "neon", + "vfpv3", + "vfpv3d16", + "tls", + "vfpv4", + "idiva", + "idivt", + "vfpd32", + "lpae", + "evtstrm" +}; + +static const char *compat_hwcap2_str[] = { + "aes", + "pmull", + "sha1", + "sha2", + "crc32", + NULL +}; +#endif /* CONFIG_COMPAT */ + static int c_show(struct seq_file *m, void *v) { - int i; - - seq_printf(m, "Processor\t: %s rev %d (%s)\n", - cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); + int i, j; for_each_online_cpu(i) { + struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); + u32 midr = cpuinfo->reg_midr; + /* * glibc reads /proc/cpuinfo to determine the number of * online processors, looking for lines beginning with @@ -463,24 +507,38 @@ static int c_show(struct seq_file *m, void *v) #ifdef CONFIG_SMP seq_printf(m, "processor\t: %d\n", i); #endif - } - - /* dump out the processor features */ - seq_puts(m, "Features\t: "); - - for (i = 0; hwcap_str[i]; i++) - if (elf_hwcap & (1 << i)) - seq_printf(m, "%s ", hwcap_str[i]); - - seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); - seq_printf(m, "CPU architecture: AArch64\n"); - seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); - seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); - seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); - seq_puts(m, "\n"); - - seq_printf(m, "Hardware\t: %s\n", machine_name); + /* + * Dump out the common processor features in a single line. + * Userspace should read the hwcaps with getauxval(AT_HWCAP) + * rather than attempting to parse this, but there's a body of + * software which does already (at least for 32-bit). 
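The comment above (continued below) warns that parsing /proc/cpuinfo is the wrong interface for feature detection. The supported route, for both 32-bit and 64-bit processes, is the ELF auxiliary vector. A minimal userspace sketch; the HWCAP_AES fallback definition is an assumption for older headers (the value normally comes from <asm/hwcap.h>):

#include <elf.h>
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_AES
#define HWCAP_AES (1 << 3)	/* arm64 bit; illustrative fallback */
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("aes: %s\n", (hwcap & HWCAP_AES) ? "yes" : "no");
	return 0;
}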
+ */ + seq_puts(m, "Features\t:"); + if (personality(current->personality) == PER_LINUX32) { +#ifdef CONFIG_COMPAT + for (j = 0; compat_hwcap_str[j]; j++) + if (compat_elf_hwcap & (1 << j)) + seq_printf(m, " %s", compat_hwcap_str[j]); + + for (j = 0; compat_hwcap2_str[j]; j++) + if (compat_elf_hwcap2 & (1 << j)) + seq_printf(m, " %s", compat_hwcap2_str[j]); +#endif /* CONFIG_COMPAT */ + } else { + for (j = 0; hwcap_str[j]; j++) + if (elf_hwcap & (1 << j)) + seq_printf(m, " %s", hwcap_str[j]); + } + seq_puts(m, "\n"); + + seq_printf(m, "CPU implementer\t: 0x%02x\n", + MIDR_IMPLEMENTOR(midr)); + seq_printf(m, "CPU architecture: 8\n"); + seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr)); + seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr)); + seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr)); + } return 0; } diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 1b9ad02837cf..5a1ba6e80d4e 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -186,6 +186,12 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); break; + case __SI_SYS: + err |= __put_user((compat_uptr_t)(unsigned long) + from->si_call_addr, &to->si_call_addr); + err |= __put_user(from->si_syscall, &to->si_syscall); + err |= __put_user(from->si_arch, &to->si_arch); + break; default: /* this is just in case for now ... */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index a564b440416a..ede186cdd452 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -147,14 +147,12 @@ cpu_resume_after_mmu: ret ENDPROC(cpu_resume_after_mmu) - .data ENTRY(cpu_resume) bl el2_setup // if in EL2 drop to EL1 cleanly #ifdef CONFIG_SMP mrs x1, mpidr_el1 - adr x4, mpidr_hash_ptr - ldr x5, [x4] - add x8, x4, x5 // x8 = struct mpidr_hash phys address + adrp x8, mpidr_hash + add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address /* retrieve mpidr_hash members to compute the hash */ ldr x2, [x8, #MPIDR_HASH_MASK] ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] @@ -164,14 +162,15 @@ ENTRY(cpu_resume) #else mov x7, xzr #endif - adr x0, sleep_save_sp + adrp x0, sleep_save_sp + add x0, x0, #:lo12:sleep_save_sp ldr x0, [x0, #SLEEP_SAVE_SP_PHYS] ldr x0, [x0, x7, lsl #3] /* load sp from context */ ldr x2, [x0, #CPU_CTX_SP] - adr x1, sleep_idmap_phys + adrp x1, sleep_idmap_phys /* load physical address of identity map page table in x1 */ - ldr x1, [x1] + ldr x1, [x1, #:lo12:sleep_idmap_phys] mov sp, x2 /* * cpu_do_resume expects x0 to contain context physical address @@ -180,26 +179,3 @@ ENTRY(cpu_resume) bl cpu_do_resume // PC relative jump, MMU off b cpu_resume_mmu // Resume MMU, never returns ENDPROC(cpu_resume) - - .align 3 -mpidr_hash_ptr: - /* - * offset of mpidr_hash symbol from current location - * used to obtain run-time mpidr_hash address with MMU off - */ - .quad mpidr_hash - . 
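The sleep.S hunk above (its removed literal-pool data continues below) replaces loads of stashed absolute offsets with adrp/:lo12: pairs: adrp materialises the 4KB page of a symbol relative to the current PC, and the :lo12: relocation on the following add or ldr supplies the offset within that page, which works with the MMU off and needs no literal pool. A standalone C model of the address computation, with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pc  = 0xffffffc000412344ULL;	/* hypothetical adrp location */
	uint64_t sym = 0xffffffc000789abcULL;	/* hypothetical target symbol */

	/* adrp x8, sym: add the encoded page delta to the PC's page */
	int64_t page_delta = (int64_t)((sym & ~0xfffULL) - (pc & ~0xfffULL));
	uint64_t x8 = (pc & ~0xfffULL) + page_delta;

	/* add x8, x8, #:lo12:sym: the low 12 bits of the symbol */
	x8 += sym & 0xfffULL;

	printf("resolved %#llx, expected %#llx\n",
	       (unsigned long long)x8, (unsigned long long)sym);
	return 0;
}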
-/* - * physical address of identity mapped page tables - */ - .type sleep_idmap_phys, #object -ENTRY(sleep_idmap_phys) - .quad 0 -/* - * struct sleep_save_sp { - * phys_addr_t *save_ptr_stash; - * phys_addr_t save_ptr_stash_phys; - * }; - */ - .type sleep_save_sp, #object -ENTRY(sleep_save_sp) - .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index b06d1d90ee8c..7ae6ee085261 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -37,6 +37,7 @@ #include <linux/of.h> #include <linux/irq_work.h> +#include <asm/alternative.h> #include <asm/atomic.h> #include <asm/cacheflush.h> #include <asm/cpu.h> @@ -309,6 +310,7 @@ void cpu_die(void) void __init smp_cpus_done(unsigned int max_cpus) { pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); + apply_alternatives_all(); } void __init smp_prepare_boot_cpu(void) diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 13ad4dbb1615..3771b72b6569 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c @@ -126,8 +126,8 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) return ret; } -extern struct sleep_save_sp sleep_save_sp; -extern phys_addr_t sleep_idmap_phys; +struct sleep_save_sp sleep_save_sp; +phys_addr_t sleep_idmap_phys; static int __init cpu_suspend_init(void) { diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index dc47e53e9e28..28c511b06edf 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -28,29 +28,39 @@ #include <asm/cacheflush.h> #include <asm/unistd.h> -static inline void -do_compat_cache_op(unsigned long start, unsigned long end, int flags) +static long +__do_compat_cache_op(unsigned long start, unsigned long end) { - struct mm_struct *mm = current->active_mm; - struct vm_area_struct *vma; + long ret; - if (end < start || flags) - return; + do { + unsigned long chunk = min(PAGE_SIZE, end - start); - down_read(&mm->mmap_sem); - vma = find_vma(mm, start); - if (vma && vma->vm_start < end) { - if (start < vma->vm_start) - start = vma->vm_start; - if (end > vma->vm_end) - end = vma->vm_end; - up_read(&mm->mmap_sem); - __flush_cache_user_range(start & PAGE_MASK, PAGE_ALIGN(end)); - return; - } - up_read(&mm->mmap_sem); + if (fatal_signal_pending(current)) + return 0; + + ret = __flush_cache_user_range(start, start + chunk); + if (ret) + return ret; + + cond_resched(); + start += chunk; + } while (start < end); + + return 0; } +static inline long +do_compat_cache_op(unsigned long start, unsigned long end, int flags) +{ + if (end < start || flags) + return -EINVAL; + + if (!access_ok(VERIFY_READ, start, end - start)) + return -EFAULT; + + return __do_compat_cache_op(start, end); +} /* * Handle all unrecognised system calls. */ @@ -74,8 +84,7 @@ long compat_arm_syscall(struct pt_regs *regs) * the specified region). 
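The rewritten cache operation above (the hunk's trailing comment closes below) drops the VMA lookup under mmap_sem in favour of flushing in page-sized chunks, with an access_ok() check up front, an early exit on fatal signals, and cond_resched() between chunks so a huge range cannot monopolise the CPU. The chunking pattern on its own, modelled standalone with a stand-in flush routine:

#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Stand-in for __flush_cache_user_range(); may fault and fail. */
static long flush_one_chunk(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
	return 0;
}

static long flush_range(unsigned long start, unsigned long end)
{
	long ret;

	while (start < end) {
		unsigned long chunk = end - start;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;

		ret = flush_one_chunk(start, start + chunk);
		if (ret)
			return ret;

		/* the kernel version also checks fatal_signal_pending()
		 * and calls cond_resched() at this point */
		start += chunk;
	}
	return 0;
}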
*/ case __ARM_NR_compat_cacheflush: - do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); - return 0; + return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]); case __ARM_NR_compat_set_tls: current->thread.tp_value = regs->regs[0]; diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index b6ee26b0939a..fcb8f7b42271 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -255,12 +255,15 @@ void store_cpu_topology(unsigned int cpuid) /* Multiprocessor system : Multi-threads per core */ cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); - cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); + cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2) | + MPIDR_AFFINITY_LEVEL(mpidr, 3) << 8; } else { /* Multiprocessor system : Single-thread per core */ cpuid_topo->thread_id = -1; cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1) | + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 3) << 16; } pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", diff --git a/arch/arm64/kernel/trace-events-emulation.h b/arch/arm64/kernel/trace-events-emulation.h new file mode 100644 index 000000000000..ae1dd598ea65 --- /dev/null +++ b/arch/arm64/kernel/trace-events-emulation.h @@ -0,0 +1,35 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM emulation + +#if !defined(_TRACE_EMULATION_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EMULATION_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(instruction_emulation, + + TP_PROTO(const char *instr, u64 addr), + TP_ARGS(instr, addr), + + TP_STRUCT__entry( + __string(instr, instr) + __field(u64, addr) + ), + + TP_fast_assign( + __assign_str(instr, instr); + __entry->addr = addr; + ), + + TP_printk("instr=\"%s\" addr=0x%llx", __get_str(instr), __entry->addr) +); + +#endif /* _TRACE_EMULATION_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_PATH . 
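The topology change above stops discarding the upper MPIDR affinity fields: cluster_id now folds Aff2/Aff3 (or Aff1..Aff3 on single-threaded parts) into one value, so CPUs differing only at the higher affinity levels no longer collapse into a single cluster. A standalone model of the packing, with MPIDR_AFFINITY_LEVEL() from <asm/cputype.h> approximated locally (Aff0..Aff2 sit in bits 7:0, 15:8, 23:16 and Aff3 in bits 39:32); the trace header's final lines continue below:

#include <stdint.h>

/* Local stand-in for MPIDR_AFFINITY_LEVEL() */
#define AFF(mpidr, level) \
	((int)(((mpidr) >> ((level) == 3 ? 32 : (level) * 8)) & 0xff))

static int cluster_id(uint64_t mpidr, int multithreaded)
{
	if (multithreaded)	/* thread = Aff0, core = Aff1 */
		return AFF(mpidr, 2) | AFF(mpidr, 3) << 8;

	/* single-threaded: core = Aff0 */
	return AFF(mpidr, 1) | AFF(mpidr, 2) << 8 | AFF(mpidr, 3) << 16;
}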
+ +#define TRACE_INCLUDE_FILE trace-events-emulation +#include <trace/define_trace.h> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index de1b085e7963..0a801e3743d5 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -259,6 +259,69 @@ void arm64_notify_die(const char *str, struct pt_regs *regs, } } +static LIST_HEAD(undef_hook); +static DEFINE_RAW_SPINLOCK(undef_lock); + +void register_undef_hook(struct undef_hook *hook) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&undef_lock, flags); + list_add(&hook->node, &undef_hook); + raw_spin_unlock_irqrestore(&undef_lock, flags); +} + +void unregister_undef_hook(struct undef_hook *hook) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&undef_lock, flags); + list_del(&hook->node); + raw_spin_unlock_irqrestore(&undef_lock, flags); +} + +static int call_undef_hook(struct pt_regs *regs) +{ + struct undef_hook *hook; + unsigned long flags; + u32 instr; + int (*fn)(struct pt_regs *regs, u32 instr) = NULL; + void __user *pc = (void __user *)instruction_pointer(regs); + + if (!user_mode(regs)) + return 1; + + if (compat_thumb_mode(regs)) { + /* 16-bit Thumb instruction */ + if (get_user(instr, (u16 __user *)pc)) + goto exit; + instr = le16_to_cpu(instr); + if (aarch32_insn_is_wide(instr)) { + u32 instr2; + + if (get_user(instr2, (u16 __user *)(pc + 2))) + goto exit; + instr2 = le16_to_cpu(instr2); + instr = (instr << 16) | instr2; + } + } else { + /* 32-bit ARM instruction */ + if (get_user(instr, (u32 __user *)pc)) + goto exit; + instr = le32_to_cpu(instr); + } + + raw_spin_lock_irqsave(&undef_lock, flags); + list_for_each_entry(hook, &undef_hook, node) + if ((instr & hook->instr_mask) == hook->instr_val && + (regs->pstate & hook->pstate_mask) == hook->pstate_val) + fn = hook->fn; + + raw_spin_unlock_irqrestore(&undef_lock, flags); +exit: + return fn ? fn(regs, instr) : 1; +} + asmlinkage void __exception do_undefinstr(struct pt_regs *regs) { siginfo_t info; @@ -268,6 +331,9 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) if (!aarch32_break_handler(regs)) return; + if (call_undef_hook(regs) == 0) + return; + if (show_unhandled_signals && unhandled_signal(current, SIGILL) && printk_ratelimit()) { pr_info("%s[%d]: undefined instruction: pc=%p\n", diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index edf8715ba39b..9965ec87cbec 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -11,8 +11,9 @@ #include "image.h" -#define ARM_EXIT_KEEP(x) -#define ARM_EXIT_DISCARD(x) x +/* .exit.text needed in case of alternative patching */ +#define ARM_EXIT_KEEP(x) x +#define ARM_EXIT_DISCARD(x) OUTPUT_ARCH(aarch64) ENTRY(_text) @@ -32,6 +33,22 @@ jiffies = jiffies_64; *(.hyp.text) \ VMLINUX_SYMBOL(__hyp_text_end) = .; +/* + * The size of the PE/COFF section that covers the kernel image, which + * runs from stext to _edata, must be a round multiple of the PE/COFF + * FileAlignment, which we set to its minimum value of 0x200. 'stext' + * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned + * boundary should be sufficient. + */ +PECOFF_FILE_ALIGNMENT = 0x200; + +#ifdef CONFIG_EFI +#define PECOFF_EDATA_PADDING \ + .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); } +#else +#define PECOFF_EDATA_PADDING +#endif + SECTIONS { /* @@ -100,9 +117,21 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; + . 
= ALIGN(4); + .altinstructions : { + __alt_instructions = .; + *(.altinstructions) + __alt_instructions_end = .; + } + .altinstr_replacement : { + *(.altinstr_replacement) + } + + . = ALIGN(PAGE_SIZE); _data = .; _sdata = .; RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) + PECOFF_EDATA_PADDING _edata = .; BSS_SECTION(0, 0, 0)
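The .altinstructions section emitted above is the table that apply_alternatives_all() (called from smp_cpus_done() once all CPUs are up) and module_finalize() walk; keeping .exit.text is needed because replacement instructions may refer into it. A rough model of the walk follows: the alt_instr layout here is an assumption patterned on the x86 scheme the new alternative.c says it was inspired by, and cpus_have_cap() is an assumed capability-test helper:

#include <linux/types.h>

struct alt_instr {		/* assumed layout, not from the patch */
	s32 orig_offset;	/* original instruction(s) */
	s32 alt_offset;		/* replacement instruction(s) */
	u16 cpufeature;		/* capability that enables the patch */
	u8  orig_len;
	u8  alt_len;
};

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern bool cpus_have_cap(unsigned int num);	/* assumed helper */

static void patch_kernel_alternatives(void)
{
	struct alt_instr *a;

	for (a = __alt_instructions; a < __alt_instructions_end; a++) {
		if (!cpus_have_cap(a->cpufeature))
			continue;
		/* copy a->alt_len bytes of replacement text over the
		 * original instructions, then synchronise I-caches */
	}
}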
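Finally, the undef_hook machinery added to traps.c earlier is the hook point for the AArch32 instruction emulation elsewhere in this merge: a handler registers a mask/value pair for the instruction encoding plus a PSTATE filter, and call_undef_hook() dispatches matching undefined-instruction traps to it. A hypothetical in-kernel user, assuming the declarations land in <asm/traps.h> as on arch/arm; the encoding below is illustrative, not a real instruction:

#include <linux/init.h>
#include <linux/ptrace.h>
#include <asm/traps.h>

static int demo_handler(struct pt_regs *regs, u32 instr)
{
	/* emulate the instruction's effect on regs, then step past
	 * it; returning 0 reports the trap as handled */
	regs->pc += 4;
	return 0;
}

static struct undef_hook demo_hook = {
	.instr_mask	= 0xffffffff,	/* illustrative */
	.instr_val	= 0xdeadc0de,	/* illustrative */
	.pstate_mask	= 0,		/* match any PSTATE */
	.pstate_val	= 0,
	.fn		= demo_handler,
};

static int __init demo_hook_init(void)
{
	register_undef_hook(&demo_hook);
	return 0;
}
late_initcall(demo_hook_init);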