Diffstat (limited to 'arch/loongarch/kernel')
 arch/loongarch/kernel/Makefile | 9
 arch/loongarch/kernel/entry.S | 91
 arch/loongarch/kernel/ftrace_dyn.c | 64
 arch/loongarch/kernel/genex.S | 8
 arch/loongarch/kernel/head.S | 33
 arch/loongarch/kernel/hw_breakpoint.c | 548
 arch/loongarch/kernel/inst.c | 123
 arch/loongarch/kernel/kprobes.c | 406
 arch/loongarch/kernel/kprobes_trampoline.S | 96
 arch/loongarch/kernel/process.c | 7
 arch/loongarch/kernel/ptrace.c | 472
 arch/loongarch/kernel/relocate.c | 242
 arch/loongarch/kernel/setup.c | 14
 arch/loongarch/kernel/time.c | 11
 arch/loongarch/kernel/traps.c | 68
 arch/loongarch/kernel/vmlinux.lds.S | 20
16 files changed, 2134 insertions(+), 78 deletions(-)
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index c8cfbd562921..78d4e3384305 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -8,13 +8,15 @@ extra-y := vmlinux.lds
obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
- alternative.o unaligned.o unwind.o
+ alternative.o unwind.o
obj-$(CONFIG_ACPI) += acpi.o
obj-$(CONFIG_EFI) += efi.o
obj-$(CONFIG_CPU_HAS_FPU) += fpu.o
+obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
+
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
obj-y += mcount.o ftrace.o
@@ -39,6 +41,8 @@ obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
+obj-$(CONFIG_RELOCATABLE) += relocate.o
+
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
@@ -46,5 +50,8 @@ obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+
+obj-$(CONFIG_KPROBES) += kprobes.o kprobes_trampoline.o
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index d53b631c9022..d737e3cf42d3 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -19,70 +19,71 @@
.cfi_sections .debug_frame
.align 5
SYM_FUNC_START(handle_syscall)
- csrrd t0, PERCPU_BASE_KS
- la.abs t1, kernelsp
- add.d t1, t1, t0
- move t2, sp
- ld.d sp, t1, 0
+ csrrd t0, PERCPU_BASE_KS
+ la.pcrel t1, kernelsp
+ add.d t1, t1, t0
+ move t2, sp
+ ld.d sp, t1, 0
- addi.d sp, sp, -PT_SIZE
- cfi_st t2, PT_R3
+ addi.d sp, sp, -PT_SIZE
+ cfi_st t2, PT_R3
cfi_rel_offset sp, PT_R3
- st.d zero, sp, PT_R0
- csrrd t2, LOONGARCH_CSR_PRMD
- st.d t2, sp, PT_PRMD
- csrrd t2, LOONGARCH_CSR_CRMD
- st.d t2, sp, PT_CRMD
- csrrd t2, LOONGARCH_CSR_EUEN
- st.d t2, sp, PT_EUEN
- csrrd t2, LOONGARCH_CSR_ECFG
- st.d t2, sp, PT_ECFG
- csrrd t2, LOONGARCH_CSR_ESTAT
- st.d t2, sp, PT_ESTAT
- cfi_st ra, PT_R1
- cfi_st a0, PT_R4
- cfi_st a1, PT_R5
- cfi_st a2, PT_R6
- cfi_st a3, PT_R7
- cfi_st a4, PT_R8
- cfi_st a5, PT_R9
- cfi_st a6, PT_R10
- cfi_st a7, PT_R11
- csrrd ra, LOONGARCH_CSR_ERA
- st.d ra, sp, PT_ERA
+ st.d zero, sp, PT_R0
+ csrrd t2, LOONGARCH_CSR_PRMD
+ st.d t2, sp, PT_PRMD
+ csrrd t2, LOONGARCH_CSR_CRMD
+ st.d t2, sp, PT_CRMD
+ csrrd t2, LOONGARCH_CSR_EUEN
+ st.d t2, sp, PT_EUEN
+ csrrd t2, LOONGARCH_CSR_ECFG
+ st.d t2, sp, PT_ECFG
+ csrrd t2, LOONGARCH_CSR_ESTAT
+ st.d t2, sp, PT_ESTAT
+ cfi_st ra, PT_R1
+ cfi_st a0, PT_R4
+ cfi_st a1, PT_R5
+ cfi_st a2, PT_R6
+ cfi_st a3, PT_R7
+ cfi_st a4, PT_R8
+ cfi_st a5, PT_R9
+ cfi_st a6, PT_R10
+ cfi_st a7, PT_R11
+ csrrd ra, LOONGARCH_CSR_ERA
+ st.d ra, sp, PT_ERA
cfi_rel_offset ra, PT_ERA
- cfi_st tp, PT_R2
- cfi_st u0, PT_R21
- cfi_st fp, PT_R22
+ cfi_st tp, PT_R2
+ cfi_st u0, PT_R21
+ cfi_st fp, PT_R22
SAVE_STATIC
- move u0, t0
- li.d tp, ~_THREAD_MASK
- and tp, tp, sp
+ move u0, t0
+ li.d tp, ~_THREAD_MASK
+ and tp, tp, sp
- move a0, sp
- bl do_syscall
+ move a0, sp
+ bl do_syscall
RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_syscall)
+_ASM_NOKPROBE(handle_syscall)
SYM_CODE_START(ret_from_fork)
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, sp
- bl syscall_exit_to_user_mode
+ bl schedule_tail # a0 = struct task_struct *prev
+ move a0, sp
+ bl syscall_exit_to_user_mode
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
SYM_CODE_END(ret_from_fork)
SYM_CODE_START(ret_from_kernel_thread)
- bl schedule_tail # a0 = struct task_struct *prev
- move a0, s1
- jirl ra, s0, 0
- move a0, sp
- bl syscall_exit_to_user_mode
+ bl schedule_tail # a0 = struct task_struct *prev
+ move a0, s1
+ jirl ra, s0, 0
+ move a0, sp
+ bl syscall_exit_to_user_mode
RESTORE_STATIC
RESTORE_SOME
RESTORE_SP_AND_RET
diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
index 0f07591cab30..4a3ef8516ccc 100644
--- a/arch/loongarch/kernel/ftrace_dyn.c
+++ b/arch/loongarch/kernel/ftrace_dyn.c
@@ -6,6 +6,7 @@
*/
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <asm/inst.h>
@@ -271,3 +272,66 @@ int ftrace_disable_ftrace_graph_caller(void)
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+ int bit;
+ struct pt_regs *regs;
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ goto out;
+
+ regs = ftrace_get_regs(fregs);
+ if (!regs)
+ goto out;
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ unsigned long orig_ip = instruction_pointer(regs);
+
+ instruction_pointer_set(regs, ip);
+
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ /*
+ * Emulate singlestep (and also recover regs->csr_era)
+ * as if there is a nop
+ */
+ instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ instruction_pointer_set(regs, orig_ip);
+ }
+
+ /*
+ * If pre_handler returned non-zero, it changed regs->csr_era, so we
+ * have to skip emulating the post_handler.
+ */
+ __this_cpu_write(current_kprobe, NULL);
+ }
+out:
+ ftrace_test_recursion_unlock(bit);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.insn = NULL;
+ return 0;
+}
+#endif /* CONFIG_KPROBES_ON_FTRACE */
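
With CONFIG_KPROBES_ON_FTRACE, a kprobe placed at a function's ftrace site is dispatched through kprobe_ftrace_handler() above instead of via a break instruction. A minimal sketch of a module exercising that path; the target symbol kernel_clone is purely illustrative:

#include <linux/kprobes.h>
#include <linux/module.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s, era = %lx\n", p->symbol_name, instruction_pointer(regs));
	return 0;	/* 0: continue normally; the handler emulates the nop */
}

static struct kprobe demo_kp = {
	.symbol_name = "kernel_clone",	/* illustrative: any ftrace-able symbol */
	.pre_handler = demo_pre,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the pre-handler returns 0, the handler above "single-steps" by simply advancing the saved era past the MCOUNT_INSN_SIZE nop, as the comment in kprobe_ftrace_handler() describes.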
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
index 7e5c293ed89f..44ff1ff64260 100644
--- a/arch/loongarch/kernel/genex.S
+++ b/arch/loongarch/kernel/genex.S
@@ -34,7 +34,7 @@ SYM_FUNC_END(__arch_cpu_idle)
SYM_FUNC_START(handle_vint)
BACKUP_T0T1
SAVE_ALL
- la.abs t1, __arch_cpu_idle
+ la_abs t1, __arch_cpu_idle
LONG_L t0, sp, PT_ERA
/* 32 byte rollback region */
ori t0, t0, 0x1f
@@ -43,7 +43,7 @@ SYM_FUNC_START(handle_vint)
LONG_S t0, sp, PT_ERA
1: move a0, sp
move a1, sp
- la.abs t0, do_vint
+ la_abs t0, do_vint
jirl ra, t0, 0
RESTORE_ALL_AND_RET
SYM_FUNC_END(handle_vint)
@@ -72,7 +72,7 @@ SYM_FUNC_END(except_vec_cex)
SAVE_ALL
build_prep_\prep
move a0, sp
- la.abs t0, do_\handler
+ la_abs t0, do_\handler
jirl ra, t0, 0
668:
RESTORE_ALL_AND_RET
@@ -93,6 +93,6 @@ SYM_FUNC_END(except_vec_cex)
BUILD_HANDLER reserved reserved none /* others */
SYM_FUNC_START(handle_sys)
- la.abs t0, handle_syscall
+ la_abs t0, handle_syscall
jr t0
SYM_FUNC_END(handle_sys)
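
The la.abs -> la_abs switch is what lets these vectors run in a relocated kernel: la_abs (a macro in asm/asmmacro.h) emits a patchable immediate sequence and records it in the .la_abs section that relocate_absolute() in the new relocate.c below rewrites at boot. A sketch of the idea under CONFIG_RELOCATABLE; the record layout is illustrative, chosen only to match how relocate_absolute() computes the instruction address from p->offset and reads p->symvalue:

	.macro	la_abs reg, sym
#ifndef CONFIG_RELOCATABLE
	la.abs	\reg, \sym
#else
766:
	lu12i.w	\reg, 0			/* bits 31..12, patched at boot */
	ori	\reg, \reg, 0		/* bits 11..0 */
	lu32i.d	\reg, 0			/* bits 51..32 */
	lu52i.d	\reg, \reg, 0		/* bits 63..52 */
	.pushsection ".la_abs", "aw", %progbits
768:
	.dword	768b - 766b		/* rela_la_abs.offset: back to the insns */
	.dword	\sym			/* rela_la_abs.symvalue */
	.popsection
#endif
	.endm

This is why relocate_absolute() patches exactly four immediates (insn[0] through insn[3]) per record.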
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 57bada6b4e93..aa64b179744f 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -24,7 +24,7 @@ _head:
.org 0x8
.dword kernel_entry /* Kernel entry point */
.dword _end - _text /* Kernel image effective size */
- .quad 0 /* Kernel image load offset from start of RAM */
+ .quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
.org 0x38 /* 0x20 ~ 0x37 reserved */
.long LINUX_PE_MAGIC
.long pe_header - _head /* Offset to the PE header */
@@ -50,11 +50,8 @@ SYM_CODE_START(kernel_entry) # kernel entry point
li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
csrwr t0, LOONGARCH_CSR_DMWIN1
- /* We might not get launched at the address the kernel is linked to,
- so we jump there. */
- la.abs t0, 0f
- jr t0
-0:
+ JUMP_VIRT_ADDR t0, t1
+
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
@@ -89,6 +86,23 @@ SYM_CODE_START(kernel_entry) # kernel entry point
PTR_ADD sp, sp, tp
set_saved_sp sp, t0, t1
+#ifdef CONFIG_RELOCATABLE
+
+ bl relocate_kernel
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ /* Repoint the sp into the new kernel */
+ PTR_LI sp, (_THREAD_SIZE - PT_SIZE)
+ PTR_ADD sp, sp, tp
+ set_saved_sp sp, t0, t1
+#endif
+
+ /* relocate_kernel() returns the new kernel entry point */
+ jr a0
+ ASM_BUG()
+
+#endif
+
bl start_kernel
ASM_BUG()
@@ -106,9 +120,8 @@ SYM_CODE_START(smpboot_entry)
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
- la.abs t0, 0f
- jr t0
-0:
+ JUMP_VIRT_ADDR t0, t1
+
/* Enable PG */
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
@@ -117,7 +130,7 @@ SYM_CODE_START(smpboot_entry)
li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
csrwr t0, LOONGARCH_CSR_EUEN
- la.abs t0, cpuboot_data
+ la.pcrel t0, cpuboot_data
ld.d sp, t0, CPU_BOOT_STACK
ld.d tp, t0, CPU_BOOT_TINFO
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..2406c95b34cc
--- /dev/null
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+#define pr_fmt(fmt) "hw-breakpoint: " fmt
+
+#include <linux/hw_breakpoint.h>
+#include <linux/kprobes.h>
+#include <linux/perf_event.h>
+
+#include <asm/hw_breakpoint.h>
+
+/* Breakpoint currently in use for each BRP. */
+static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[LOONGARCH_MAX_BRP]);
+
+/* Watchpoint currently in use for each WRP. */
+static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[LOONGARCH_MAX_WRP]);
+
+int hw_breakpoint_slots(int type)
+{
+ /*
+ * We can be called early, so don't rely on
+ * our static variables being initialised.
+ */
+ switch (type) {
+ case TYPE_INST:
+ return get_num_brps();
+ case TYPE_DATA:
+ return get_num_wrps();
+ default:
+ pr_warn("unknown slot type: %d\n", type);
+ return 0;
+ }
+}
+
+#define READ_WB_REG_CASE(OFF, N, REG, T, VAL) \
+ case (OFF + N): \
+ LOONGARCH_CSR_WATCH_READ(N, REG, T, VAL); \
+ break
+
+#define WRITE_WB_REG_CASE(OFF, N, REG, T, VAL) \
+ case (OFF + N): \
+ LOONGARCH_CSR_WATCH_WRITE(N, REG, T, VAL); \
+ break
+
+#define GEN_READ_WB_REG_CASES(OFF, REG, T, VAL) \
+ READ_WB_REG_CASE(OFF, 0, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 1, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 2, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 3, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 4, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 5, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 6, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 7, REG, T, VAL);
+
+#define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL) \
+ WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 1, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 2, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 3, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);
+
+static u64 read_wb_reg(int reg, int n, int t)
+{
+ u64 val = 0;
+
+ switch (reg + n) {
+ GEN_READ_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val);
+ GEN_READ_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val);
+ GEN_READ_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val);
+ GEN_READ_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val);
+ default:
+ pr_warn("Attempt to read from unknown breakpoint register %d\n", n);
+ }
+
+ return val;
+}
+NOKPROBE_SYMBOL(read_wb_reg);
+
+static void write_wb_reg(int reg, int n, int t, u64 val)
+{
+ switch (reg + n) {
+ GEN_WRITE_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val);
+ GEN_WRITE_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val);
+ GEN_WRITE_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val);
+ GEN_WRITE_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val);
+ default:
+ pr_warn("Attempt to write to unknown breakpoint register %d\n", n);
+ }
+}
+NOKPROBE_SYMBOL(write_wb_reg);
+
+enum hw_breakpoint_ops {
+ HW_BREAKPOINT_INSTALL,
+ HW_BREAKPOINT_UNINSTALL,
+};
+
+/*
+ * hw_breakpoint_slot_setup - Find and set up a perf event slot according to the operation
+ *
+ * @slots: pointer to array of slots
+ * @max_slots: max number of slots
+ * @bp: perf_event to setup
+ * @ops: operation to be carried out on the slot
+ *
+ * Return:
+ * slot index on success
+ * -ENOSPC if no slot is available/matches
+ * -EINVAL on an invalid operations parameter
+ */
+
+static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
+ struct perf_event *bp, enum hw_breakpoint_ops ops)
+{
+ int i;
+ struct perf_event **slot;
+
+ for (i = 0; i < max_slots; ++i) {
+ slot = &slots[i];
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ if (!*slot) {
+ *slot = bp;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ if (*slot == bp) {
+ *slot = NULL;
+ return i;
+ }
+ break;
+ default:
+ pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
+ return -EINVAL;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+void ptrace_hw_copy_thread(struct task_struct *tsk)
+{
+ memset(tsk->thread.hbp_break, 0, sizeof(tsk->thread.hbp_break));
+ memset(tsk->thread.hbp_watch, 0, sizeof(tsk->thread.hbp_watch));
+}
+
+/*
+ * Unregister breakpoints from this task and reset the pointers in the thread_struct.
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ int i;
+ struct thread_struct *t = &tsk->thread;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (t->hbp_break[i]) {
+ unregister_hw_breakpoint(t->hbp_break[i]);
+ t->hbp_break[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < LOONGARCH_MAX_WRP; i++) {
+ if (t->hbp_watch[i]) {
+ unregister_hw_breakpoint(t->hbp_watch[i]);
+ t->hbp_watch[i] = NULL;
+ }
+ }
+}
+
+static int hw_breakpoint_control(struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
+{
+ u32 ctrl;
+ int i, max_slots, enable;
+ struct perf_event **slots;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ /* Breakpoint */
+ slots = this_cpu_ptr(bp_on_reg);
+ max_slots = boot_cpu_data.watch_ireg_count;
+ } else {
+ /* Watchpoint */
+ slots = this_cpu_ptr(wp_on_reg);
+ max_slots = boot_cpu_data.watch_dreg_count;
+ }
+
+ i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
+
+ if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
+ return i;
+
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ /* Set the FWPnCFG/MWPnCFG 1~4 register. */
+ write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
+ write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
+ write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
+ } else {
+ ctrl = encode_ctrl_reg(info->ctrl);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE |
+ 1 << MWPnCFG3_LoadEn | 1 << MWPnCFG3_StoreEn);
+ }
+ enable = csr_read64(LOONGARCH_CSR_CRMD);
+ csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ /* Reset the FWPnCFG/MWPnCFG 1~4 register. */
+ write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
+ write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 0, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 1, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Install a perf counter breakpoint.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
+}
+
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
+}
+
+static int get_hbp_len(u8 hbp_len)
+{
+ unsigned int len_in_bytes = 0;
+
+ switch (hbp_len) {
+ case LOONGARCH_BREAKPOINT_LEN_1:
+ len_in_bytes = 1;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_2:
+ len_in_bytes = 2;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_4:
+ len_in_bytes = 4;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_8:
+ len_in_bytes = 8;
+ break;
+ }
+
+ return len_in_bytes;
+}
+
+/*
+ * Check whether bp virtual address is in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
+{
+ unsigned int len;
+ unsigned long va;
+
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
+
+ return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
+ * Hopefully this will disappear when ptrace can bypass the conversion
+ * to generic breakpoint descriptions.
+ */
+int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+ int *gen_len, int *gen_type, int *offset)
+{
+ /* Type */
+ switch (ctrl.type) {
+ case LOONGARCH_BREAKPOINT_EXECUTE:
+ *gen_type = HW_BREAKPOINT_X;
+ break;
+ case LOONGARCH_BREAKPOINT_LOAD:
+ *gen_type = HW_BREAKPOINT_R;
+ break;
+ case LOONGARCH_BREAKPOINT_STORE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+ case LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE:
+ *gen_type = HW_BREAKPOINT_RW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ctrl.len)
+ return -EINVAL;
+
+ *offset = __ffs(ctrl.len);
+
+ /* Len */
+ switch (ctrl.len) {
+ case LOONGARCH_BREAKPOINT_LEN_1:
+ *gen_len = HW_BREAKPOINT_LEN_1;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_2:
+ *gen_len = HW_BREAKPOINT_LEN_2;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_4:
+ *gen_len = HW_BREAKPOINT_LEN_4;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_8:
+ *gen_len = HW_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Construct an arch_hw_breakpoint from a perf_event.
+ */
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ /* Type */
+ switch (attr->bp_type) {
+ case HW_BREAKPOINT_X:
+ hw->ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
+ break;
+ case HW_BREAKPOINT_R:
+ hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD;
+ break;
+ case HW_BREAKPOINT_W:
+ hw->ctrl.type = LOONGARCH_BREAKPOINT_STORE;
+ break;
+ case HW_BREAKPOINT_RW:
+ hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Len */
+ switch (attr->bp_len) {
+ case HW_BREAKPOINT_LEN_1:
+ hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_1;
+ break;
+ case HW_BREAKPOINT_LEN_2:
+ hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_2;
+ break;
+ case HW_BREAKPOINT_LEN_4:
+ hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
+ break;
+ case HW_BREAKPOINT_LEN_8:
+ hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Address */
+ hw->address = attr->bp_addr;
+
+ return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings.
+ */
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ int ret;
+ u64 alignment_mask, offset;
+
+ /* Build the arch_hw_breakpoint. */
+ ret = arch_build_bp_info(bp, attr, hw);
+ if (ret)
+ return ret;
+
+ if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE)
+ alignment_mask = 0x3;
+ else
+ alignment_mask = 0x7;
+ offset = hw->address & alignment_mask;
+
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
+
+ return 0;
+}
+
+static void update_bp_registers(struct pt_regs *regs, int enable, int type)
+{
+ u32 ctrl;
+ int i, max_slots;
+ struct perf_event **slots;
+ struct arch_hw_breakpoint *info;
+
+ switch (type) {
+ case 0:
+ slots = this_cpu_ptr(bp_on_reg);
+ max_slots = boot_cpu_data.watch_ireg_count;
+ break;
+ case 1:
+ slots = this_cpu_ptr(wp_on_reg);
+ max_slots = boot_cpu_data.watch_dreg_count;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < max_slots; ++i) {
+ if (!slots[i])
+ continue;
+
+ info = counter_arch_bp(slots[i]);
+ if (enable) {
+ if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) {
+ write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
+ } else {
+ ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD)
+ ctrl |= 0x1 << MWPnCFG3_LoadEn;
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE)
+ ctrl |= 0x1 << MWPnCFG3_StoreEn;
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl);
+ }
+ regs->csr_prmd |= CSR_PRMD_PWE;
+ } else {
+ if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) {
+ write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+ } else {
+ ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD)
+ ctrl &= ~(0x1 << MWPnCFG3_LoadEn);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE)
+ ctrl &= ~(0x1 << MWPnCFG3_StoreEn);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl);
+ }
+ regs->csr_prmd &= ~CSR_PRMD_PWE;
+ }
+ }
+}
+NOKPROBE_SYMBOL(update_bp_registers);
+
+/*
+ * Debug exception handlers.
+ */
+void breakpoint_handler(struct pt_regs *regs)
+{
+ int i;
+ struct perf_event *bp, **slots;
+
+ slots = this_cpu_ptr(bp_on_reg);
+
+ for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
+ bp = slots[i];
+ if (bp == NULL)
+ continue;
+ perf_bp_event(bp, regs);
+ }
+ update_bp_registers(regs, 0, 0);
+}
+NOKPROBE_SYMBOL(breakpoint_handler);
+
+void watchpoint_handler(struct pt_regs *regs)
+{
+ int i;
+ struct perf_event *wp, **slots;
+
+ slots = this_cpu_ptr(wp_on_reg);
+
+ for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
+ wp = slots[i];
+ if (wp == NULL)
+ continue;
+ perf_bp_event(wp, regs);
+ }
+ update_bp_registers(regs, 0, 1);
+}
+NOKPROBE_SYMBOL(watchpoint_handler);
+
+static int __init arch_hw_breakpoint_init(void)
+{
+ int cpu;
+
+ boot_cpu_data.watch_ireg_count = get_num_brps();
+ boot_cpu_data.watch_dreg_count = get_num_wrps();
+
+ pr_info("Found %d breakpoint and %d watchpoint registers.\n",
+ boot_cpu_data.watch_ireg_count, boot_cpu_data.watch_dreg_count);
+
+ for (cpu = 1; cpu < NR_CPUS; cpu++) {
+ cpu_data[cpu].watch_ireg_count = boot_cpu_data.watch_ireg_count;
+ cpu_data[cpu].watch_dreg_count = boot_cpu_data.watch_dreg_count;
+ }
+
+ return 0;
+}
+arch_initcall(arch_hw_breakpoint_init);
+
+void hw_breakpoint_thread_switch(struct task_struct *next)
+{
+ u64 addr, mask;
+ struct pt_regs *regs = task_pt_regs(next);
+
+ if (test_tsk_thread_flag(next, TIF_SINGLESTEP)) {
+ addr = read_wb_reg(CSR_CFG_ADDR, 0, 0);
+ mask = read_wb_reg(CSR_CFG_MASK, 0, 0);
+ if (!((regs->csr_era ^ addr) & ~mask))
+ csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
+ regs->csr_prmd |= CSR_PRMD_PWE;
+ } else {
+ /* Update breakpoints */
+ update_bp_registers(regs, 1, 0);
+ /* Update watchpoints */
+ update_bp_registers(regs, 1, 1);
+ }
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+}
+
+/*
+ * Dummy function to register with die_notifier.
+ */
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
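
This file is only the architecture backend; kernel consumers reach it through the generic perf hw_breakpoint API. A sketch modelled on samples/hw_breakpoint (names here are illustrative) of a data watchpoint that would occupy a wp_on_reg slot above via arch_install_hw_breakpoint():

#include <linux/hw_breakpoint.h>
#include <linux/module.h>
#include <linux/perf_event.h>

static int watched_data;
static struct perf_event * __percpu *wp;

static void wp_handler(struct perf_event *bp,
		       struct perf_sample_data *data, struct pt_regs *regs)
{
	pr_info("watched_data written, era = %lx\n", instruction_pointer(regs));
}

static int __init wp_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&watched_data;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;	/* store-only, maps to MWPnCFG3_StoreEn */

	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
	return IS_ERR((void __force *)wp) ? PTR_ERR((void __force *)wp) : 0;
}

static void __exit wp_exit(void)
{
	unregister_wide_hw_breakpoint(wp);
}

module_init(wp_init);
module_exit(wp_exit);
MODULE_LICENSE("GPL");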
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index badc59087042..258ef267cd30 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -10,6 +10,129 @@
static DEFINE_RAW_SPINLOCK(patch_lock);
+void simu_pc(struct pt_regs *regs, union loongarch_instruction insn)
+{
+ unsigned long pc = regs->csr_era;
+ unsigned int rd = insn.reg1i20_format.rd;
+ unsigned int imm = insn.reg1i20_format.immediate;
+
+ if (pc & 3) {
+ pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+ return;
+ }
+
+ switch (insn.reg1i20_format.opcode) {
+ case pcaddi_op:
+ regs->regs[rd] = pc + sign_extend64(imm << 2, 21);
+ break;
+ case pcaddu12i_op:
+ regs->regs[rd] = pc + sign_extend64(imm << 12, 31);
+ break;
+ case pcaddu18i_op:
+ regs->regs[rd] = pc + sign_extend64(imm << 18, 37);
+ break;
+ case pcalau12i_op:
+ regs->regs[rd] = pc + sign_extend64(imm << 12, 31);
+ regs->regs[rd] &= ~((1 << 12) - 1);
+ break;
+ default:
+ pr_info("%s: unknown opcode\n", __func__);
+ return;
+ }
+
+ regs->csr_era += LOONGARCH_INSN_SIZE;
+}
+
+void simu_branch(struct pt_regs *regs, union loongarch_instruction insn)
+{
+ unsigned int imm, imm_l, imm_h, rd, rj;
+ unsigned long pc = regs->csr_era;
+
+ if (pc & 3) {
+ pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+ return;
+ }
+
+ imm_l = insn.reg0i26_format.immediate_l;
+ imm_h = insn.reg0i26_format.immediate_h;
+ switch (insn.reg0i26_format.opcode) {
+ case b_op:
+ regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 27);
+ return;
+ case bl_op:
+ regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 27);
+ regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
+ return;
+ }
+
+ imm_l = insn.reg1i21_format.immediate_l;
+ imm_h = insn.reg1i21_format.immediate_h;
+ rj = insn.reg1i21_format.rj;
+ switch (insn.reg1i21_format.opcode) {
+ case beqz_op:
+ if (regs->regs[rj] == 0)
+ regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 22);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ return;
+ case bnez_op:
+ if (regs->regs[rj] != 0)
+ regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 22);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ return;
+ }
+
+ imm = insn.reg2i16_format.immediate;
+ rj = insn.reg2i16_format.rj;
+ rd = insn.reg2i16_format.rd;
+ switch (insn.reg2i16_format.opcode) {
+ case beq_op:
+ if (regs->regs[rj] == regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bne_op:
+ if (regs->regs[rj] != regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case blt_op:
+ if ((long)regs->regs[rj] < (long)regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bge_op:
+ if ((long)regs->regs[rj] >= (long)regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bltu_op:
+ if (regs->regs[rj] < regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bgeu_op:
+ if (regs->regs[rj] >= regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case jirl_op:
+ regs->csr_era = regs->regs[rj] + sign_extend64(imm << 2, 17);
+ regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
+ break;
+ default:
+ pr_info("%s: unknown opcode\n", __func__);
+ return;
+ }
+}
+
int larch_insn_read(void *addr, u32 *insnp)
{
int ret;
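
The helpers above replicate the ISA's PC-relative arithmetic; for example pcaddu12i sets rd = pc + SignExtend(imm << 12) with the sign bit at position 31. A standalone host-side sketch of that computation (not kernel code; the values are made up):

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's sign_extend64(): index is the sign bit. */
static int64_t sign_extend64(uint64_t value, int index)
{
	return (int64_t)(value << (63 - index)) >> (63 - index);
}

int main(void)
{
	uint64_t pc  = 0x9000000000200000ULL;	/* made-up era */
	uint32_t imm = 0xfffff;			/* 20-bit immediate, all ones */

	/* pcaddu12i rd, imm: rd = pc + sext(imm << 12) */
	printf("rd = 0x%llx\n", (unsigned long long)
	       (pc + sign_extend64((uint64_t)imm << 12, 31)));
	return 0;
}

Here imm sign-extends to -4096, so rd lands one page below pc, matching what simu_pc() computes for that encoding.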
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
new file mode 100644
index 000000000000..56c8c4b09a42
--- /dev/null
+++ b/arch/loongarch/kernel/kprobes.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <asm/break.h>
+
+static const union loongarch_instruction breakpoint_insn = {
+ .reg0i15_format = {
+ .opcode = break_op,
+ .immediate = BRK_KPROBE_BP,
+ }
+};
+
+static const union loongarch_instruction singlestep_insn = {
+ .reg0i15_format = {
+ .opcode = break_op,
+ .immediate = BRK_KPROBE_SSTEPBP,
+ }
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static bool insns_not_supported(union loongarch_instruction insn)
+{
+ switch (insn.reg2i14_format.opcode) {
+ case llw_op:
+ case lld_op:
+ case scw_op:
+ case scd_op:
+ pr_notice("kprobe: ll and sc instructions are not supported\n");
+ return true;
+ }
+
+ switch (insn.reg1i21_format.opcode) {
+ case bceqz_op:
+ pr_notice("kprobe: bceqz and bcnez instructions are not supported\n");
+ return true;
+ }
+
+ return false;
+}
+NOKPROBE_SYMBOL(insns_not_supported);
+
+static bool insns_need_simulation(struct kprobe *p)
+{
+ if (is_pc_ins(&p->opcode))
+ return true;
+
+ if (is_branch_ins(&p->opcode))
+ return true;
+
+ return false;
+}
+NOKPROBE_SYMBOL(insns_need_simulation);
+
+static void arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
+{
+ if (is_pc_ins(&p->opcode))
+ simu_pc(regs, p->opcode);
+ else if (is_branch_ins(&p->opcode))
+ simu_branch(regs, p->opcode);
+}
+NOKPROBE_SYMBOL(arch_simulate_insn);
+
+static void arch_prepare_ss_slot(struct kprobe *p)
+{
+ p->ainsn.insn[0] = *p->addr;
+ p->ainsn.insn[1] = singlestep_insn;
+ p->ainsn.restore = (unsigned long)p->addr + LOONGARCH_INSN_SIZE;
+}
+NOKPROBE_SYMBOL(arch_prepare_ss_slot);
+
+static void arch_prepare_simulate(struct kprobe *p)
+{
+ p->ainsn.restore = 0;
+}
+NOKPROBE_SYMBOL(arch_prepare_simulate);
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+ if ((unsigned long)p->addr & 0x3)
+ return -EILSEQ;
+
+ /* copy instruction */
+ p->opcode = *p->addr;
+
+ /* decode instruction */
+ if (insns_not_supported(p->opcode))
+ return -EINVAL;
+
+ if (insns_need_simulation(p)) {
+ p->ainsn.insn = NULL;
+ } else {
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
+ return -ENOMEM;
+ }
+
+ /* prepare the instruction */
+ if (p->ainsn.insn)
+ arch_prepare_ss_slot(p);
+ else
+ arch_prepare_simulate(p);
+
+ return 0;
+}
+NOKPROBE_SYMBOL(arch_prepare_kprobe);
+
+/* Install breakpoint in text */
+void arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = breakpoint_insn;
+ flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_arm_kprobe);
+
+/* Remove breakpoint from text */
+void arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_disarm_kprobe);
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+NOKPROBE_SYMBOL(arch_remove_kprobe);
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+NOKPROBE_SYMBOL(save_previous_kprobe);
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+NOKPROBE_SYMBOL(restore_previous_kprobe);
+
+static void set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+NOKPROBE_SYMBOL(set_current_kprobe);
+
+/*
+ * Interrupts need to be disabled before single-step mode is set,
+ * and not re-enabled until after single-step mode ends.
+ * Without disabling interrupts on the local CPU, there is a chance
+ * that an interrupt occurs between the exception return and the start
+ * of the out-of-line single step, which would result in wrongly
+ * single-stepping into the interrupt handler.
+ */
+static void save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ kcb->saved_status = regs->csr_prmd;
+ regs->csr_prmd &= ~CSR_PRMD_PIE;
+}
+NOKPROBE_SYMBOL(save_local_irqflag);
+
+static void restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ regs->csr_prmd = kcb->saved_status;
+}
+NOKPROBE_SYMBOL(restore_local_irqflag);
+
+static void post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ /* restore the return address if this was a non-branching insn */
+ if (cur->ainsn.restore != 0)
+ instruction_pointer_set(regs, cur->ainsn.restore);
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ return;
+ }
+
+ /*
+ * update the kcb status even if cur->post_handler is not set,
+ * because reset_current_kprobe() doesn't update the kcb.
+ */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler)
+ cur->post_handler(cur, regs, 0);
+
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+}
+NOKPROBE_SYMBOL(post_kprobe_handler);
+
+static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.insn) {
+ /* IRQs and single stepping do not mix well */
+ save_local_irqflag(kcb, regs);
+ /* set ip register to prepare for single stepping */
+ regs->csr_era = (unsigned long)p->ainsn.insn;
+ } else {
+ /* simulate single-stepping */
+ arch_simulate_insn(p, regs);
+ /* now go for post processing */
+ post_kprobe_handler(p, kcb, regs);
+ }
+}
+NOKPROBE_SYMBOL(setup_singlestep);
+
+static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_REENTER:
+ pr_warn("Failed to recover from reentered kprobes.\n");
+ dump_kprobe(p);
+ WARN_ON_ONCE(1);
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ return true;
+}
+NOKPROBE_SYMBOL(reenter_kprobe);
+
+bool kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb;
+ struct kprobe *p, *cur_kprobe;
+ kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing.
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe(addr);
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return true;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+ * If we have no pre-handler or it returned 0, we
+ * continue with normal processing. If we have a
+ * pre-handler and it returned non-zero, it has
+ * modified the execution path and there is no need
+ * for single-stepping; just reset the current kprobe
+ * and exit.
+ *
+ * The pre_handler itself can hit a breakpoint and
+ * single-step before returning.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs, kcb, 0);
+ } else {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ return true;
+ }
+ }
+
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ * Return back to original instruction, and continue.
+ */
+ regs->csr_era = (unsigned long)addr;
+ preempt_enable_no_resched();
+ return true;
+ }
+
+ preempt_enable_no_resched();
+ return false;
+}
+NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
+
+bool kprobe_singlestep_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long addr = instruction_pointer(regs);
+
+ if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+ ((unsigned long)&cur->ainsn.insn[1] == addr)) {
+ restore_local_irqflag(kcb, regs);
+ post_kprobe_handler(cur, kcb, regs);
+ return true;
+ }
+
+ preempt_enable_no_resched();
+ return false;
+}
+NOKPROBE_SYMBOL(kprobe_singlestep_handler);
+
+bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe, point the ip back to the probe address,
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->csr_era = (unsigned long)cur->addr;
+ WARN_ON_ONCE(!instruction_pointer(regs));
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ } else {
+ restore_local_irqflag(kcb, regs);
+ reset_current_kprobe();
+ }
+ preempt_enable_no_resched();
+ break;
+ }
+ return false;
+}
+NOKPROBE_SYMBOL(kprobe_fault_handler);
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+ return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+}
+
+int __init arch_init_kprobes(void)
+{
+ return 0;
+}
+
+/* ASM function that handles the kretprobes must not be probed */
+NOKPROBE_SYMBOL(__kretprobe_trampoline);
+
+/* Called from __kretprobe_trampoline */
+void __used *trampoline_probe_handler(struct pt_regs *regs)
+{
+ return (void *)kretprobe_trampoline_handler(regs, NULL);
+}
+NOKPROBE_SYMBOL(trampoline_probe_handler);
+
+void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->regs[1];
+ ri->fp = NULL;
+
+ /* Replace the return addr with trampoline addr */
+ regs->regs[1] = (unsigned long)&__kretprobe_trampoline;
+}
+NOKPROBE_SYMBOL(arch_prepare_kretprobe);
+
+int arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+NOKPROBE_SYMBOL(arch_trampoline_kprobe);
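
On the return side, arch_prepare_kretprobe() above swaps the saved $ra for __kretprobe_trampoline (next file), which calls trampoline_probe_handler() to recover the real return address. A minimal sketch of a kretprobe using this plumbing; the target symbol is again illustrative:

#include <linux/kprobes.h>
#include <linux/module.h>

static int demo_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("return value: %lu\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe demo_krp = {
	.handler	= demo_ret,
	.kp.symbol_name	= "kernel_clone",	/* illustrative target */
	.maxactive	= 16,
};

static int __init demo_init(void)
{
	return register_kretprobe(&demo_krp);
}

static void __exit demo_exit(void)
{
	unregister_kretprobe(&demo_krp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");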
diff --git a/arch/loongarch/kernel/kprobes_trampoline.S b/arch/loongarch/kernel/kprobes_trampoline.S
new file mode 100644
index 000000000000..af94b0d213fa
--- /dev/null
+++ b/arch/loongarch/kernel/kprobes_trampoline.S
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/linkage.h>
+#include <asm/stackframe.h>
+
+ .text
+
+ .macro save_all_base_regs
+ cfi_st ra, PT_R1
+ cfi_st tp, PT_R2
+ cfi_st a0, PT_R4
+ cfi_st a1, PT_R5
+ cfi_st a2, PT_R6
+ cfi_st a3, PT_R7
+ cfi_st a4, PT_R8
+ cfi_st a5, PT_R9
+ cfi_st a6, PT_R10
+ cfi_st a7, PT_R11
+ cfi_st t0, PT_R12
+ cfi_st t1, PT_R13
+ cfi_st t2, PT_R14
+ cfi_st t3, PT_R15
+ cfi_st t4, PT_R16
+ cfi_st t5, PT_R17
+ cfi_st t6, PT_R18
+ cfi_st t7, PT_R19
+ cfi_st t8, PT_R20
+ cfi_st u0, PT_R21
+ cfi_st fp, PT_R22
+ cfi_st s0, PT_R23
+ cfi_st s1, PT_R24
+ cfi_st s2, PT_R25
+ cfi_st s3, PT_R26
+ cfi_st s4, PT_R27
+ cfi_st s5, PT_R28
+ cfi_st s6, PT_R29
+ cfi_st s7, PT_R30
+ cfi_st s8, PT_R31
+ csrrd t0, LOONGARCH_CSR_CRMD
+ andi t0, t0, 0x7 /* extract bit[1:0] PLV, bit[2] IE */
+ LONG_S t0, sp, PT_CRMD
+ .endm
+
+ .macro restore_all_base_regs
+ cfi_ld tp, PT_R2
+ cfi_ld a0, PT_R4
+ cfi_ld a1, PT_R5
+ cfi_ld a2, PT_R6
+ cfi_ld a3, PT_R7
+ cfi_ld a4, PT_R8
+ cfi_ld a5, PT_R9
+ cfi_ld a6, PT_R10
+ cfi_ld a7, PT_R11
+ cfi_ld t0, PT_R12
+ cfi_ld t1, PT_R13
+ cfi_ld t2, PT_R14
+ cfi_ld t3, PT_R15
+ cfi_ld t4, PT_R16
+ cfi_ld t5, PT_R17
+ cfi_ld t6, PT_R18
+ cfi_ld t7, PT_R19
+ cfi_ld t8, PT_R20
+ cfi_ld u0, PT_R21
+ cfi_ld fp, PT_R22
+ cfi_ld s0, PT_R23
+ cfi_ld s1, PT_R24
+ cfi_ld s2, PT_R25
+ cfi_ld s3, PT_R26
+ cfi_ld s4, PT_R27
+ cfi_ld s5, PT_R28
+ cfi_ld s6, PT_R29
+ cfi_ld s7, PT_R30
+ cfi_ld s8, PT_R31
+ LONG_L t0, sp, PT_CRMD
+ li.d t1, 0x7 /* mask bit[1:0] PLV, bit[2] IE */
+ csrxchg t0, t1, LOONGARCH_CSR_CRMD
+ .endm
+
+SYM_CODE_START(__kretprobe_trampoline)
+ addi.d sp, sp, -PT_SIZE
+ save_all_base_regs
+
+ addi.d t0, sp, PT_SIZE
+ LONG_S t0, sp, PT_R3
+
+ move a0, sp /* pt_regs */
+
+ bl trampoline_probe_handler
+
+ /* use the result as the return-address */
+ move ra, a0
+
+ restore_all_base_regs
+ addi.d sp, sp, PT_SIZE
+
+ jr ra
+SYM_CODE_END(__kretprobe_trampoline)
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index edfd220a3737..fa2443c7afb2 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -18,6 +18,7 @@
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
+#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
@@ -96,6 +97,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
regs->regs[3] = sp;
}
+void flush_thread(void)
+{
+ flush_ptrace_hw_breakpoint(current);
+}
+
void exit_thread(struct task_struct *tsk)
{
}
@@ -181,6 +187,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
childregs->regs[2] = tls;
out:
+ ptrace_hw_copy_thread(p);
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDSIMD);
clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index dc2b82ea894c..06bceae7d104 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -20,7 +20,9 @@
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/errno.h>
+#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
+#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
@@ -29,6 +31,7 @@
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/seccomp.h>
+#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
@@ -39,6 +42,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
+#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/syscall.h>
@@ -246,6 +250,384 @@ static int cfg_set(struct task_struct *target,
return 0;
}
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+/*
+ * Handle hitting a HW-breakpoint.
+ */
+static void ptrace_hbptriggered(struct perf_event *bp,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ int i;
+ struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
+ if (current->thread.hbp_break[i] == bp)
+ break;
+
+ for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
+ if (current->thread.hbp_watch[i] == bp)
+ break;
+
+ force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
+}
+
+static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx)
+{
+ struct perf_event *bp;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ if (idx >= LOONGARCH_MAX_BRP)
+ return ERR_PTR(-EINVAL);
+ idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
+ bp = tsk->thread.hbp_break[idx];
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ if (idx >= LOONGARCH_MAX_WRP)
+ return ERR_PTR(-EINVAL);
+ idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
+ bp = tsk->thread.hbp_watch[idx];
+ break;
+ }
+
+ return bp;
+}
+
+static int ptrace_hbp_set_event(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx,
+ struct perf_event *bp)
+{
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ if (idx >= LOONGARCH_MAX_BRP)
+ return -EINVAL;
+ idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
+ tsk->thread.hbp_break[idx] = bp;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ if (idx >= LOONGARCH_MAX_WRP)
+ return -EINVAL;
+ idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
+ tsk->thread.hbp_watch[idx] = bp;
+ break;
+ }
+
+ return 0;
+}
+
+static struct perf_event *ptrace_hbp_create(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx)
+{
+ int err, type;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ type = HW_BREAKPOINT_X;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ type = HW_BREAKPOINT_RW;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ptrace_breakpoint_init(&attr);
+
+ /*
+ * Initialise fields to sane defaults
+ * (i.e. values that will pass validation).
+ */
+ attr.bp_addr = 0;
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
+ attr.bp_type = type;
+ attr.disabled = 1;
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
+ if (IS_ERR(bp))
+ return bp;
+
+ err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
+ if (err)
+ return ERR_PTR(err);
+
+ return bp;
+}
+
+static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
+ struct arch_hw_breakpoint_ctrl ctrl,
+ struct perf_event_attr *attr)
+{
+ int err, len, type, offset;
+
+ err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
+ if (err)
+ return err;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ if ((type & HW_BREAKPOINT_X) != type)
+ return -EINVAL;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ attr->bp_len = len;
+ attr->bp_type = type;
+ attr->bp_addr += offset;
+
+ return 0;
+}
+
+static int ptrace_hbp_get_resource_info(unsigned int note_type, u16 *info)
+{
+ u8 num;
+ u16 reg = 0;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ num = hw_breakpoint_slots(TYPE_INST);
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ num = hw_breakpoint_slots(TYPE_DATA);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *info = reg | num;
+
+ return 0;
+}
+
+static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (!bp)
+ bp = ptrace_hbp_create(note_type, tsk, idx);
+
+ return bp;
+}
+
+static int ptrace_hbp_get_ctrl(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u32 *ctrl)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
+
+ return 0;
+}
+
+static int ptrace_hbp_get_mask(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u64 *mask)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ *mask = bp ? counter_arch_bp(bp)->mask : 0;
+
+ return 0;
+}
+
+static int ptrace_hbp_get_addr(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u64 *addr)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ *addr = bp ? counter_arch_bp(bp)->address : 0;
+
+ return 0;
+}
+
+static int ptrace_hbp_set_ctrl(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u32 uctrl)
+{
+ int err;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ struct arch_hw_breakpoint_ctrl ctrl;
+
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ attr = bp->attr;
+ decode_ctrl_reg(uctrl, &ctrl);
+ err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
+ if (err)
+ return err;
+
+ return modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int ptrace_hbp_set_mask(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u64 mask)
+{
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ struct arch_hw_breakpoint *info;
+
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ attr = bp->attr;
+ info = counter_arch_bp(bp);
+ info->mask = mask;
+
+ return modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int ptrace_hbp_set_addr(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u64 addr)
+{
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ attr = bp->attr;
+ attr.bp_addr = addr;
+
+ return modify_user_hw_breakpoint(bp, &attr);
+}
+
+#define PTRACE_HBP_CTRL_SZ sizeof(u32)
+#define PTRACE_HBP_ADDR_SZ sizeof(u64)
+#define PTRACE_HBP_MASK_SZ sizeof(u64)
+
+static int hw_break_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ u16 info;
+ u32 ctrl;
+ u64 addr, mask;
+ int ret, idx = 0;
+ unsigned int note_type = regset->core_note_type;
+
+ /* Resource info */
+ ret = ptrace_hbp_get_resource_info(note_type, &info);
+ if (ret)
+ return ret;
+
+ membuf_write(&to, &info, sizeof(info));
+
+ /* (address, ctrl) registers */
+ while (to.left) {
+ ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
+ if (ret)
+ return ret;
+
+ membuf_store(&to, addr);
+ membuf_store(&to, mask);
+ membuf_store(&to, ctrl);
+ idx++;
+ }
+
+ return 0;
+}
+
+static int hw_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u32 ctrl;
+ u64 addr, mask;
+ int ret, idx = 0, offset, limit;
+ unsigned int note_type = regset->core_note_type;
+
+ /* Resource info */
+ offset = offsetof(struct user_watch_state, dbg_regs);
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
+
+ /* (address, ctrl) registers */
+ limit = regset->n * regset->size;
+ while (count && offset < limit) {
+ if (count < PTRACE_HBP_ADDR_SZ)
+ return -EINVAL;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
+ offset, offset + PTRACE_HBP_ADDR_SZ);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_ADDR_SZ;
+
+ if (!count)
+ break;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
+ offset, offset + PTRACE_HBP_MASK_SZ);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_MASK_SZ;
+
+ if (!count)
+ break;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
+ offset, offset + PTRACE_HBP_CTRL_SZ);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_CTRL_SZ;
+ idx++;
+ }
+
+ return 0;
+}
+
+#endif
+
struct pt_regs_offset {
const char *name;
int offset;
@@ -319,6 +701,10 @@ enum loongarch_regset {
REGSET_GPR,
REGSET_FPR,
REGSET_CPUCFG,
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ REGSET_HW_BREAK,
+ REGSET_HW_WATCH,
+#endif
};
static const struct user_regset loongarch64_regsets[] = {
@@ -346,6 +732,24 @@ static const struct user_regset loongarch64_regsets[] = {
.regset_get = cfg_get,
.set = cfg_set,
},
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ [REGSET_HW_BREAK] = {
+ .core_note_type = NT_LOONGARCH_HW_BREAK,
+ .n = sizeof(struct user_watch_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = hw_break_get,
+ .set = hw_break_set,
+ },
+ [REGSET_HW_WATCH] = {
+ .core_note_type = NT_LOONGARCH_HW_WATCH,
+ .n = sizeof(struct user_watch_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = hw_break_get,
+ .set = hw_break_set,
+ },
+#endif
};
static const struct user_regset_view user_loongarch64_view = {
@@ -431,3 +835,71 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void ptrace_triggered(struct perf_event *bp,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event_attr attr;
+
+ attr = bp->attr;
+ attr.disabled = true;
+ modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ struct arch_hw_breakpoint *info;
+ struct thread_struct *thread = &tsk->thread;
+
+ bp = thread->hbp_break[0];
+ if (!bp) {
+ ptrace_breakpoint_init(&attr);
+
+ attr.bp_addr = addr;
+ attr.bp_len = HW_BREAKPOINT_LEN_8;
+ attr.bp_type = HW_BREAKPOINT_X;
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
+ NULL, tsk);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ thread->hbp_break[0] = bp;
+ } else {
+ int err;
+
+ attr = bp->attr;
+ attr.bp_addr = addr;
+
+ /* Reenable breakpoint */
+ attr.disabled = false;
+ err = modify_user_hw_breakpoint(bp, &attr);
+ if (unlikely(err))
+ return err;
+
+ csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
+ }
+ info = counter_arch_bp(bp);
+ info->mask = TASK_SIZE - 1;
+
+ return 0;
+}
+
+/* ptrace API */
+void user_enable_single_step(struct task_struct *task)
+{
+ struct thread_info *ti = task_thread_info(task);
+
+ set_single_step(task, task_pt_regs(task)->csr_era);
+ task->thread.single_step = task_pt_regs(task)->csr_era;
+ set_ti_thread_flag(ti, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *task)
+{
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+#endif
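
Userspace reaches the new regsets through PTRACE_GETREGSET/PTRACE_SETREGSET. A sketch of reading the watchpoint regset filled in by hw_break_get() above; the note value and the user_watch_state-shaped layout follow the UAPI additions that accompany this series, so treat both as assumptions rather than a quoted interface:

#include <elf.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_LOONGARCH_HW_WATCH
#define NT_LOONGARCH_HW_WATCH 0xa06	/* assumption: value from the UAPI change */
#endif

/* Mirrors struct user_watch_state: resource info, then (addr, mask, ctrl). */
struct watch_state {
	uint64_t dbg_info;
	struct {
		uint64_t addr;
		uint64_t mask;
		uint32_t ctrl;
		uint32_t pad;
	} dbg_regs[8];
};

int read_watch_info(pid_t pid)
{
	struct watch_state state;
	struct iovec iov = { .iov_base = &state, .iov_len = sizeof(state) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_LOONGARCH_HW_WATCH, &iov) < 0)
		return -1;

	/* low bits of the resource word carry the register count */
	printf("%llu watchpoint registers\n",
	       (unsigned long long)(state.dbg_info & 0xff));
	return 0;
}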
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
new file mode 100644
index 000000000000..01f94d1e3edf
--- /dev/null
+++ b/arch/loongarch/kernel/relocate.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Kernel relocation at boot time
+ *
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/panic_notifier.h>
+#include <linux/start_kernel.h>
+#include <asm/bootinfo.h>
+#include <asm/early_ioremap.h>
+#include <asm/inst.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+#define RELOCATED(x) ((void *)((long)x + reloc_offset))
+#define RELOCATED_KASLR(x) ((void *)((long)x + random_offset))
+
+static unsigned long reloc_offset;
+
+static inline void __init relocate_relative(void)
+{
+ Elf64_Rela *rela, *rela_end;
+ rela = (Elf64_Rela *)&__rela_dyn_begin;
+ rela_end = (Elf64_Rela *)&__rela_dyn_end;
+
+ for ( ; rela < rela_end; rela++) {
+ Elf64_Addr addr = rela->r_offset;
+ Elf64_Addr relocated_addr = rela->r_addend;
+
+ if (rela->r_info != R_LARCH_RELATIVE)
+ continue;
+
+ if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
+ relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
+
+ *(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
+ }
+}
+
+static inline void __init relocate_absolute(long random_offset)
+{
+ void *begin, *end;
+ struct rela_la_abs *p;
+
+ begin = RELOCATED_KASLR(&__la_abs_begin);
+ end = RELOCATED_KASLR(&__la_abs_end);
+
+ for (p = begin; (void *)p < end; p++) {
+ long v = p->symvalue;
+ uint32_t lu12iw, ori, lu32id, lu52id;
+ union loongarch_instruction *insn = (void *)p - p->offset;
+
+ lu12iw = (v >> 12) & 0xfffff;
+ ori = v & 0xfff;
+ lu32id = (v >> 32) & 0xfffff;
+ lu52id = v >> 52;
+
+ insn[0].reg1i20_format.immediate = lu12iw;
+ insn[1].reg2i12_format.immediate = ori;
+ insn[2].reg1i20_format.immediate = lu32id;
+ insn[3].reg2i12_format.immediate = lu52id;
+ }
+}
+
+#ifdef CONFIG_RANDOMIZE_BASE
+static inline __init unsigned long rotate_xor(unsigned long hash,
+ const void *area, size_t size)
+{
+ size_t i, diff;
+ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+
+ diff = (void *)ptr - area;
+ if (size < diff + sizeof(hash))
+ return hash;
+
+ size = ALIGN_DOWN(size - diff, sizeof(hash));
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+static inline __init unsigned long get_random_boot(void)
+{
+ unsigned long hash = 0;
+ unsigned long entropy = random_get_entropy();
+
+ /* Attempt to create a simple but unpredictable starting entropy. */
+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+ /* Add in any runtime entropy we can get */
+ hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+ return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+ char *str;
+ const char *builtin_cmdline = CONFIG_CMDLINE;
+
+ str = strstr(builtin_cmdline, "nokaslr");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return true;
+
+ str = strstr(boot_command_line, "nokaslr");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+
+ return false;
+}
+
+/* Choose a new address for the kernel */
+static inline void __init *determine_relocation_address(void)
+{
+ unsigned long kernel_length;
+ unsigned long random_offset;
+ void *destination = _text;
+
+ if (kaslr_disabled())
+ return destination;
+
+ kernel_length = (long)_end - (long)_text;
+
+ random_offset = get_random_boot() << 16;
+ random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+ if (random_offset < kernel_length)
+ random_offset += ALIGN(kernel_length, 0xffff);
+
+ return RELOCATED_KASLR(destination);
+}
+
+static inline int __init relocation_addr_valid(void *location_new)
+{
+ if ((unsigned long)location_new & 0x00000ffff)
+ return 0; /* Inappropriately aligned new location */
+
+ if ((unsigned long)location_new < (unsigned long)_end)
+ return 0; /* New location overlaps original kernel */
+
+ return 1;
+}
+#endif
+
+static inline void __init update_reloc_offset(unsigned long *addr, long random_offset)
+{
+ unsigned long *new_addr = (unsigned long *)RELOCATED_KASLR(addr);
+
+ *new_addr = (unsigned long)reloc_offset;
+}
+
+void * __init relocate_kernel(void)
+{
+ unsigned long kernel_length;
+ unsigned long random_offset = 0;
+ void *location_new = _text; /* Default to original kernel start */
+ void *kernel_entry = start_kernel; /* Default to original kernel entry point */
+ char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+
+ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ location_new = determine_relocation_address();
+
+ /* Sanity check relocation address */
+ if (relocation_addr_valid(location_new))
+ random_offset = (unsigned long)location_new - (unsigned long)(_text);
+#endif
+ reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+
+ if (random_offset) {
+ kernel_length = (long)(_end) - (long)(_text);
+
+ /* Copy the kernel to its new location */
+ memcpy(location_new, _text, kernel_length);
+
+ /* Sync the caches ready for execution of the new kernel */
+ __asm__ __volatile__ (
+ "ibar 0 \t\n"
+ "dbar 0 \t\n"
+ ::: "memory");
+
+ reloc_offset += random_offset;
+
+ /* Return the new kernel's entry point */
+ kernel_entry = RELOCATED_KASLR(start_kernel);
+
+ /* The current thread is now within the relocated kernel */
+ __current_thread_info = RELOCATED_KASLR(__current_thread_info);
+
+ update_reloc_offset(&reloc_offset, random_offset);
+ }
+
+ if (reloc_offset)
+ relocate_relative();
+
+ relocate_absolute(random_offset);
+
+ return kernel_entry;
+}
+
+/*
+ * Show relocation information on panic.
+ */
+static void show_kernel_relocation(const char *level)
+{
+ if (reloc_offset > 0) {
+ printk(level);
+ pr_cont("Kernel relocated by 0x%lx\n", reloc_offset);
+ pr_cont(" .text @ 0x%px\n", _text);
+ pr_cont(" .data @ 0x%px\n", _sdata);
+ pr_cont(" .bss @ 0x%px\n", __bss_start);
+ }
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ show_kernel_relocation(KERN_EMERG);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+ .notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_location_notifier);
+ return 0;
+}
+
+arch_initcall(register_kernel_offset_dumper);
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 4344502c0b31..bae84ccf6d36 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -234,11 +234,14 @@ static void __init arch_reserve_vmcore(void)
#endif
}
+/* 2MB alignment for crash kernel regions */
+#define CRASH_ALIGN SZ_2M
+#define CRASH_ADDR_MAX SZ_4G
+
static void __init arch_parse_crashkernel(void)
{
#ifdef CONFIG_KEXEC
int ret;
- unsigned long long start;
unsigned long long total_mem;
unsigned long long crash_base, crash_size;
@@ -247,8 +250,13 @@ static void __init arch_parse_crashkernel(void)
if (ret < 0 || crash_size <= 0)
return;
- start = memblock_phys_alloc_range(crash_size, 1, crash_base, crash_base + crash_size);
- if (start != crash_base) {
+ if (crash_base <= 0) {
+ crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, CRASH_ALIGN, CRASH_ADDR_MAX);
+ if (!crash_base) {
+ pr_warn("crashkernel reservation failed - No suitable area found.\n");
+ return;
+ }
+ } else if (!memblock_phys_alloc_range(crash_size, CRASH_ALIGN, crash_base, crash_base + crash_size)) {
pr_warn("Invalid memory region reserved for crash kernel\n");
return;
}
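
With this change, crashkernel=512M (no base) lets memblock pick any 2 MiB-aligned region below 4 GiB, while crashkernel=512M@0x10000000 still demands the exact range. A toy model of the two paths, with memblock_phys_alloc_range() replaced by a stub that treats all memory as free (names and behaviour are illustrative only):

    #include <stdio.h>

    #define CRASH_ALIGN    (2UL << 20)   /* 2 MiB */
    #define CRASH_ADDR_MAX (4UL << 30)   /* 4 GiB */

    /* Toy stand-in for memblock_phys_alloc_range(): pretend everything
     * in [start, end) is free. Returns 0 on failure. */
    static unsigned long alloc_range(unsigned long size, unsigned long align,
                                     unsigned long start, unsigned long end)
    {
            unsigned long base = (start + align - 1) & ~(align - 1);
            return (base + size <= end) ? base : 0;
    }

    int main(void)
    {
            unsigned long crash_size = 512UL << 20;

            /* crashkernel=512M: no base given, search [CRASH_ALIGN, 4G) */
            printf("auto base:  0x%lx\n",
                   alloc_range(crash_size, CRASH_ALIGN, CRASH_ALIGN, CRASH_ADDR_MAX));

            /* crashkernel=512M@256M: the exact range must fit, or we fail */
            unsigned long fixed = 256UL << 20;
            printf("fixed base: 0x%lx\n",
                   alloc_range(crash_size, CRASH_ALIGN, fixed, fixed + crash_size));
            return 0;
    }
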
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index a6576dea590c..4351f69d9950 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -140,16 +140,17 @@ static int get_timer_irq(void)
int constant_clockevent_init(void)
{
- int irq;
unsigned int cpu = smp_processor_id();
unsigned long min_delta = 0x600;
unsigned long max_delta = (1UL << 48) - 1;
struct clock_event_device *cd;
- static int timer_irq_installed = 0;
+ static int irq = 0, timer_irq_installed = 0;
- irq = get_timer_irq();
- if (irq < 0)
- pr_err("Failed to map irq %d (timer)\n", irq);
+ if (!timer_irq_installed) {
+ irq = get_timer_irq();
+ if (irq < 0)
+ pr_err("Failed to map irq %d (timer)\n", irq);
+ }
cd = &per_cpu(constant_clockevent_device, cpu);
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index c38a146a973b..de8ebe20b666 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -371,9 +371,14 @@ int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
- unsigned int *pc;
irqentry_state_t state = irqentry_enter(regs);
+#ifndef CONFIG_ARCH_STRICT_ALIGN
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+#else
+ unsigned int *pc;
+
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
/*
@@ -397,8 +402,8 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
sigbus:
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
-
out:
+#endif
irqentry_exit(regs, state);
}
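
For context, the !CONFIG_ARCH_STRICT_ALIGN branch above targets cores that handle unaligned accesses in hardware, so the emulation code can be compiled out and do_ale() signals immediately; strict-alignment cores keep the emulation path. A user-space snippet that can provoke the fault on strict-alignment hardware (the cast is undefined behaviour in C and shown only for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t storage[2] = { 0x1122334455667788ULL, 0 };
            char *p = (char *)storage + 1;  /* deliberately misaligned */

            /* A raw 8-byte load from an odd address. Cores with hardware
             * unaligned-access support complete it transparently; on a
             * strict-alignment core it traps to do_ale(), which emulates
             * it under CONFIG_ARCH_STRICT_ALIGN; a kernel built without
             * that option delivers SIGBUS (BUS_ADRALN) instead. */
            volatile uint64_t v = *(volatile uint64_t *)p;

            printf("0x%llx\n", (unsigned long long)v);
            return 0;
    }
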
@@ -432,7 +437,9 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
unsigned long era = exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
- local_irq_enable();
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_enable();
+
current->thread.trap_nr = read_csr_excode();
if (__get_inst(&opcode, (u32 *)era, user))
goto out_sigsegv;
@@ -445,14 +452,12 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
*/
switch (bcode) {
case BRK_KPROBE_BP:
- if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
- current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ if (kprobe_breakpoint_handler(regs))
goto out;
else
break;
case BRK_KPROBE_SSTEPBP:
- if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
- current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ if (kprobe_singlestep_handler(regs))
goto out;
else
break;
@@ -495,7 +500,9 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
}
out:
- local_irq_disable();
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_disable();
+
irqentry_exit(regs, state);
return;
@@ -506,7 +513,52 @@ out_sigsegv:
asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
+ irqentry_state_t state = irqentry_enter(regs);
+
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
pr_warn("Hardware watch point handler not implemented!\n");
+#else
+ if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
+ int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
+ unsigned long pc = instruction_pointer(regs);
+ union loongarch_instruction *ip = (union loongarch_instruction *)pc;
+
+ if (llbit) {
+ /*
+ * When an ll-sc combo is encountered, it is regarded as a single
+ * instruction, so don't clear llbit or reset CSR.FWPS.Skip until
+ * the ll-sc execution is completed.
+ */
+ csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
+ csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
+ goto out;
+ }
+
+ if (pc == current->thread.single_step) {
+ /*
+ * Certain insns are occasionally not skipped when CSR.FWPS.Skip is
+ * set, such as fld.d/fst.d. So singlestep needs to compare whether
+ * csr_era equals the address recorded when the step was last armed.
+ */
+ if (!is_self_loop_ins(ip, regs)) {
+ /*
+ * Check whether the target pc of the given instruction equals the
+ * current pc. If so, do not set the CSR.FWPS.Skip bit, as that
+ * would break the original instruction stream.
+ */
+ csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
+ goto out;
+ }
+ }
+ } else {
+ breakpoint_handler(regs);
+ watchpoint_handler(regs);
+ }
+
+ force_sig(SIGTRAP);
+out:
+#endif
+ irqentry_exit(regs, state);
}
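
is_self_loop_ins() exists because skipping an instruction whose branch target is its own pc (e.g. an idle loop "b 0") would silently exit the loop. A sketch of what such a check looks like for a plain "b" instruction, assuming the top-6-bit major opcode 0x14 and a zero 26-bit offset; this is a simplification of the kernel's full check, which also covers other branch forms:

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified self-loop test: a LoongArch 'b' instruction
     * (major opcode 0b010100) whose 26-bit offset field is zero
     * branches to itself. */
    static bool is_self_branch(uint32_t insn)
    {
            return ((insn >> 26) == 0x14) && ((insn & 0x03ffffff) == 0);
    }

    int main(void)
    {
            return is_self_branch(0x50000000) ? 0 : 1;  /* "b 0" encodes as 0x50000000 */
    }
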
asmlinkage void noinstr do_ri(struct pt_regs *regs)
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index 78506b31ba61..0c7b041be9d8 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -65,10 +65,21 @@ SECTIONS
__alt_instructions_end = .;
}
+#ifdef CONFIG_RELOCATABLE
+ . = ALIGN(8);
+ .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
+ __la_abs_begin = .;
+ *(.la_abs)
+ __la_abs_end = .;
+ }
+#endif
+
.got : ALIGN(16) { *(.got) }
.plt : ALIGN(16) { *(.plt) }
.got.plt : ALIGN(16) { *(.got.plt) }
+ .data.rel : { *(.data.rel*) }
+
. = ALIGN(PECOFF_SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;
@@ -92,8 +103,6 @@ SECTIONS
PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
#endif
- .rela.dyn : ALIGN(8) { *(.rela.dyn) *(.rela*) }
-
.init.bss : {
*(.init.bss)
}
@@ -106,6 +115,12 @@ SECTIONS
RO_DATA(4096)
RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
+ .rela.dyn : ALIGN(8) {
+ __rela_dyn_begin = .;
+ *(.rela.dyn) *(.rela*)
+ __rela_dyn_end = .;
+ }
+
.sdata : {
*(.sdata)
}
@@ -132,6 +147,7 @@ SECTIONS
DISCARDS
/DISCARD/ : {
+ *(.dynamic .dynsym .dynstr .hash .gnu.hash)
*(.gnu.attributes)
*(.options)
*(.eh_frame)