Diffstat (limited to 'arch/riscv/include')
-rw-r--r--  arch/riscv/include/asm/Kbuild | 4
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h | 286
-rw-r--r--  arch/riscv/include/asm/compat.h | 1
-rw-r--r--  arch/riscv/include/asm/cpufeature-macros.h | 66
-rw-r--r--  arch/riscv/include/asm/cpufeature.h | 73
-rw-r--r--  arch/riscv/include/asm/csr.h | 16
-rw-r--r--  arch/riscv/include/asm/entry-common.h | 1
-rw-r--r--  arch/riscv/include/asm/ftrace.h | 22
-rw-r--r--  arch/riscv/include/asm/hwcap.h | 7
-rw-r--r--  arch/riscv/include/asm/hwprobe.h | 2
-rw-r--r--  arch/riscv/include/asm/jump_label.h | 50
-rw-r--r--  arch/riscv/include/asm/kvm_host.h | 10
-rw-r--r--  arch/riscv/include/asm/kvm_nacl.h | 245
-rw-r--r--  arch/riscv/include/asm/mmu.h | 7
-rw-r--r--  arch/riscv/include/asm/mmu_context.h | 13
-rw-r--r--  arch/riscv/include/asm/page.h | 7
-rw-r--r--  arch/riscv/include/asm/perf_event.h | 3
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 19
-rw-r--r--  arch/riscv/include/asm/processor.h | 9
-rw-r--r--  arch/riscv/include/asm/sbi.h | 120
-rw-r--r--  arch/riscv/include/asm/set_memory.h | 1
-rw-r--r--  arch/riscv/include/asm/spinlock.h | 47
-rw-r--r--  arch/riscv/include/asm/switch_to.h | 19
-rw-r--r--  arch/riscv/include/asm/text-patching.h (renamed from arch/riscv/include/asm/patch.h) | 0
-rw-r--r--  arch/riscv/include/asm/thread_info.h | 10
-rw-r--r--  arch/riscv/include/asm/uaccess.h | 43
-rw-r--r--  arch/riscv/include/asm/uprobes.h | 2
-rw-r--r--  arch/riscv/include/asm/vdso/time_data.h (renamed from arch/riscv/include/asm/vdso/data.h) | 8
-rw-r--r--  arch/riscv/include/asm/vdso/vsyscall.h | 4
-rw-r--r--  arch/riscv/include/asm/vector.h | 2
-rw-r--r--  arch/riscv/include/uapi/asm/hwprobe.h | 6
-rw-r--r--  arch/riscv/include/uapi/asm/kvm.h | 2
32 files changed, 910 insertions, 195 deletions
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 1461af12da6e..de13d5a234f8 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -6,10 +6,12 @@ generic-y += early_ioremap.h
generic-y += flat.h
generic-y += kvm_para.h
generic-y += mmzone.h
+generic-y += mcs_spinlock.h
generic-y += parport.h
-generic-y += spinlock.h
generic-y += spinlock_types.h
+generic-y += ticket_spinlock.h
generic-y += qrwlock.h
generic-y += qrwlock_types.h
+generic-y += qspinlock.h
generic-y += user.h
generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index ebbce134917c..4cadc56220fe 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -12,30 +12,43 @@
#include <asm/fence.h>
#include <asm/hwcap.h>
#include <asm/insn-def.h>
-
-#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \
-({ \
- u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
- ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
- ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
- << __s; \
- ulong __newx = (ulong)(n) << __s; \
- ulong __retx; \
- ulong __rc; \
- \
- __asm__ __volatile__ ( \
- prepend \
- "0: lr.w %0, %2\n" \
- " and %1, %0, %z4\n" \
- " or %1, %1, %z3\n" \
- " sc.w" sc_sfx " %1, %1, %2\n" \
- " bnez %1, 0b\n" \
- append \
- : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
- : "rJ" (__newx), "rJ" (~__mask) \
- : "memory"); \
- \
- r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+#include <asm/cpufeature-macros.h>
+
+#define __arch_xchg_masked(sc_sfx, swap_sfx, prepend, sc_append, \
+ swap_append, r, p, n) \
+({ \
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \
+ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA)) { \
+ __asm__ __volatile__ ( \
+ prepend \
+ " amoswap" swap_sfx " %0, %z2, %1\n" \
+ swap_append \
+ : "=&r" (r), "+A" (*(p)) \
+ : "rJ" (n) \
+ : "memory"); \
+ } else { \
+ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
+ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
+ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
+ << __s; \
+ ulong __newx = (ulong)(n) << __s; \
+ ulong __retx; \
+ ulong __rc; \
+ \
+ __asm__ __volatile__ ( \
+ prepend \
+ "0: lr.w %0, %2\n" \
+ " and %1, %0, %z4\n" \
+ " or %1, %1, %z3\n" \
+ " sc.w" sc_sfx " %1, %1, %2\n" \
+ " bnez %1, 0b\n" \
+ sc_append \
+ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
+ : "rJ" (__newx), "rJ" (~__mask) \
+ : "memory"); \
+ \
+ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+ } \
})
#define __arch_xchg(sfx, prepend, append, r, p, n) \
@@ -58,8 +71,13 @@
\
switch (sizeof(*__ptr)) { \
case 1: \
+ __arch_xchg_masked(sc_sfx, ".b" swap_sfx, \
+ prepend, sc_append, swap_append, \
+ __ret, __ptr, __new); \
+ break; \
case 2: \
- __arch_xchg_masked(sc_sfx, prepend, sc_append, \
+ __arch_xchg_masked(sc_sfx, ".h" swap_sfx, \
+ prepend, sc_append, swap_append, \
__ret, __ptr, __new); \
break; \
case 4: \
@@ -106,55 +124,90 @@
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
-
-#define __arch_cmpxchg_masked(sc_sfx, prepend, append, r, p, o, n) \
-({ \
- u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
- ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
- ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
- << __s; \
- ulong __newx = (ulong)(n) << __s; \
- ulong __oldx = (ulong)(o) << __s; \
- ulong __retx; \
- ulong __rc; \
- \
- __asm__ __volatile__ ( \
- prepend \
- "0: lr.w %0, %2\n" \
- " and %1, %0, %z5\n" \
- " bne %1, %z3, 1f\n" \
- " and %1, %0, %z6\n" \
- " or %1, %1, %z4\n" \
- " sc.w" sc_sfx " %1, %1, %2\n" \
- " bnez %1, 0b\n" \
- append \
- "1:\n" \
- : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
- : "rJ" ((long)__oldx), "rJ" (__newx), \
- "rJ" (__mask), "rJ" (~__mask) \
- : "memory"); \
- \
- r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ r, p, o, n) \
+({ \
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \
+ IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \
+ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA) && \
+ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \
+ r = o; \
+ \
+ __asm__ __volatile__ ( \
+ cas_prepend \
+ " amocas" cas_sfx " %0, %z2, %1\n" \
+ cas_append \
+ : "+&r" (r), "+A" (*(p)) \
+ : "rJ" (n) \
+ : "memory"); \
+ } else { \
+ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \
+ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \
+ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \
+ << __s; \
+ ulong __newx = (ulong)(n) << __s; \
+ ulong __oldx = (ulong)(o) << __s; \
+ ulong __retx; \
+ ulong __rc; \
+ \
+ __asm__ __volatile__ ( \
+ sc_prepend \
+ "0: lr.w %0, %2\n" \
+ " and %1, %0, %z5\n" \
+ " bne %1, %z3, 1f\n" \
+ " and %1, %0, %z6\n" \
+ " or %1, %1, %z4\n" \
+ " sc.w" sc_sfx " %1, %1, %2\n" \
+ " bnez %1, 0b\n" \
+ sc_append \
+ "1:\n" \
+ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \
+ : "rJ" ((long)__oldx), "rJ" (__newx), \
+ "rJ" (__mask), "rJ" (~__mask) \
+ : "memory"); \
+ \
+ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \
+ } \
})
-#define __arch_cmpxchg(lr_sfx, sc_sfx, prepend, append, r, p, co, o, n) \
+#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ r, p, co, o, n) \
({ \
- register unsigned int __rc; \
+ if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \
+ riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)) { \
+ r = o; \
\
- __asm__ __volatile__ ( \
- prepend \
- "0: lr" lr_sfx " %0, %2\n" \
- " bne %0, %z3, 1f\n" \
- " sc" sc_sfx " %1, %z4, %2\n" \
- " bnez %1, 0b\n" \
- append \
- "1:\n" \
- : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \
- : "rJ" (co o), "rJ" (n) \
- : "memory"); \
+ __asm__ __volatile__ ( \
+ cas_prepend \
+ " amocas" cas_sfx " %0, %z2, %1\n" \
+ cas_append \
+ : "+&r" (r), "+A" (*(p)) \
+ : "rJ" (n) \
+ : "memory"); \
+ } else { \
+ register unsigned int __rc; \
+ \
+ __asm__ __volatile__ ( \
+ sc_prepend \
+ "0: lr" lr_sfx " %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc" sc_sfx " %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ sc_append \
+ "1:\n" \
+ : "=&r" (r), "=&r" (__rc), "+A" (*(p)) \
+ : "rJ" (co o), "rJ" (n) \
+ : "memory"); \
+ } \
})
-#define _arch_cmpxchg(ptr, old, new, sc_sfx, prepend, append) \
+#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(__ptr)) __old = (old); \
@@ -163,17 +216,28 @@
\
switch (sizeof(*__ptr)) { \
case 1: \
+ __arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ __ret, __ptr, __old, __new); \
+ break; \
case 2: \
- __arch_cmpxchg_masked(sc_sfx, prepend, append, \
- __ret, __ptr, __old, __new); \
+ __arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ __ret, __ptr, __old, __new); \
break; \
case 4: \
- __arch_cmpxchg(".w", ".w" sc_sfx, prepend, append, \
- __ret, __ptr, (long), __old, __new); \
+ __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ __ret, __ptr, (long), __old, __new); \
break; \
case 8: \
- __arch_cmpxchg(".d", ".d" sc_sfx, prepend, append, \
- __ret, __ptr, /**/, __old, __new); \
+ __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \
+ sc_prepend, sc_append, \
+ cas_prepend, cas_append, \
+ __ret, __ptr, /**/, __old, __new); \
break; \
default: \
BUILD_BUG(); \
@@ -181,17 +245,40 @@
(__typeof__(*(__ptr)))__ret; \
})
+/*
+ * These macros are here to improve the readability of the arch_cmpxchg_XXX()
+ * macros.
+ */
+#define SC_SFX(x) x
+#define CAS_SFX(x) x
+#define SC_PREPEND(x) x
+#define SC_APPEND(x) x
+#define CAS_PREPEND(x) x
+#define CAS_APPEND(x) x
+
#define arch_cmpxchg_relaxed(ptr, o, n) \
- _arch_cmpxchg((ptr), (o), (n), "", "", "")
+ _arch_cmpxchg((ptr), (o), (n), \
+ SC_SFX(""), CAS_SFX(""), \
+ SC_PREPEND(""), SC_APPEND(""), \
+ CAS_PREPEND(""), CAS_APPEND(""))
#define arch_cmpxchg_acquire(ptr, o, n) \
- _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER)
+ _arch_cmpxchg((ptr), (o), (n), \
+ SC_SFX(""), CAS_SFX(""), \
+ SC_PREPEND(""), SC_APPEND(RISCV_ACQUIRE_BARRIER), \
+ CAS_PREPEND(""), CAS_APPEND(RISCV_ACQUIRE_BARRIER))
#define arch_cmpxchg_release(ptr, o, n) \
- _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "")
+ _arch_cmpxchg((ptr), (o), (n), \
+ SC_SFX(""), CAS_SFX(""), \
+ SC_PREPEND(RISCV_RELEASE_BARRIER), SC_APPEND(""), \
+ CAS_PREPEND(RISCV_RELEASE_BARRIER), CAS_APPEND(""))
#define arch_cmpxchg(ptr, o, n) \
- _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n")
+ _arch_cmpxchg((ptr), (o), (n), \
+ SC_SFX(".rl"), CAS_SFX(".aqrl"), \
+ SC_PREPEND(""), SC_APPEND(RISCV_FULL_BARRIER), \
+ CAS_PREPEND(""), CAS_APPEND(""))
#define arch_cmpxchg_local(ptr, o, n) \
arch_cmpxchg_relaxed((ptr), (o), (n))
@@ -226,6 +313,44 @@
arch_cmpxchg_release((ptr), (o), (n)); \
})
+#if defined(CONFIG_64BIT) && defined(CONFIG_RISCV_ISA_ZACAS)
+
+#define system_has_cmpxchg128() riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS)
+
+union __u128_halves {
+ u128 full;
+ struct {
+ u64 low, high;
+ };
+};
+
+#define __arch_cmpxchg128(p, o, n, cas_sfx) \
+({ \
+ __typeof__(*(p)) __o = (o); \
+ union __u128_halves __hn = { .full = (n) }; \
+ union __u128_halves __ho = { .full = (__o) }; \
+ register unsigned long t1 asm ("t1") = __hn.low; \
+ register unsigned long t2 asm ("t2") = __hn.high; \
+ register unsigned long t3 asm ("t3") = __ho.low; \
+ register unsigned long t4 asm ("t4") = __ho.high; \
+ \
+ __asm__ __volatile__ ( \
+ " amocas.q" cas_sfx " %0, %z3, %2" \
+ : "+&r" (t3), "+&r" (t4), "+A" (*(p)) \
+ : "rJ" (t1), "rJ" (t2) \
+ : "memory"); \
+ \
+ ((u128)t4 << 64) | t3; \
+})
+
+#define arch_cmpxchg128(ptr, o, n) \
+ __arch_cmpxchg128((ptr), (o), (n), ".aqrl")
+
+#define arch_cmpxchg128_local(ptr, o, n) \
+ __arch_cmpxchg128((ptr), (o), (n), "")
+
+#endif /* CONFIG_64BIT && CONFIG_RISCV_ISA_ZACAS */
+
#ifdef CONFIG_RISCV_ISA_ZAWRS
/*
* Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to
@@ -245,6 +370,11 @@ static __always_inline void __cmpwait(volatile void *ptr,
: : : : no_zawrs);
switch (size) {
+ case 1:
+ fallthrough;
+ case 2:
+ /* RISC-V doesn't have lr instructions on byte and half-word. */
+ goto no_zawrs;
case 4:
asm volatile(
" lr.w %0, %1\n"
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
index aa103530a5c8..6081327e55f5 100644
--- a/arch/riscv/include/asm/compat.h
+++ b/arch/riscv/include/asm/compat.h
@@ -9,7 +9,6 @@
*/
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
#include <asm-generic/compat.h>
static inline int is_compat_task(void)
diff --git a/arch/riscv/include/asm/cpufeature-macros.h b/arch/riscv/include/asm/cpufeature-macros.h
new file mode 100644
index 000000000000..a8103edbf51f
--- /dev/null
+++ b/arch/riscv/include/asm/cpufeature-macros.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022-2024 Rivos, Inc
+ */
+
+#ifndef _ASM_CPUFEATURE_MACROS_H
+#define _ASM_CPUFEATURE_MACROS_H
+
+#include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
+
+#define STANDARD_EXT 0
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext) \
+ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
+static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor,
+ const unsigned long ext)
+{
+ asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1)
+ :
+ : [vendor] "i" (vendor), [ext] "i" (ext)
+ :
+ : l_no);
+
+ return true;
+l_no:
+ return false;
+}
+
+static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor,
+ const unsigned long ext)
+{
+ asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1)
+ :
+ : [vendor] "i" (vendor), [ext] "i" (ext)
+ :
+ : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
+ return __riscv_has_extension_unlikely(STANDARD_EXT, ext);
+
+ return __riscv_isa_extension_available(NULL, ext);
+}
+
+static __always_inline bool riscv_has_extension_likely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
+ return __riscv_has_extension_likely(STANDARD_EXT, ext);
+
+ return __riscv_isa_extension_available(NULL, ext);
+}
+
+#endif /* _ASM_CPUFEATURE_MACROS_H */
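This new header splits the extension-test helpers out of cpufeature.h so that low-level headers such as cmpxchg.h can use them without pulling in the heavier includes cpufeature.h now needs. A short usage sketch (the caller is hypothetical; the helpers and RISCV_ISA_EXT_ZABHA come from this series):

#include <linux/kconfig.h>
#include <asm/cpufeature-macros.h>
#include <asm/hwcap.h>

/* Hypothetical caller: gate a fast path on an optional extension. */
static bool example_can_use_zabha(void)
{
        /*
         * The _unlikely variant makes the unpatched default branch the
         * "extension absent" side; use _likely when the extension is
         * expected to be present on most targets.
         */
        return IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) &&
               riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA);
}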
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 45f9c1171a48..4bd054c54c21 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -8,9 +8,12 @@
#include <linux/bitmap.h>
#include <linux/jump_label.h>
+#include <linux/workqueue.h>
+#include <linux/kconfig.h>
+#include <linux/percpu-defs.h>
+#include <linux/threads.h>
#include <asm/hwcap.h>
-#include <asm/alternative-macros.h>
-#include <asm/errno.h>
+#include <asm/cpufeature-macros.h>
/*
* These are probed via a device_initcall(), via either the SBI or directly
@@ -31,7 +34,7 @@ DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
-void riscv_user_isa_enable(void);
+void __init riscv_user_isa_enable(void);
#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size, _validate) { \
.name = #_name, \
@@ -58,8 +61,9 @@ void riscv_user_isa_enable(void);
#define __RISCV_ISA_EXT_SUPERSET_VALIDATE(_name, _id, _sub_exts, _validate) \
_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
-#if defined(CONFIG_RISCV_MISALIGNED)
bool check_unaligned_access_emulated_all_cpus(void);
+#if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
+void check_unaligned_access_emulated(struct work_struct *work __always_unused);
void unaligned_emulation_finish(void);
bool unaligned_ctl_available(void);
DECLARE_PER_CPU(long, misaligned_access_speed);
@@ -70,6 +74,12 @@ static inline bool unaligned_ctl_available(void)
}
#endif
+bool check_vector_unaligned_access_emulated_all_cpus(void);
+#if defined(CONFIG_RISCV_VECTOR_MISALIGNED)
+void check_vector_unaligned_access_emulated(struct work_struct *work __always_unused);
+DECLARE_PER_CPU(long, vector_misaligned_access);
+#endif
+
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
@@ -103,61 +113,6 @@ extern const size_t riscv_isa_ext_count;
extern bool riscv_isa_fallback;
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
-
-#define STANDARD_EXT 0
-
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit);
-#define riscv_isa_extension_available(isa_bitmap, ext) \
- __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
-
-static __always_inline bool __riscv_has_extension_likely(const unsigned long vendor,
- const unsigned long ext)
-{
- asm goto(ALTERNATIVE("j %l[l_no]", "nop", %[vendor], %[ext], 1)
- :
- : [vendor] "i" (vendor), [ext] "i" (ext)
- :
- : l_no);
-
- return true;
-l_no:
- return false;
-}
-
-static __always_inline bool __riscv_has_extension_unlikely(const unsigned long vendor,
- const unsigned long ext)
-{
- asm goto(ALTERNATIVE("nop", "j %l[l_yes]", %[vendor], %[ext], 1)
- :
- : [vendor] "i" (vendor), [ext] "i" (ext)
- :
- : l_yes);
-
- return false;
-l_yes:
- return true;
-}
-
-static __always_inline bool riscv_has_extension_unlikely(const unsigned long ext)
-{
- compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX");
-
- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
- return __riscv_has_extension_unlikely(STANDARD_EXT, ext);
-
- return __riscv_isa_extension_available(NULL, ext);
-}
-
-static __always_inline bool riscv_has_extension_likely(const unsigned long ext)
-{
- compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX");
-
- if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE))
- return __riscv_has_extension_likely(STANDARD_EXT, ext);
-
- return __riscv_isa_extension_available(NULL, ext);
-}
-
static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
{
compiletime_assert(ext < RISCV_ISA_EXT_MAX, "ext must be < RISCV_ISA_EXT_MAX");
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 25966995da04..fe5d4eb9adea 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -119,6 +119,10 @@
/* HSTATUS flags */
#ifdef CONFIG_64BIT
+#define HSTATUS_HUPMM _AC(0x3000000000000, UL)
+#define HSTATUS_HUPMM_PMLEN_0 _AC(0x0000000000000, UL)
+#define HSTATUS_HUPMM_PMLEN_7 _AC(0x2000000000000, UL)
+#define HSTATUS_HUPMM_PMLEN_16 _AC(0x3000000000000, UL)
#define HSTATUS_VSXL _AC(0x300000000, UL)
#define HSTATUS_VSXL_SHIFT 32
#endif
@@ -195,6 +199,10 @@
/* xENVCFG flags */
#define ENVCFG_STCE (_AC(1, ULL) << 63)
#define ENVCFG_PBMTE (_AC(1, ULL) << 62)
+#define ENVCFG_PMM (_AC(0x3, ULL) << 32)
+#define ENVCFG_PMM_PMLEN_0 (_AC(0x0, ULL) << 32)
+#define ENVCFG_PMM_PMLEN_7 (_AC(0x2, ULL) << 32)
+#define ENVCFG_PMM_PMLEN_16 (_AC(0x3, ULL) << 32)
#define ENVCFG_CBZE (_AC(1, UL) << 7)
#define ENVCFG_CBCFE (_AC(1, UL) << 6)
#define ENVCFG_CBIE_SHIFT 4
@@ -216,6 +224,12 @@
#define SMSTATEEN0_SSTATEEN0_SHIFT 63
#define SMSTATEEN0_SSTATEEN0 (_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT)
+/* mseccfg bits */
+#define MSECCFG_PMM ENVCFG_PMM
+#define MSECCFG_PMM_PMLEN_0 ENVCFG_PMM_PMLEN_0
+#define MSECCFG_PMM_PMLEN_7 ENVCFG_PMM_PMLEN_7
+#define MSECCFG_PMM_PMLEN_16 ENVCFG_PMM_PMLEN_16
+
/* symbolic CSR names: */
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
@@ -382,6 +396,8 @@
#define CSR_MIP 0x344
#define CSR_PMPCFG0 0x3a0
#define CSR_PMPADDR0 0x3b0
+#define CSR_MSECCFG 0x747
+#define CSR_MSECCFGH 0x757
#define CSR_MVENDORID 0xf11
#define CSR_MARCHID 0xf12
#define CSR_MIMPID 0xf13
diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
index 2293e535f865..b28ccc6cdeea 100644
--- a/arch/riscv/include/asm/entry-common.h
+++ b/arch/riscv/include/asm/entry-common.h
@@ -33,6 +33,7 @@ static inline int handle_misaligned_load(struct pt_regs *regs)
{
return -1;
}
+
static inline int handle_misaligned_store(struct pt_regs *regs)
{
return -1;
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 2cddd79ff21b..3d66437a1029 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -125,8 +125,12 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define arch_ftrace_get_regs(regs) NULL
+#define HAVE_ARCH_FTRACE_REGS
struct ftrace_ops;
-struct ftrace_regs {
+struct ftrace_regs;
+#define arch_ftrace_regs(fregs) ((struct __arch_ftrace_regs *)(fregs))
+
+struct __arch_ftrace_regs {
unsigned long epc;
unsigned long ra;
unsigned long sp;
@@ -150,42 +154,42 @@ struct ftrace_regs {
static __always_inline unsigned long ftrace_regs_get_instruction_pointer(const struct ftrace_regs
*fregs)
{
- return fregs->epc;
+ return arch_ftrace_regs(fregs)->epc;
}
static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
unsigned long pc)
{
- fregs->epc = pc;
+ arch_ftrace_regs(fregs)->epc = pc;
}
static __always_inline unsigned long ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
{
- return fregs->sp;
+ return arch_ftrace_regs(fregs)->sp;
}
static __always_inline unsigned long ftrace_regs_get_argument(struct ftrace_regs *fregs,
unsigned int n)
{
if (n < 8)
- return fregs->args[n];
+ return arch_ftrace_regs(fregs)->args[n];
return 0;
}
static __always_inline unsigned long ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
{
- return fregs->a0;
+ return arch_ftrace_regs(fregs)->a0;
}
static __always_inline void ftrace_regs_set_return_value(struct ftrace_regs *fregs,
unsigned long ret)
{
- fregs->a0 = ret;
+ arch_ftrace_regs(fregs)->a0 = ret;
}
static __always_inline void ftrace_override_function_with_return(struct ftrace_regs *fregs)
{
- fregs->epc = fregs->ra;
+ arch_ftrace_regs(fregs)->epc = arch_ftrace_regs(fregs)->ra;
}
int ftrace_regs_query_register_offset(const char *name);
@@ -196,7 +200,7 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
- fregs->t1 = addr;
+ arch_ftrace_regs(fregs)->t1 = addr;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 46d9de54179e..08d2a5697466 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -93,6 +93,11 @@
#define RISCV_ISA_EXT_ZCMOP 84
#define RISCV_ISA_EXT_ZAWRS 85
#define RISCV_ISA_EXT_SVVPTC 86
+#define RISCV_ISA_EXT_SMMPM 87
+#define RISCV_ISA_EXT_SMNPM 88
+#define RISCV_ISA_EXT_SSNPM 89
+#define RISCV_ISA_EXT_ZABHA 90
+#define RISCV_ISA_EXT_ZICCRSE 91
#define RISCV_ISA_EXT_XLINUXENVCFG 127
@@ -101,8 +106,10 @@
#ifdef CONFIG_RISCV_M_MODE
#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA
+#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SMNPM
#else
#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA
+#define RISCV_ISA_EXT_SUPM RISCV_ISA_EXT_SSNPM
#endif
#endif /* _ASM_RISCV_HWCAP_H */
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index ffb9484531af..1ce1df6d0ff3 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -8,7 +8,7 @@
#include <uapi/asm/hwprobe.h>
-#define RISCV_HWPROBE_MAX_KEY 9
+#define RISCV_HWPROBE_MAX_KEY 10
static inline bool riscv_hwprobe_key_is_valid(__s64 key)
{
diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
index 1c768d02bd0c..87a71cc6d146 100644
--- a/arch/riscv/include/asm/jump_label.h
+++ b/arch/riscv/include/asm/jump_label.h
@@ -16,21 +16,28 @@
#define JUMP_LABEL_NOP_SIZE 4
+#define JUMP_TABLE_ENTRY(key, label) \
+ ".pushsection __jump_table, \"aw\" \n\t" \
+ ".align " RISCV_LGPTR " \n\t" \
+ ".long 1b - ., " label " - . \n\t" \
+ "" RISCV_PTR " " key " - . \n\t" \
+ ".popsection \n\t"
+
+/* This macro is also expanded on the Rust side. */
+#define ARCH_STATIC_BRANCH_ASM(key, label) \
+ " .align 2 \n\t" \
+ " .option push \n\t" \
+ " .option norelax \n\t" \
+ " .option norvc \n\t" \
+ "1: nop \n\t" \
+ " .option pop \n\t" \
+ JUMP_TABLE_ENTRY(key, label)
+
static __always_inline bool arch_static_branch(struct static_key * const key,
const bool branch)
{
asm goto(
- " .align 2 \n\t"
- " .option push \n\t"
- " .option norelax \n\t"
- " .option norvc \n\t"
- "1: nop \n\t"
- " .option pop \n\t"
- " .pushsection __jump_table, \"aw\" \n\t"
- " .align " RISCV_LGPTR " \n\t"
- " .long 1b - ., %l[label] - . \n\t"
- " " RISCV_PTR " %0 - . \n\t"
- " .popsection \n\t"
+ ARCH_STATIC_BRANCH_ASM("%0", "%l[label]")
: : "i"(&((char *)key)[branch]) : : label);
return false;
@@ -38,21 +45,20 @@ label:
return true;
}
+#define ARCH_STATIC_BRANCH_JUMP_ASM(key, label) \
+ " .align 2 \n\t" \
+ " .option push \n\t" \
+ " .option norelax \n\t" \
+ " .option norvc \n\t" \
+ "1: j " label " \n\t" \
+ " .option pop \n\t" \
+ JUMP_TABLE_ENTRY(key, label)
+
static __always_inline bool arch_static_branch_jump(struct static_key * const key,
const bool branch)
{
asm goto(
- " .align 2 \n\t"
- " .option push \n\t"
- " .option norelax \n\t"
- " .option norvc \n\t"
- "1: j %l[label] \n\t"
- " .option pop \n\t"
- " .pushsection __jump_table, \"aw\" \n\t"
- " .align " RISCV_LGPTR " \n\t"
- " .long 1b - ., %l[label] - . \n\t"
- " " RISCV_PTR " %0 - . \n\t"
- " .popsection \n\t"
+ ARCH_STATIC_BRANCH_JUMP_ASM("%0", "%l[label]")
: : "i"(&((char *)key)[branch]) : : label);
return false;
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 2e2254fd2a2a..35eab6e0f4ae 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -286,6 +286,16 @@ struct kvm_vcpu_arch {
} sta;
};
+/*
+ * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
+ * arrived in guest context. For riscv, any event that arrives while a vCPU is
+ * loaded is considered to be "in guest".
+ */
+static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+ return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
+}
+
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
diff --git a/arch/riscv/include/asm/kvm_nacl.h b/arch/riscv/include/asm/kvm_nacl.h
new file mode 100644
index 000000000000..4124d5e06a0f
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_nacl.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 Ventana Micro Systems Inc.
+ */
+
+#ifndef __KVM_NACL_H
+#define __KVM_NACL_H
+
+#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <asm/byteorder.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+
+struct kvm_vcpu_arch;
+
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_available);
+#define kvm_riscv_nacl_available() \
+ static_branch_unlikely(&kvm_riscv_nacl_available)
+
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_csr_available);
+#define kvm_riscv_nacl_sync_csr_available() \
+ static_branch_unlikely(&kvm_riscv_nacl_sync_csr_available)
+
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_hfence_available);
+#define kvm_riscv_nacl_sync_hfence_available() \
+ static_branch_unlikely(&kvm_riscv_nacl_sync_hfence_available)
+
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_sync_sret_available);
+#define kvm_riscv_nacl_sync_sret_available() \
+ static_branch_unlikely(&kvm_riscv_nacl_sync_sret_available)
+
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_nacl_autoswap_csr_available);
+#define kvm_riscv_nacl_autoswap_csr_available() \
+ static_branch_unlikely(&kvm_riscv_nacl_autoswap_csr_available)
+
+struct kvm_riscv_nacl {
+ void *shmem;
+ phys_addr_t shmem_phys;
+};
+DECLARE_PER_CPU(struct kvm_riscv_nacl, kvm_riscv_nacl);
+
+void __kvm_riscv_nacl_hfence(void *shmem,
+ unsigned long control,
+ unsigned long page_num,
+ unsigned long page_count);
+
+void __kvm_riscv_nacl_switch_to(struct kvm_vcpu_arch *vcpu_arch,
+ unsigned long sbi_ext_id,
+ unsigned long sbi_func_id);
+
+int kvm_riscv_nacl_enable(void);
+
+void kvm_riscv_nacl_disable(void);
+
+void kvm_riscv_nacl_exit(void);
+
+int kvm_riscv_nacl_init(void);
+
+#ifdef CONFIG_32BIT
+#define lelong_to_cpu(__x) le32_to_cpu(__x)
+#define cpu_to_lelong(__x) cpu_to_le32(__x)
+#else
+#define lelong_to_cpu(__x) le64_to_cpu(__x)
+#define cpu_to_lelong(__x) cpu_to_le64(__x)
+#endif
+
+#define nacl_shmem() \
+ this_cpu_ptr(&kvm_riscv_nacl)->shmem
+
+#define nacl_scratch_read_long(__shmem, __offset) \
+({ \
+ unsigned long *__p = (__shmem) + \
+ SBI_NACL_SHMEM_SCRATCH_OFFSET + \
+ (__offset); \
+ lelong_to_cpu(*__p); \
+})
+
+#define nacl_scratch_write_long(__shmem, __offset, __val) \
+do { \
+ unsigned long *__p = (__shmem) + \
+ SBI_NACL_SHMEM_SCRATCH_OFFSET + \
+ (__offset); \
+ *__p = cpu_to_lelong(__val); \
+} while (0)
+
+#define nacl_scratch_write_longs(__shmem, __offset, __array, __count) \
+do { \
+ unsigned int __i; \
+ unsigned long *__p = (__shmem) + \
+ SBI_NACL_SHMEM_SCRATCH_OFFSET + \
+ (__offset); \
+ for (__i = 0; __i < (__count); __i++) \
+ __p[__i] = cpu_to_lelong((__array)[__i]); \
+} while (0)
+
+#define nacl_sync_hfence(__e) \
+ sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_HFENCE, \
+ (__e), 0, 0, 0, 0, 0)
+
+#define nacl_hfence_mkconfig(__type, __order, __vmid, __asid) \
+({ \
+ unsigned long __c = SBI_NACL_SHMEM_HFENCE_CONFIG_PEND; \
+ __c |= ((__type) & SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK) \
+ << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT; \
+ __c |= (((__order) - SBI_NACL_SHMEM_HFENCE_ORDER_BASE) & \
+ SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK) \
+ << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT; \
+ __c |= ((__vmid) & SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK) \
+ << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT; \
+ __c |= ((__asid) & SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK); \
+ __c; \
+})
+
+#define nacl_hfence_mkpnum(__order, __addr) \
+ ((__addr) >> (__order))
+
+#define nacl_hfence_mkpcount(__order, __size) \
+ ((__size) >> (__order))
+
+#define nacl_hfence_gvma(__shmem, __gpa, __gpsz, __order) \
+__kvm_riscv_nacl_hfence(__shmem, \
+ nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA, \
+ __order, 0, 0), \
+ nacl_hfence_mkpnum(__order, __gpa), \
+ nacl_hfence_mkpcount(__order, __gpsz))
+
+#define nacl_hfence_gvma_all(__shmem) \
+__kvm_riscv_nacl_hfence(__shmem, \
+ nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL, \
+ 0, 0, 0), 0, 0)
+
+#define nacl_hfence_gvma_vmid(__shmem, __vmid, __gpa, __gpsz, __order) \
+__kvm_riscv_nacl_hfence(__shmem, \
+ nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID, \
+ __order, __vmid, 0), \
+ nacl_hfence_mkpnum(__order, __gpa), \
+ nacl_hfence_mkpcount(__order, __gpsz))
+
+#define nacl_hfence_gvma_vmid_all(__shmem, __vmid) \
+__kvm_riscv_nacl_hfence(__shmem, \
+ nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL, \
+ 0, __vmid, 0), 0, 0)
+
+#define nacl_hfence_vvma(__shmem, __vmid, __gva, __gvsz, __order) \
+__kvm_riscv_nacl_hfence(__shmem, \
+ nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA, \
+ __order, __vmid, 0), \
+ nacl_hfence_mkpnum(__order, __gva), \
+ nacl_hfence_mkpcount(__order, __gvsz))
+
+#define nacl_hfence_vvma_all(__shmem, __vmid) \
+__kvm_riscv_nacl_hfence(__shmem, \
+ nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL, \
+ 0, __vmid, 0), 0, 0)
+
+#define nacl_hfence_vvma_asid(__shmem, __vmid, __asid, __gva, __gvsz, __order)\
+__kvm_riscv_nacl_hfence(__shmem, \
+ nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID, \
+ __order, __vmid, __asid), \
+ nacl_hfence_mkpnum(__order, __gva), \
+ nacl_hfence_mkpcount(__order, __gvsz))
+
+#define nacl_hfence_vvma_asid_all(__shmem, __vmid, __asid) \
+__kvm_riscv_nacl_hfence(__shmem, \
+ nacl_hfence_mkconfig(SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL, \
+ 0, __vmid, __asid), 0, 0)
+
+#define nacl_csr_read(__shmem, __csr) \
+({ \
+ unsigned long *__a = (__shmem) + SBI_NACL_SHMEM_CSR_OFFSET; \
+ lelong_to_cpu(__a[SBI_NACL_SHMEM_CSR_INDEX(__csr)]); \
+})
+
+#define nacl_csr_write(__shmem, __csr, __val) \
+do { \
+ void *__s = (__shmem); \
+ unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr); \
+ unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET; \
+ u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET; \
+ __a[__i] = cpu_to_lelong(__val); \
+ __b[__i >> 3] |= 1U << (__i & 0x7); \
+} while (0)
+
+#define nacl_csr_swap(__shmem, __csr, __val) \
+({ \
+ void *__s = (__shmem); \
+ unsigned int __i = SBI_NACL_SHMEM_CSR_INDEX(__csr); \
+ unsigned long *__a = (__s) + SBI_NACL_SHMEM_CSR_OFFSET; \
+ u8 *__b = (__s) + SBI_NACL_SHMEM_DBITMAP_OFFSET; \
+ unsigned long __r = lelong_to_cpu(__a[__i]); \
+ __a[__i] = cpu_to_lelong(__val); \
+ __b[__i >> 3] |= 1U << (__i & 0x7); \
+ __r; \
+})
+
+#define nacl_sync_csr(__csr) \
+ sbi_ecall(SBI_EXT_NACL, SBI_EXT_NACL_SYNC_CSR, \
+ (__csr), 0, 0, 0, 0, 0)
+
+/*
+ * Each ncsr_xyz() macro defined below has its own static-branch so every
+ * use of ncsr_xyz() macro emits a patchable direct jump. This means multiple
+ * back-to-back ncsr_xyz() macro usage will emit multiple patchable direct
+ * jumps which is sub-optimal.
+ *
+ * Based on the above, it is recommended to avoid multiple back-to-back
+ * ncsr_xyz() macro usage.
+ */
+
+#define ncsr_read(__csr) \
+({ \
+ unsigned long __r; \
+ if (kvm_riscv_nacl_available()) \
+ __r = nacl_csr_read(nacl_shmem(), __csr); \
+ else \
+ __r = csr_read(__csr); \
+ __r; \
+})
+
+#define ncsr_write(__csr, __val) \
+do { \
+ if (kvm_riscv_nacl_sync_csr_available()) \
+ nacl_csr_write(nacl_shmem(), __csr, __val); \
+ else \
+ csr_write(__csr, __val); \
+} while (0)
+
+#define ncsr_swap(__csr, __val) \
+({ \
+ unsigned long __r; \
+ if (kvm_riscv_nacl_sync_csr_available()) \
+ __r = nacl_csr_swap(nacl_shmem(), __csr, __val); \
+ else \
+ __r = csr_swap(__csr, __val); \
+ __r; \
+})
+
+#define nsync_csr(__csr) \
+do { \
+ if (kvm_riscv_nacl_sync_csr_available()) \
+ nacl_sync_csr(__csr); \
+} while (0)
+
+#endif
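The ncsr_*()/nsync_csr() wrappers above let KVM route CSR accesses through the SBI nested acceleration (NACL) shared memory when the host SBI provides it, and fall back to plain csr_*() otherwise. A hedged sketch of the intended calling pattern (not the actual KVM call site; treating -1 as "sync all CSRs" is an assumption based on the SBI NACL SYNC_CSR semantics):

#include <asm/csr.h>
#include <asm/kvm_nacl.h>

/* Illustrative only: program two guest CSRs, then synchronize. */
static void example_load_guest_csrs(unsigned long vsstatus, unsigned long vsatp)
{
        /*
         * With NACL sync-CSR available these writes land in the per-CPU
         * shared memory and its dirty bitmap; otherwise they hit the
         * CSRs directly via csr_write().
         */
        ncsr_write(CSR_VSSTATUS, vsstatus);
        ncsr_write(CSR_VSATP, vsatp);

        /* Assumption: -1 requests synchronization of all dirty CSRs. */
        nsync_csr(-1UL);
}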
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index c9e03e9da3dc..1cc90465d75b 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -26,8 +26,15 @@ typedef struct {
unsigned long exec_fdpic_loadmap;
unsigned long interp_fdpic_loadmap;
#endif
+ unsigned long flags;
+#ifdef CONFIG_RISCV_ISA_SUPM
+ u8 pmlen;
+#endif
} mm_context_t;
+/* Lock the pointer masking mode because this mm is multithreaded */
+#define MM_CONTEXT_LOCK_PMLEN 0
+
#define cntx2asid(cntx) ((cntx) & SATP_ASID_MASK)
#define cntx2version(cntx) ((cntx) & ~SATP_ASID_MASK)
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
index 7030837adc1a..8c4bc49a3a0f 100644
--- a/arch/riscv/include/asm/mmu_context.h
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -20,6 +20,9 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
static inline void activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
+#ifdef CONFIG_RISCV_ISA_SUPM
+ next->context.pmlen = 0;
+#endif
switch_mm(prev, next, NULL);
}
@@ -30,11 +33,21 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_MMU
atomic_long_set(&mm->context.id, 0);
#endif
+ if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
+ clear_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags);
return 0;
}
DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
+#ifdef CONFIG_RISCV_ISA_SUPM
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+ return -1UL >> mm->context.pmlen;
+}
+#endif
+
#include <asm-generic/mmu_context.h>
#endif /* _ASM_RISCV_MMU_CONTEXT_H */
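mm_untag_mask() derives the userspace tag mask from the per-mm PMLEN selected via the Supm prctl. Worked values for the three architected PMLEN settings, as a stand-alone userspace check of the shift in the header:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        /* mask = -1UL >> pmlen, i.e. keep the low (64 - PMLEN) address bits */
        assert((~UINT64_C(0) >> 0)  == UINT64_C(0xffffffffffffffff)); /* PMLEN 0  */
        assert((~UINT64_C(0) >> 7)  == UINT64_C(0x01ffffffffffffff)); /* PMLEN 7  */
        assert((~UINT64_C(0) >> 16) == UINT64_C(0x0000ffffffffffff)); /* PMLEN 16 */
        return 0;
}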
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 32d308a3355f..71aabc5c6713 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -12,9 +12,7 @@
#include <linux/pfn.h>
#include <linux/const.h>
-#define PAGE_SHIFT CONFIG_PAGE_SHIFT
-#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#include <vdso/page.h>
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
@@ -194,9 +192,6 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
-#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
-#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
-
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
unsigned long kaslr_offset(void);
diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h
index 665bbc9b2f84..bcc928fd3785 100644
--- a/arch/riscv/include/asm/perf_event.h
+++ b/arch/riscv/include/asm/perf_event.h
@@ -8,6 +8,7 @@
#ifndef _ASM_RISCV_PERF_EVENT_H
#define _ASM_RISCV_PERF_EVENT_H
+#ifdef CONFIG_PERF_EVENTS
#include <linux/perf_event.h>
#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
@@ -17,4 +18,6 @@
(regs)->sp = current_stack_pointer; \
(regs)->status = SR_PP; \
}
+#endif
+
#endif /* _ASM_RISCV_PERF_EVENT_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index e79f15293492..5d7f3e8c2e50 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -963,6 +963,25 @@ void misc_mem_init(void);
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_RISCV_PGTABLE_H */
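The comment above describes when the *_safe() setters may skip the TLB flush. A hedged sketch of the intended usage, with a hypothetical early-mapping helper that populates a slot known to be non-present (pfn_pgd() and PAGE_TABLE are the existing riscv helpers):

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/page.h>

/* Hypothetical early-mapping helper; the slot is known to be non-present. */
static void __init example_populate_pgd(pgd_t *pgdp, p4d_t *p4d_table)
{
        /* No TLB flush needed: no existing translation is being changed. */
        set_pgd_safe(pgdp, pfn_pgd(virt_to_pfn(p4d_table), PAGE_TABLE));
}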
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index efa1b3519b23..5f56eb9d114a 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -102,6 +102,7 @@ struct thread_struct {
unsigned long s[12]; /* s[0]: frame pointer */
struct __riscv_d_ext_state fstate;
unsigned long bad_cause;
+ unsigned long envcfg;
u32 riscv_v_flags;
u32 vstate_ctrl;
struct __riscv_v_ext_state vstate;
@@ -177,6 +178,14 @@ extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
#define RISCV_SET_ICACHE_FLUSH_CTX(arg1, arg2) riscv_set_icache_flush_ctx(arg1, arg2)
extern int riscv_set_icache_flush_ctx(unsigned long ctx, unsigned long per_thread);
+#ifdef CONFIG_RISCV_ISA_SUPM
+/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
+long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
+long get_tagged_addr_ctrl(struct task_struct *task);
+#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(current, arg)
+#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl(current)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 98f631b051db..6c82318065cf 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -34,6 +34,7 @@ enum sbi_ext_id {
SBI_EXT_PMU = 0x504D55,
SBI_EXT_DBCN = 0x4442434E,
SBI_EXT_STA = 0x535441,
+ SBI_EXT_NACL = 0x4E41434C,
/* Experimentals extensions must lie within this range */
SBI_EXT_EXPERIMENTAL_START = 0x08000000,
@@ -281,6 +282,125 @@ struct sbi_sta_struct {
#define SBI_SHMEM_DISABLE -1
+enum sbi_ext_nacl_fid {
+ SBI_EXT_NACL_PROBE_FEATURE = 0x0,
+ SBI_EXT_NACL_SET_SHMEM = 0x1,
+ SBI_EXT_NACL_SYNC_CSR = 0x2,
+ SBI_EXT_NACL_SYNC_HFENCE = 0x3,
+ SBI_EXT_NACL_SYNC_SRET = 0x4,
+};
+
+enum sbi_ext_nacl_feature {
+ SBI_NACL_FEAT_SYNC_CSR = 0x0,
+ SBI_NACL_FEAT_SYNC_HFENCE = 0x1,
+ SBI_NACL_FEAT_SYNC_SRET = 0x2,
+ SBI_NACL_FEAT_AUTOSWAP_CSR = 0x3,
+};
+
+#define SBI_NACL_SHMEM_ADDR_SHIFT 12
+#define SBI_NACL_SHMEM_SCRATCH_OFFSET 0x0000
+#define SBI_NACL_SHMEM_SCRATCH_SIZE 0x1000
+#define SBI_NACL_SHMEM_SRET_OFFSET 0x0000
+#define SBI_NACL_SHMEM_SRET_SIZE 0x0200
+#define SBI_NACL_SHMEM_AUTOSWAP_OFFSET (SBI_NACL_SHMEM_SRET_OFFSET + \
+ SBI_NACL_SHMEM_SRET_SIZE)
+#define SBI_NACL_SHMEM_AUTOSWAP_SIZE 0x0080
+#define SBI_NACL_SHMEM_UNUSED_OFFSET (SBI_NACL_SHMEM_AUTOSWAP_OFFSET + \
+ SBI_NACL_SHMEM_AUTOSWAP_SIZE)
+#define SBI_NACL_SHMEM_UNUSED_SIZE 0x0580
+#define SBI_NACL_SHMEM_HFENCE_OFFSET (SBI_NACL_SHMEM_UNUSED_OFFSET + \
+ SBI_NACL_SHMEM_UNUSED_SIZE)
+#define SBI_NACL_SHMEM_HFENCE_SIZE 0x0780
+#define SBI_NACL_SHMEM_DBITMAP_OFFSET (SBI_NACL_SHMEM_HFENCE_OFFSET + \
+ SBI_NACL_SHMEM_HFENCE_SIZE)
+#define SBI_NACL_SHMEM_DBITMAP_SIZE 0x0080
+#define SBI_NACL_SHMEM_CSR_OFFSET (SBI_NACL_SHMEM_DBITMAP_OFFSET + \
+ SBI_NACL_SHMEM_DBITMAP_SIZE)
+#define SBI_NACL_SHMEM_CSR_SIZE ((__riscv_xlen / 8) * 1024)
+#define SBI_NACL_SHMEM_SIZE (SBI_NACL_SHMEM_CSR_OFFSET + \
+ SBI_NACL_SHMEM_CSR_SIZE)
+
+#define SBI_NACL_SHMEM_CSR_INDEX(__csr_num) \
+ ((((__csr_num) & 0xc00) >> 2) | ((__csr_num) & 0xff))
+
+#define SBI_NACL_SHMEM_HFENCE_ENTRY_SZ ((__riscv_xlen / 8) * 4)
+#define SBI_NACL_SHMEM_HFENCE_ENTRY_MAX \
+ (SBI_NACL_SHMEM_HFENCE_SIZE / \
+ SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
+#define SBI_NACL_SHMEM_HFENCE_ENTRY(__num) \
+ (SBI_NACL_SHMEM_HFENCE_OFFSET + \
+ (__num) * SBI_NACL_SHMEM_HFENCE_ENTRY_SZ)
+#define SBI_NACL_SHMEM_HFENCE_ENTRY_CONFIG(__num) \
+ SBI_NACL_SHMEM_HFENCE_ENTRY(__num)
+#define SBI_NACL_SHMEM_HFENCE_ENTRY_PNUM(__num)\
+ (SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + (__riscv_xlen / 8))
+#define SBI_NACL_SHMEM_HFENCE_ENTRY_PCOUNT(__num)\
+ (SBI_NACL_SHMEM_HFENCE_ENTRY(__num) + \
+ ((__riscv_xlen / 8) * 3))
+
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS 1
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT \
+ (__riscv_xlen - SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS)
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK \
+ ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_BITS) - 1)
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_PEND \
+ (SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_MASK << \
+ SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT)
+
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS 3
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT \
+ (SBI_NACL_SHMEM_HFENCE_CONFIG_PEND_SHIFT - \
+ SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_BITS)
+
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS 4
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT \
+ (SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD1_SHIFT - \
+ SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS)
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_MASK \
+ ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_BITS) - 1)
+
+#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA 0x0
+#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_ALL 0x1
+#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID 0x2
+#define SBI_NACL_SHMEM_HFENCE_TYPE_GVMA_VMID_ALL 0x3
+#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA 0x4
+#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ALL 0x5
+#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID 0x6
+#define SBI_NACL_SHMEM_HFENCE_TYPE_VVMA_ASID_ALL 0x7
+
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS 1
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT \
+ (SBI_NACL_SHMEM_HFENCE_CONFIG_TYPE_SHIFT - \
+ SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_BITS)
+
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS 7
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_SHIFT \
+ (SBI_NACL_SHMEM_HFENCE_CONFIG_RSVD2_SHIFT - \
+ SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS)
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_MASK \
+ ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ORDER_BITS) - 1)
+#define SBI_NACL_SHMEM_HFENCE_ORDER_BASE 12
+
+#if __riscv_xlen == 32
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS 9
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS 7
+#else
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS 16
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS 14
+#endif
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_SHIFT \
+ SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_MASK \
+ ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_ASID_BITS) - 1)
+#define SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_MASK \
+ ((1UL << SBI_NACL_SHMEM_HFENCE_CONFIG_VMID_BITS) - 1)
+
+#define SBI_NACL_SHMEM_AUTOSWAP_FLAG_HSTATUS BIT(0)
+#define SBI_NACL_SHMEM_AUTOSWAP_HSTATUS ((__riscv_xlen / 8) * 1)
+
+#define SBI_NACL_SHMEM_SRET_X(__i) ((__riscv_xlen / 8) * (__i))
+#define SBI_NACL_SHMEM_SRET_X_LAST 31
+
/* SBI spec version fields */
#define SBI_SPEC_VERSION_DEFAULT 0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index ab92fc84e1fc..ea263d3683ef 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -42,6 +42,7 @@ static inline int set_kernel_memory(char *startp, char *endp,
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
bool kernel_page_present(struct page *page);
#endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
new file mode 100644
index 000000000000..e5121b89acea
--- /dev/null
+++ b/arch/riscv/include/asm/spinlock.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_RISCV_SPINLOCK_H
+#define __ASM_RISCV_SPINLOCK_H
+
+#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
+#define _Q_PENDING_LOOPS (1 << 9)
+
+#define __no_arch_spinlock_redefine
+#include <asm/ticket_spinlock.h>
+#include <asm/qspinlock.h>
+#include <asm/jump_label.h>
+
+/*
+ * TODO: Use an alternative instead of a static key when we are able to parse
+ * the extensions string earlier in the boot process.
+ */
+DECLARE_STATIC_KEY_TRUE(qspinlock_key);
+
+#define SPINLOCK_BASE_DECLARE(op, type, type_lock) \
+static __always_inline type arch_spin_##op(type_lock lock) \
+{ \
+ if (static_branch_unlikely(&qspinlock_key)) \
+ return queued_spin_##op(lock); \
+ return ticket_spin_##op(lock); \
+}
+
+SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *)
+SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
+
+#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS)
+
+#include <asm/qspinlock.h>
+
+#else
+
+#include <asm/ticket_spinlock.h>
+
+#endif
+
+#include <asm/qrwlock.h>
+
+#endif /* __ASM_RISCV_SPINLOCK_H */
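For reference, this is approximately what one SPINLOCK_BASE_DECLARE() instantiation above expands to; each lock operation becomes a static-key dispatch between the queued and ticket implementations chosen at boot:

/* Approximate expansion of SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *) */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        if (static_branch_unlikely(&qspinlock_key))
                return queued_spin_lock(lock);
        return ticket_spin_lock(lock);
}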
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 7594df37cc9f..94e33216b2d9 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -70,6 +70,24 @@ static __always_inline bool has_fpu(void) { return false; }
#define __switch_to_fpu(__prev, __next) do { } while (0)
#endif
+static inline void envcfg_update_bits(struct task_struct *task,
+ unsigned long mask, unsigned long val)
+{
+ unsigned long envcfg;
+
+ envcfg = (task->thread.envcfg & ~mask) | val;
+ task->thread.envcfg = envcfg;
+ if (task == current)
+ csr_write(CSR_ENVCFG, envcfg);
+}
+
+static inline void __switch_to_envcfg(struct task_struct *next)
+{
+ asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0",
+ 0, RISCV_ISA_EXT_XLINUXENVCFG, 1)
+ :: "r" (next->thread.envcfg) : "memory");
+}
+
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
@@ -103,6 +121,7 @@ do { \
__switch_to_vector(__prev, __next); \
if (switch_to_should_flush_icache(__next)) \
local_flush_icache_all(); \
+ __switch_to_envcfg(__next); \
((last) = __switch_to(__prev, __next)); \
} while (0)
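envcfg_update_bits() keeps the per-thread envcfg image and the live CSR in sync, and __switch_to_envcfg() reloads it on every context switch behind the XLINUXENVCFG alternative. A hedged sketch of how a feature would program an envcfg field, reusing the ENVCFG_PMM* constants added to csr.h in this series (the helper below is illustrative, not the actual prctl implementation):

#include <linux/sched.h>
#include <asm/csr.h>
#include <asm/switch_to.h>

/* Illustrative helper: select the 16-bit pointer-masking length for a task. */
static void example_set_pmlen16(struct task_struct *task)
{
        /* Clear the whole PMM field, then write the PMLEN=16 encoding. */
        envcfg_update_bits(task, ENVCFG_PMM, ENVCFG_PMM_PMLEN_16);
}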
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/text-patching.h
index 7228e266b9a1..7228e266b9a1 100644
--- a/arch/riscv/include/asm/patch.h
+++ b/arch/riscv/include/asm/text-patching.h
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 9c10fb180f43..f5916a70879a 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -107,9 +107,10 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
* - pending work-to-be-done flags are in lowest half-word
* - other flags in upper half-word(s)
*/
-#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
-#define TIF_SIGPENDING 2 /* signal pending */
-#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_NEED_RESCHED 0 /* rescheduling necessary */
+#define TIF_NEED_RESCHED_LAZY 1 /* Lazy rescheduling needed */
+#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+#define TIF_SIGPENDING 3 /* signal pending */
#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
@@ -117,9 +118,10 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define TIF_32BIT 11 /* compat-mode 32bit process */
#define TIF_RISCV_V_DEFER_RESTORE 12 /* restore Vector before returning to user */
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 72ec1d9bd3f3..fee56b0c8058 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -9,8 +9,41 @@
#define _ASM_RISCV_UACCESS_H
#include <asm/asm-extable.h>
+#include <asm/cpufeature.h>
#include <asm/pgtable.h> /* for TASK_SIZE */
+#ifdef CONFIG_RISCV_ISA_SUPM
+static inline unsigned long __untagged_addr_remote(struct mm_struct *mm, unsigned long addr)
+{
+ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM)) {
+ u8 pmlen = mm->context.pmlen;
+
+ /* Virtual addresses are sign-extended; physical addresses are zero-extended. */
+ if (IS_ENABLED(CONFIG_MMU))
+ return (long)(addr << pmlen) >> pmlen;
+ else
+ return (addr << pmlen) >> pmlen;
+ }
+
+ return addr;
+}
+
+#define untagged_addr(addr) ({ \
+ unsigned long __addr = (__force unsigned long)(addr); \
+ (__force __typeof__(addr))__untagged_addr_remote(current->mm, __addr); \
+})
+
+#define untagged_addr_remote(mm, addr) ({ \
+ unsigned long __addr = (__force unsigned long)(addr); \
+ mmap_assert_locked(mm); \
+ (__force __typeof__(addr))__untagged_addr_remote(mm, __addr); \
+})
+
+#define access_ok(addr, size) likely(__access_ok(untagged_addr(addr), size))
+#else
+#define untagged_addr(addr) (addr)
+#endif
+
/*
* User space memory access functions
*/
@@ -130,7 +163,7 @@ do { \
*/
#define __get_user(x, ptr) \
({ \
- const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ const __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
long __gu_err = 0; \
\
__chk_user_ptr(__gu_ptr); \
@@ -246,7 +279,7 @@ do { \
*/
#define __put_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_ptr = untagged_addr(ptr); \
__typeof__(*__gu_ptr) __val = (x); \
long __pu_err = 0; \
\
@@ -293,13 +326,13 @@ unsigned long __must_check __asm_copy_from_user(void *to,
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- return __asm_copy_from_user(to, from, n);
+ return __asm_copy_from_user(to, untagged_addr(from), n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- return __asm_copy_to_user(to, from, n);
+ return __asm_copy_to_user(untagged_addr(to), from, n);
}
extern long strncpy_from_user(char *dest, const char __user *src, long count);
@@ -314,7 +347,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
might_fault();
return access_ok(to, n) ?
- __clear_user(to, n) : n;
+ __clear_user(untagged_addr(to), n) : n;
}
#define __get_kernel_nofault(dst, src, type, err_label) \
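__untagged_addr_remote() strips the PMLEN tag bits before access_ok() and the uaccess copies; on MMU kernels the shift pair is arithmetic so that a tagged pointer aimed at the upper half stays non-canonical for userspace and still fails access_ok(). A stand-alone worked example of that arithmetic for PMLEN = 16:

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as __untagged_addr_remote() on an MMU kernel. */
static uint64_t untag(uint64_t addr, unsigned int pmlen)
{
        return (uint64_t)((int64_t)(addr << pmlen) >> pmlen);
}

int main(void)
{
        /* Tagged user pointer: the 16 tag bits are dropped. */
        assert(untag(UINT64_C(0xabcd00003ffff000), 16) == UINT64_C(0x3ffff000));
        /* Pointer with bit 47 set: sign extension keeps it out of user range. */
        assert(untag(UINT64_C(0x1234ffffffffe000), 16) == UINT64_C(0xffffffffffffe000));
        return 0;
}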
diff --git a/arch/riscv/include/asm/uprobes.h b/arch/riscv/include/asm/uprobes.h
index 3fc7deda9190..5008f76cdc27 100644
--- a/arch/riscv/include/asm/uprobes.h
+++ b/arch/riscv/include/asm/uprobes.h
@@ -4,7 +4,7 @@
#define _ASM_RISCV_UPROBES_H
#include <asm/probes.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
#include <asm/bug.h>
#define MAX_UINSN_BYTES 8
diff --git a/arch/riscv/include/asm/vdso/data.h b/arch/riscv/include/asm/vdso/time_data.h
index dc2f76f58b76..dfa65228999b 100644
--- a/arch/riscv/include/asm/vdso/data.h
+++ b/arch/riscv/include/asm/vdso/time_data.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __RISCV_ASM_VDSO_DATA_H
-#define __RISCV_ASM_VDSO_DATA_H
+#ifndef __RISCV_ASM_VDSO_TIME_DATA_H
+#define __RISCV_ASM_VDSO_TIME_DATA_H
#include <linux/types.h>
#include <vdso/datapage.h>
#include <asm/hwprobe.h>
-struct arch_vdso_data {
+struct arch_vdso_time_data {
/* Stash static answers to the hwprobe queries when all CPUs are selected. */
__u64 all_cpu_hwprobe_values[RISCV_HWPROBE_MAX_KEY + 1];
@@ -14,4 +14,4 @@ struct arch_vdso_data {
__u8 homogeneous_cpus;
};
-#endif /* __RISCV_ASM_VDSO_DATA_H */
+#endif /* __RISCV_ASM_VDSO_TIME_DATA_H */
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
index 82fd5d83bd60..e8a9c4b53c0c 100644
--- a/arch/riscv/include/asm/vdso/vsyscall.h
+++ b/arch/riscv/include/asm/vdso/vsyscall.h
@@ -4,14 +4,10 @@
#ifndef __ASSEMBLY__
-#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
extern struct vdso_data *vdso_data;
-/*
- * Update the vDSO data page to keep in sync with kernel timekeeping.
- */
static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
{
return vdso_data;
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index be7d309cca8a..c7c023afbacd 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -21,6 +21,7 @@
extern unsigned long riscv_v_vsize;
int riscv_v_setup_vsize(void);
+bool insn_is_vector(u32 insn_buf);
bool riscv_v_first_use_handler(struct pt_regs *regs);
void kernel_vector_begin(void);
void kernel_vector_end(void);
@@ -268,6 +269,7 @@ struct pt_regs;
static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
static __always_inline bool has_vector(void) { return false; }
+static __always_inline bool insn_is_vector(u32 insn_buf) { return false; }
static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 1e153cda57db..3af142b99f77 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -72,6 +72,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZCF (1ULL << 46)
#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47)
#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48)
+#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
@@ -88,6 +89,11 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW 2
#define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST 3
#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4
+#define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10
+#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN 0
+#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2
+#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3
+#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
/* Flags */
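The new key 10 (RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF) mirrors the scalar key and reports UNKNOWN/SLOW/FAST/UNSUPPORTED for misaligned vector accesses; RISCV_HWPROBE_MAX_KEY is bumped to 10 to match. A userspace sketch querying it through the raw syscall (minimal error handling, shown only to illustrate the key/value pairing):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
        struct riscv_hwprobe pair = {
                .key = RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF,
        };

        /* args: pairs, pair_count, cpusetsize, cpus, flags */
        if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
                return 1;

        /* 0 = UNKNOWN, 2 = SLOW, 3 = FAST, 4 = UNSUPPORTED */
        printf("vector misaligned perf: %llu\n", (unsigned long long)pair.value);
        return 0;
}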
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index e97db3296456..4f24201376b1 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -175,6 +175,8 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_ZCF,
KVM_RISCV_ISA_EXT_ZCMOP,
KVM_RISCV_ISA_EXT_ZAWRS,
+ KVM_RISCV_ISA_EXT_SMNPM,
+ KVM_RISCV_ISA_EXT_SSNPM,
KVM_RISCV_ISA_EXT_MAX,
};