Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kconfig                    |   7
-rw-r--r--  arch/riscv/configs/rv32_defconfig     | 139
-rw-r--r--  arch/riscv/include/asm/cpu_ops.h      |  14
-rw-r--r--  arch/riscv/include/asm/cpufeature.h   |   4
-rw-r--r--  arch/riscv/include/asm/hwcap.h        |  38
-rw-r--r--  arch/riscv/include/asm/hwprobe.h      |  24
-rw-r--r--  arch/riscv/include/asm/kfence.h       |   4
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h   |  22
-rw-r--r--  arch/riscv/include/asm/pgtable.h      |  33
-rw-r--r--  arch/riscv/include/asm/sections.h     |   1
-rw-r--r--  arch/riscv/include/asm/thread_info.h  |   1
-rw-r--r--  arch/riscv/include/asm/xip_fixup.h    |   2
-rw-r--r--  arch/riscv/include/uapi/asm/hwprobe.h |  32
-rw-r--r--  arch/riscv/kernel/Makefile            |   1
-rw-r--r--  arch/riscv/kernel/cpu-hotplug.c       |  19
-rw-r--r--  arch/riscv/kernel/cpu_ops.c           |  14
-rw-r--r--  arch/riscv/kernel/cpu_ops_sbi.c       |  19
-rw-r--r--  arch/riscv/kernel/cpu_ops_spinwait.c  |  11
-rw-r--r--  arch/riscv/kernel/cpufeature.c        | 195
-rw-r--r--  arch/riscv/kernel/efi.c               |   2
-rw-r--r--  arch/riscv/kernel/head.S              |   6
-rw-r--r--  arch/riscv/kernel/mcount-dyn.S        |   2
-rw-r--r--  arch/riscv/kernel/mcount.S            |   2
-rw-r--r--  arch/riscv/kernel/module.c            |   3
-rw-r--r--  arch/riscv/kernel/patch.c             |  11
-rw-r--r--  arch/riscv/kernel/setup.c             |   1
-rw-r--r--  arch/riscv/kernel/signal.c            |   2
-rw-r--r--  arch/riscv/kernel/smp.c               |   2
-rw-r--r--  arch/riscv/kernel/smpboot.c           |  38
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c       | 411
-rw-r--r--  arch/riscv/kernel/sys_riscv.c         | 285
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c  |   6
-rw-r--r--  arch/riscv/kernel/vdso/hwprobe.c      |  86
-rw-r--r--  arch/riscv/kernel/vmlinux-xip.lds.S   |   2
-rw-r--r--  arch/riscv/kernel/vmlinux.lds.S       |   2
-rw-r--r--  arch/riscv/kvm/mmu.c                  |  22
-rw-r--r--  arch/riscv/lib/clear_page.S           |   2
-rw-r--r--  arch/riscv/lib/tishift.S              |   2
-rw-r--r--  arch/riscv/lib/uaccess.S              |   2
-rw-r--r--  arch/riscv/mm/Makefile                |   3
-rw-r--r--  arch/riscv/mm/fault.c                 |  16
-rw-r--r--  arch/riscv/mm/hugetlbpage.c           |  12
-rw-r--r--  arch/riscv/mm/init.c                  |   8
-rw-r--r--  arch/riscv/mm/kasan_init.c            |  45
-rw-r--r--  arch/riscv/mm/pageattr.c              |  55
-rw-r--r--  arch/riscv/mm/pgtable.c               |  51
46 files changed, 952 insertions, 707 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index cd4c9a204d08..dcb50071b91a 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -59,6 +59,7 @@ config RISCV
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
+ select ARCH_WANTS_NO_INSTR
select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
select BUILDTIME_TABLE_SORT if MMU
@@ -902,13 +903,13 @@ config RISCV_ISA_FALLBACK
on the replacement properties, "riscv,isa-base" and
"riscv,isa-extensions".
-endmenu # "Boot options"
-
config BUILTIN_DTB
- bool
+ bool "Built-in device tree"
depends on OF && NONPORTABLE
default y if XIP_KERNEL
+endmenu # "Boot options"
+
config PORTABLE
bool
default !NONPORTABLE
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
deleted file mode 100644
index 89b601e253a6..000000000000
--- a/arch/riscv/configs/rv32_defconfig
+++ /dev/null
@@ -1,139 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_CFS_BANDWIDTH=y
-CONFIG_CGROUP_BPF=y
-CONFIG_NAMESPACES=y
-CONFIG_USER_NS=y
-CONFIG_CHECKPOINT_RESTORE=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_PROFILING=y
-CONFIG_SOC_SIFIVE=y
-CONFIG_SOC_VIRT=y
-CONFIG_NONPORTABLE=y
-CONFIG_ARCH_RV32I=y
-CONFIG_SMP=y
-CONFIG_HOTPLUG_CPU=y
-CONFIG_PM=y
-CONFIG_CPU_IDLE=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=m
-CONFIG_JUMP_LABEL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NETLINK_DIAG=y
-CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCIE_XILINX=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_VIRTIO_BLK=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_VIRTIO=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_AHCI_PLATFORM=y
-CONFIG_NETDEVICES=y
-CONFIG_VIRTIO_NET=y
-CONFIG_MACB=y
-CONFIG_E1000E=y
-CONFIG_R8169=y
-CONFIG_MICROSEMI_PHY=y
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_VIRTIO_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_VIRTIO=y
-CONFIG_SPI=y
-CONFIG_SPI_SIFIVE=y
-# CONFIG_PTP_1588_CLOCK is not set
-CONFIG_DRM=y
-CONFIG_DRM_RADEON=y
-CONFIG_DRM_VIRTIO_GPU=y
-CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_USB=y
-CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_XHCI_PLATFORM=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_UAS=y
-CONFIG_MMC=y
-CONFIG_MMC_SPI=y
-CONFIG_RTC_CLASS=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_INPUT=y
-CONFIG_VIRTIO_MMIO=y
-CONFIG_RPMSG_CHAR=y
-CONFIG_RPMSG_CTRL=y
-CONFIG_RPMSG_VIRTIO=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_AUTOFS_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_V4_2=y
-CONFIG_ROOT_NFS=y
-CONFIG_9P_FS=y
-CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_CRYPTO_DEV_VIRTIO=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_PAGEALLOC=y
-CONFIG_SCHED_STACK_END_CHECK=y
-CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_VM_PGFLAGS=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_PER_CPU_MAPS=y
-CONFIG_SOFTLOCKUP_DETECTOR=y
-CONFIG_WQ_WATCHDOG=y
-CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_RWSEMS=y
-CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_STACKTRACE=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PLIST=y
-CONFIG_DEBUG_SG=y
-# CONFIG_RCU_TRACE is not set
-CONFIG_RCU_EQS_DEBUG=y
-# CONFIG_FTRACE is not set
-# CONFIG_RUNTIME_TESTING_MENU is not set
-CONFIG_MEMTEST=y
diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h
index aa128466c4d4..176b570ef982 100644
--- a/arch/riscv/include/asm/cpu_ops.h
+++ b/arch/riscv/include/asm/cpu_ops.h
@@ -13,33 +13,23 @@
/**
* struct cpu_operations - Callback operations for hotplugging CPUs.
*
- * @name: Name of the boot protocol.
- * @cpu_prepare: Early one-time preparation step for a cpu. If there
- * is a mechanism for doing so, tests whether it is
- * possible to boot the given HART.
* @cpu_start: Boots a cpu into the kernel.
- * @cpu_disable: Prepares a cpu to die. May fail for some
- * mechanism-specific reason, which will cause the hot
- * unplug to be aborted. Called from the cpu to be killed.
* @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from
* the cpu being stopped.
* @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another
* cpu.
*/
struct cpu_operations {
- const char *name;
- int (*cpu_prepare)(unsigned int cpu);
int (*cpu_start)(unsigned int cpu,
struct task_struct *tidle);
#ifdef CONFIG_HOTPLUG_CPU
- int (*cpu_disable)(unsigned int cpu);
void (*cpu_stop)(void);
int (*cpu_is_stopped)(unsigned int cpu);
#endif
};
extern const struct cpu_operations cpu_ops_spinwait;
-extern const struct cpu_operations *cpu_ops[NR_CPUS];
-void __init cpu_set_ops(int cpu);
+extern const struct cpu_operations *cpu_ops;
+void __init cpu_set_ops(void);
#endif /* ifndef __ASM_CPU_OPS_H */
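[Editorial note: with the per-CPU cpu_ops[] array gone, one boot protocol serves all harts and is selected once in cpu_set_ops(). As a minimal sketch of a provider under this model (the "myboot" names are hypothetical, not part of the patch), only .cpu_start is mandatory:

	static int myboot_cpu_start(unsigned int cpu, struct task_struct *tidle)
	{
		/* release the hart mapped to 'cpu' into the kernel */
		return 0;
	}

	static const struct cpu_operations cpu_ops_myboot = {
		.cpu_start = myboot_cpu_start,
	};

cpu_set_ops() would then point the single global cpu_ops at this structure.]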
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index a418c3112cd6..fbdde8b8a47e 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -59,6 +59,8 @@ struct riscv_isa_ext_data {
const unsigned int id;
const char *name;
const char *property;
+ const unsigned int *subset_ext_ids;
+ const unsigned int subset_ext_size;
};
extern const struct riscv_isa_ext_data riscv_isa_ext[];
@@ -67,7 +69,7 @@ extern bool riscv_isa_fallback;
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit);
#define riscv_isa_extension_available(isa_bitmap, ext) \
__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 06d30526ef3b..5340f818746b 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -11,19 +11,13 @@
#include <uapi/asm/hwcap.h>
#define RISCV_ISA_EXT_a ('a' - 'a')
-#define RISCV_ISA_EXT_b ('b' - 'a')
#define RISCV_ISA_EXT_c ('c' - 'a')
#define RISCV_ISA_EXT_d ('d' - 'a')
#define RISCV_ISA_EXT_f ('f' - 'a')
#define RISCV_ISA_EXT_h ('h' - 'a')
#define RISCV_ISA_EXT_i ('i' - 'a')
-#define RISCV_ISA_EXT_j ('j' - 'a')
-#define RISCV_ISA_EXT_k ('k' - 'a')
#define RISCV_ISA_EXT_m ('m' - 'a')
-#define RISCV_ISA_EXT_p ('p' - 'a')
#define RISCV_ISA_EXT_q ('q' - 'a')
-#define RISCV_ISA_EXT_s ('s' - 'a')
-#define RISCV_ISA_EXT_u ('u' - 'a')
#define RISCV_ISA_EXT_v ('v' - 'a')
/*
@@ -57,8 +51,38 @@
#define RISCV_ISA_EXT_ZIHPM 42
#define RISCV_ISA_EXT_SMSTATEEN 43
#define RISCV_ISA_EXT_ZICOND 44
+#define RISCV_ISA_EXT_ZBC 45
+#define RISCV_ISA_EXT_ZBKB 46
+#define RISCV_ISA_EXT_ZBKC 47
+#define RISCV_ISA_EXT_ZBKX 48
+#define RISCV_ISA_EXT_ZKND 49
+#define RISCV_ISA_EXT_ZKNE 50
+#define RISCV_ISA_EXT_ZKNH 51
+#define RISCV_ISA_EXT_ZKR 52
+#define RISCV_ISA_EXT_ZKSED 53
+#define RISCV_ISA_EXT_ZKSH 54
+#define RISCV_ISA_EXT_ZKT 55
+#define RISCV_ISA_EXT_ZVBB 56
+#define RISCV_ISA_EXT_ZVBC 57
+#define RISCV_ISA_EXT_ZVKB 58
+#define RISCV_ISA_EXT_ZVKG 59
+#define RISCV_ISA_EXT_ZVKNED 60
+#define RISCV_ISA_EXT_ZVKNHA 61
+#define RISCV_ISA_EXT_ZVKNHB 62
+#define RISCV_ISA_EXT_ZVKSED 63
+#define RISCV_ISA_EXT_ZVKSH 64
+#define RISCV_ISA_EXT_ZVKT 65
+#define RISCV_ISA_EXT_ZFH 66
+#define RISCV_ISA_EXT_ZFHMIN 67
+#define RISCV_ISA_EXT_ZIHINTNTL 68
+#define RISCV_ISA_EXT_ZVFH 69
+#define RISCV_ISA_EXT_ZVFHMIN 70
+#define RISCV_ISA_EXT_ZFA 71
+#define RISCV_ISA_EXT_ZTSO 72
+#define RISCV_ISA_EXT_ZACAS 73
-#define RISCV_ISA_EXT_MAX 64
+#define RISCV_ISA_EXT_MAX 128
+#define RISCV_ISA_EXT_INVALID U32_MAX
#ifdef CONFIG_RISCV_M_MODE
#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 5c48f48e79a6..630507dff5ea 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -15,4 +15,28 @@ static inline bool riscv_hwprobe_key_is_valid(__s64 key)
return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
}
+static inline bool hwprobe_key_is_bitmask(__s64 key)
+{
+ switch (key) {
+ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
+ case RISCV_HWPROBE_KEY_IMA_EXT_0:
+ case RISCV_HWPROBE_KEY_CPUPERF_0:
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
+ struct riscv_hwprobe *other_pair)
+{
+ if (pair->key != other_pair->key)
+ return false;
+
+ if (hwprobe_key_is_bitmask(pair->key))
+ return (pair->value & other_pair->value) == other_pair->value;
+
+ return pair->value == other_pair->value;
+}
+
#endif
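[Editorial note: the comparison semantics of riscv_hwprobe_pair_cmp() above are asymmetric. For bitmask keys (BASE_BEHAVIOR, IMA_EXT_0, CPUPERF_0) a CPU's pair matches a requested pair when it contains at least the requested bits; for all other keys the values must match exactly. A sketch with hypothetical 'want'/'got' names:

	struct riscv_hwprobe want = {
		.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
		.value = RISCV_HWPROBE_EXT_ZBA | RISCV_HWPROBE_EXT_ZBB,
	};
	struct riscv_hwprobe got = {
		.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
		.value = RISCV_HWPROBE_EXT_ZBA | RISCV_HWPROBE_EXT_ZBB |
			 RISCV_HWPROBE_EXT_ZBS,
	};

	/* true: every bit requested in 'want' is present in 'got' */
	riscv_hwprobe_pair_cmp(&got, &want);
]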
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
index 0bbffd528096..7388edd88986 100644
--- a/arch/riscv/include/asm/kfence.h
+++ b/arch/riscv/include/asm/kfence.h
@@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
pte_t *pte = virt_to_kpte(addr);
if (protect)
- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT));
else
- set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 9a2c780a11e9..b42017d76924 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -202,7 +202,7 @@ static inline int pud_user(pud_t pud)
static inline void set_pud(pud_t *pudp, pud_t pud)
{
- *pudp = pud;
+ WRITE_ONCE(*pudp, pud);
}
static inline void pud_clear(pud_t *pudp)
@@ -278,7 +278,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
if (pgtable_l4_enabled)
- *p4dp = p4d;
+ WRITE_ONCE(*p4dp, p4d);
else
set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
}
@@ -340,18 +340,12 @@ static inline struct page *p4d_page(p4d_t p4d)
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset pud_offset
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
- if (pgtable_l4_enabled)
- return p4d_pgtable(*p4d) + pud_index(address);
-
- return (pud_t *)p4d;
-}
+pud_t *pud_offset(p4d_t *p4d, unsigned long address);
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
if (pgtable_l5_enabled)
- *pgdp = pgd;
+ WRITE_ONCE(*pgdp, pgd);
else
set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
}
@@ -404,12 +398,6 @@ static inline struct page *pgd_page(pgd_t pgd)
#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
#define p4d_offset p4d_offset
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
-{
- if (pgtable_l5_enabled)
- return pgd_pgtable(*pgd) + p4d_index(address);
-
- return (p4d_t *)pgd;
-}
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);
#endif /* _ASM_RISCV_PGTABLE_64_H */
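[Editorial note: pud_offset() and p4d_offset() move out of line, presumably into mm/pgtable.c (which appears in the diffstat but whose hunks are not shown here), so the entry can be loaded through the READ_ONCE()-based accessors this series relies on. A hedged sketch of what the out-of-line body would look like, assuming the generic p4dp_get() helper:

	pud_t *pud_offset(p4d_t *p4d, unsigned long address)
	{
		if (pgtable_l4_enabled)
			return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);

		return (pud_t *)p4d;
	}
]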
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 7b4287f36054..e3ffef1c6119 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -248,7 +248,7 @@ static inline int pmd_leaf(pmd_t pmd)
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
- *pmdp = pmd;
+ WRITE_ONCE(*pmdp, pmd);
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -510,7 +510,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
*/
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
- *ptep = pteval;
+ WRITE_ONCE(*ptep, pteval);
}
void flush_icache_pte(pte_t pte);
@@ -544,19 +544,12 @@ static inline void pte_clear(struct mm_struct *mm,
__set_pte_at(ptep, __pte(0));
}
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline int ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry, int dirty)
-{
- if (!pte_same(*ptep, entry))
- __set_pte_at(ptep, entry);
- /*
- * update_mmu_cache will unconditionally execute, handling both
- * the case that the PTE changed and the spurious fault case.
- */
- return true;
-}
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty);
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG /* defined in mm/pgtable.c */
+extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
@@ -569,16 +562,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
-{
- if (!pte_young(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
-}
-
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
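[Editorial note: the WRITE_ONCE() conversions in this file and in pgtable-64.h exist because lockless page-table walkers read entries without holding the page-table lock, so both sides must be single, untorn accesses. The stores above pair with ptep_get() and friends on the read side, which in the generic implementation (a reference sketch, not part of this patch) wrap READ_ONCE():

	/* generic include/linux/pgtable.h flavor */
	static inline pte_t ptep_get(pte_t *ptep)
	{
		return READ_ONCE(*ptep);
	}

This forbids the compiler from tearing, fusing, or re-reading the entry.]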
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 32336e8a17cb..a393d5035c54 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -13,6 +13,7 @@ extern char _start_kernel[];
extern char __init_data_begin[], __init_data_end[];
extern char __init_text_begin[], __init_text_end[];
extern char __alt_start[], __alt_end[];
+extern char __exittext_begin[], __exittext_end[];
static inline bool is_va_kernel_text(uintptr_t va)
{
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 574779900bfb..4856697c5f25 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -28,7 +28,6 @@
#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
#define OVERFLOW_STACK_SIZE SZ_4K
-#define SHADOW_OVERFLOW_STACK_SIZE (1024)
#define IRQ_STACK_SIZE THREAD_SIZE
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
index d4ffc3c37649..b65bf6306f69 100644
--- a/arch/riscv/include/asm/xip_fixup.h
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -13,7 +13,7 @@
add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
- la t1, __data_loc
+ la t0, __data_loc
REG_L t1, _xip_phys_offset
sub \reg, \reg, t1
add \reg, \reg, t0
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index b659ffcfcdb4..9f2a8e3ff204 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -30,6 +30,35 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
+#define RISCV_HWPROBE_EXT_ZBC (1 << 7)
+#define RISCV_HWPROBE_EXT_ZBKB (1 << 8)
+#define RISCV_HWPROBE_EXT_ZBKC (1 << 9)
+#define RISCV_HWPROBE_EXT_ZBKX (1 << 10)
+#define RISCV_HWPROBE_EXT_ZKND (1 << 11)
+#define RISCV_HWPROBE_EXT_ZKNE (1 << 12)
+#define RISCV_HWPROBE_EXT_ZKNH (1 << 13)
+#define RISCV_HWPROBE_EXT_ZKSED (1 << 14)
+#define RISCV_HWPROBE_EXT_ZKSH (1 << 15)
+#define RISCV_HWPROBE_EXT_ZKT (1 << 16)
+#define RISCV_HWPROBE_EXT_ZVBB (1 << 17)
+#define RISCV_HWPROBE_EXT_ZVBC (1 << 18)
+#define RISCV_HWPROBE_EXT_ZVKB (1 << 19)
+#define RISCV_HWPROBE_EXT_ZVKG (1 << 20)
+#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21)
+#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22)
+#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23)
+#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24)
+#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25)
+#define RISCV_HWPROBE_EXT_ZVKT (1 << 26)
+#define RISCV_HWPROBE_EXT_ZFH (1 << 27)
+#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
+#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
+#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
+#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31)
+#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
+#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
+#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
+#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
@@ -40,4 +69,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
+/* Flags */
+#define RISCV_HWPROBE_WHICH_CPUS (1 << 0)
+
#endif
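[Editorial note: the new RISCV_HWPROBE_WHICH_CPUS flag inverts the query; instead of asking "what do these CPUs support", userspace asks "which CPUs satisfy these pairs". A hedged userspace sketch using the raw syscall (a libc wrapper may not exist):

	#include <sched.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/hwprobe.h>

	/* Which online CPUs implement Zbb? */
	cpu_set_t cpus;
	memset(&cpus, 0xff, sizeof(cpus));	/* start from "all CPUs" */

	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
		.value = RISCV_HWPROBE_EXT_ZBB,
	};

	long rc = syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(cpus),
			  (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS);
	/* on success, CPUs lacking Zbb have been cleared from 'cpus' */
]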
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 82940b6a79a2..757413d36c5e 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -50,6 +50,7 @@ obj-y += setup.o
obj-y += signal.o
obj-y += syscall_table.o
obj-y += sys_riscv.o
+obj-y += sys_hwprobe.o
obj-y += time.o
obj-y += traps.o
obj-y += riscv_ksyms.o
diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c
index 457a18efcb11..28b58fc5ad19 100644
--- a/arch/riscv/kernel/cpu-hotplug.c
+++ b/arch/riscv/kernel/cpu-hotplug.c
@@ -18,7 +18,7 @@
bool cpu_has_hotplug(unsigned int cpu)
{
- if (cpu_ops[cpu]->cpu_stop)
+ if (cpu_ops->cpu_stop)
return true;
return false;
@@ -29,25 +29,18 @@ bool cpu_has_hotplug(unsigned int cpu)
*/
int __cpu_disable(void)
{
- int ret = 0;
unsigned int cpu = smp_processor_id();
- if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop)
+ if (!cpu_ops->cpu_stop)
return -EOPNOTSUPP;
- if (cpu_ops[cpu]->cpu_disable)
- ret = cpu_ops[cpu]->cpu_disable(cpu);
-
- if (ret)
- return ret;
-
remove_cpu_topology(cpu);
numa_remove_cpu(cpu);
set_cpu_online(cpu, false);
riscv_ipi_disable();
irq_migrate_all_off_this_cpu();
- return ret;
+ return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -62,8 +55,8 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
pr_notice("CPU%u: off\n", cpu);
/* Verify from the firmware if the cpu is really stopped */
- if (cpu_ops[cpu]->cpu_is_stopped)
- ret = cpu_ops[cpu]->cpu_is_stopped(cpu);
+ if (cpu_ops->cpu_is_stopped)
+ ret = cpu_ops->cpu_is_stopped(cpu);
if (ret)
pr_warn("CPU%d may not have stopped: %d\n", cpu, ret);
}
@@ -77,7 +70,7 @@ void __noreturn arch_cpu_idle_dead(void)
cpuhp_ap_report_dead();
- cpu_ops[smp_processor_id()]->cpu_stop();
+ cpu_ops->cpu_stop();
/* It should never reach here */
BUG();
}
diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
index eb479a88a954..6a8bd8f4db07 100644
--- a/arch/riscv/kernel/cpu_ops.c
+++ b/arch/riscv/kernel/cpu_ops.c
@@ -13,25 +13,21 @@
#include <asm/sbi.h>
#include <asm/smp.h>
-const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
+const struct cpu_operations *cpu_ops __ro_after_init = &cpu_ops_spinwait;
extern const struct cpu_operations cpu_ops_sbi;
#ifndef CONFIG_RISCV_BOOT_SPINWAIT
const struct cpu_operations cpu_ops_spinwait = {
- .name = "",
- .cpu_prepare = NULL,
.cpu_start = NULL,
};
#endif
-void __init cpu_set_ops(int cpuid)
+void __init cpu_set_ops(void)
{
#if IS_ENABLED(CONFIG_RISCV_SBI)
if (sbi_probe_extension(SBI_EXT_HSM)) {
- if (!cpuid)
- pr_info("SBI HSM extension detected\n");
- cpu_ops[cpuid] = &cpu_ops_sbi;
- } else
+ pr_info("SBI HSM extension detected\n");
+ cpu_ops = &cpu_ops_sbi;
+ }
#endif
- cpu_ops[cpuid] = &cpu_ops_spinwait;
}
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index efa0f0816634..1cc7df740edd 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -79,23 +79,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
return sbi_hsm_hart_start(hartid, boot_addr, hsm_data);
}
-static int sbi_cpu_prepare(unsigned int cpuid)
-{
- if (!cpu_ops_sbi.cpu_start) {
- pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
- return -ENODEV;
- }
- return 0;
-}
-
#ifdef CONFIG_HOTPLUG_CPU
-static int sbi_cpu_disable(unsigned int cpuid)
-{
- if (!cpu_ops_sbi.cpu_stop)
- return -EOPNOTSUPP;
- return 0;
-}
-
static void sbi_cpu_stop(void)
{
int ret;
@@ -118,11 +102,8 @@ static int sbi_cpu_is_stopped(unsigned int cpuid)
#endif
const struct cpu_operations cpu_ops_sbi = {
- .name = "sbi",
- .cpu_prepare = sbi_cpu_prepare,
.cpu_start = sbi_cpu_start,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = sbi_cpu_disable,
.cpu_stop = sbi_cpu_stop,
.cpu_is_stopped = sbi_cpu_is_stopped,
#endif
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
index d98d19226b5f..613872b0a21a 100644
--- a/arch/riscv/kernel/cpu_ops_spinwait.c
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -39,15 +39,6 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
}
-static int spinwait_cpu_prepare(unsigned int cpuid)
-{
- if (!cpu_ops_spinwait.cpu_start) {
- pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
- return -ENODEV;
- }
- return 0;
-}
-
static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
/*
@@ -64,7 +55,5 @@ static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
}
const struct cpu_operations cpu_ops_spinwait = {
- .name = "spinwait",
- .cpu_prepare = spinwait_cpu_prepare,
.cpu_start = spinwait_cpu_start,
};
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index b3785ffc1570..e32591e9da90 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
*
* NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
*/
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit)
{
const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
@@ -102,17 +102,101 @@ static bool riscv_isa_extension_check(int id)
return false;
}
return true;
+ case RISCV_ISA_EXT_INVALID:
+ return false;
}
return true;
}
-#define __RISCV_ISA_EXT_DATA(_name, _id) { \
- .name = #_name, \
- .property = #_name, \
- .id = _id, \
+#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) { \
+ .name = #_name, \
+ .property = #_name, \
+ .id = _id, \
+ .subset_ext_ids = _subset_exts, \
+ .subset_ext_size = _subset_exts_size \
}
+#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0)
+
+/* Used to declare a pure "lasso" extension (Zk for instance) */
+#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
+ _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts))
+
+/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
+#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
+ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts))
+
+static const unsigned int riscv_zk_bundled_exts[] = {
+ RISCV_ISA_EXT_ZBKB,
+ RISCV_ISA_EXT_ZBKC,
+ RISCV_ISA_EXT_ZBKX,
+ RISCV_ISA_EXT_ZKND,
+ RISCV_ISA_EXT_ZKNE,
+ RISCV_ISA_EXT_ZKR,
+ RISCV_ISA_EXT_ZKT,
+};
+
+static const unsigned int riscv_zkn_bundled_exts[] = {
+ RISCV_ISA_EXT_ZBKB,
+ RISCV_ISA_EXT_ZBKC,
+ RISCV_ISA_EXT_ZBKX,
+ RISCV_ISA_EXT_ZKND,
+ RISCV_ISA_EXT_ZKNE,
+ RISCV_ISA_EXT_ZKNH,
+};
+
+static const unsigned int riscv_zks_bundled_exts[] = {
+ RISCV_ISA_EXT_ZBKB,
+ RISCV_ISA_EXT_ZBKC,
+ RISCV_ISA_EXT_ZKSED,
+ RISCV_ISA_EXT_ZKSH
+};
+
+#define RISCV_ISA_EXT_ZVKN \
+ RISCV_ISA_EXT_ZVKNED, \
+ RISCV_ISA_EXT_ZVKNHB, \
+ RISCV_ISA_EXT_ZVKB, \
+ RISCV_ISA_EXT_ZVKT
+
+static const unsigned int riscv_zvkn_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKN
+};
+
+static const unsigned int riscv_zvknc_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKN,
+ RISCV_ISA_EXT_ZVBC
+};
+
+static const unsigned int riscv_zvkng_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKN,
+ RISCV_ISA_EXT_ZVKG
+};
+
+#define RISCV_ISA_EXT_ZVKS \
+ RISCV_ISA_EXT_ZVKSED, \
+ RISCV_ISA_EXT_ZVKSH, \
+ RISCV_ISA_EXT_ZVKB, \
+ RISCV_ISA_EXT_ZVKT
+
+static const unsigned int riscv_zvks_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKS
+};
+
+static const unsigned int riscv_zvksc_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKS,
+ RISCV_ISA_EXT_ZVBC
+};
+
+static const unsigned int riscv_zvksg_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKS,
+ RISCV_ISA_EXT_ZVKG
+};
+
+static const unsigned int riscv_zvbb_exts[] = {
+ RISCV_ISA_EXT_ZVKB
+};
+
/*
* The canonical order of ISA extension names in the ISA string is defined in
* chapter 27 of the unprivileged specification.
@@ -160,10 +244,6 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
- __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b),
- __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k),
- __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j),
- __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p),
__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
@@ -172,11 +252,49 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
+ __RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL),
__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
+ __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
+ __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
+ __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
+ __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
__RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
+ __RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC),
+ __RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB),
+ __RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC),
+ __RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX),
__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
+ __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts),
+ __RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND),
+ __RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE),
+ __RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH),
+ __RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR),
+ __RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT),
+ __RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
+ __RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
+ __RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO),
+ __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
+ __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
+ __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH),
+ __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN),
+ __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
+ __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
+ __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
+ __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
+ __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
+ __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
+ __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
+ __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
+ __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
+ __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
__RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
@@ -189,6 +307,31 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
+static void __init match_isa_ext(const struct riscv_isa_ext_data *ext, const char *name,
+ const char *name_end, struct riscv_isainfo *isainfo)
+{
+ if ((name_end - name == strlen(ext->name)) &&
+ !strncasecmp(name, ext->name, name_end - name)) {
+ /*
+ * If this is a bundle, enable all the ISA extensions that
+ * comprise the bundle.
+ */
+ if (ext->subset_ext_size) {
+ for (int i = 0; i < ext->subset_ext_size; i++) {
+ if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
+ set_bit(ext->subset_ext_ids[i], isainfo->isa);
+ }
+ }
+
+ /*
+ * This is valid even for bundle extensions, which use the RISCV_ISA_EXT_INVALID id
+ * (rejected by riscv_isa_extension_check()).
+ */
+ if (riscv_isa_extension_check(ext->id))
+ set_bit(ext->id, isainfo->isa);
+ }
+}
+
static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
unsigned long *isa2hwcap, const char *isa)
{
@@ -321,14 +464,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
if (*isa == '_')
++isa;
-#define SET_ISA_EXT_MAP(name, bit) \
- do { \
- if ((ext_end - ext == strlen(name)) && \
- !strncasecmp(ext, name, strlen(name)) && \
- riscv_isa_extension_check(bit)) \
- set_bit(bit, isainfo->isa); \
- } while (false) \
-
if (unlikely(ext_err))
continue;
if (!ext_long) {
@@ -340,10 +475,8 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
}
} else {
for (int i = 0; i < riscv_isa_ext_count; i++)
- SET_ISA_EXT_MAP(riscv_isa_ext[i].name,
- riscv_isa_ext[i].id);
+ match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo);
}
-#undef SET_ISA_EXT_MAP
}
}
@@ -442,18 +575,26 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
}
for (int i = 0; i < riscv_isa_ext_count; i++) {
+ const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i];
+
if (of_property_match_string(cpu_node, "riscv,isa-extensions",
- riscv_isa_ext[i].property) < 0)
+ ext->property) < 0)
continue;
- if (!riscv_isa_extension_check(riscv_isa_ext[i].id))
- continue;
+ if (ext->subset_ext_size) {
+ for (int j = 0; j < ext->subset_ext_size; j++) {
+ if (riscv_isa_extension_check(ext->subset_ext_ids[j]))
+ set_bit(ext->subset_ext_ids[j], isainfo->isa);
+ }
+ }
- /* Only single letter extensions get set in hwcap */
- if (strnlen(riscv_isa_ext[i].name, 2) == 1)
- this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
+ if (riscv_isa_extension_check(ext->id)) {
+ set_bit(ext->id, isainfo->isa);
- set_bit(riscv_isa_ext[i].id, isainfo->isa);
+ /* Only single letter extensions get set in hwcap */
+ if (strnlen(riscv_isa_ext[i].name, 2) == 1)
+ this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
+ }
}
of_node_put(cpu_node);
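[Editorial note: to illustrate the bundle mechanism added above, a bundle such as "zkn" has no ISA-bitmap bit of its own; its id is RISCV_ISA_EXT_INVALID, which riscv_isa_extension_check() rejects, so parsing an ISA string like "rv64imac_zkn" sets only the member bits. Consumers therefore always test members, never the bundle:

	/* sketch: after parsing an ISA string containing "zkn" */
	if (riscv_isa_extension_available(NULL, ZBKB) &&
	    riscv_isa_extension_available(NULL, ZKND))
		; /* members of the Zkn bundle are individually visible */
]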
diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c
index aa6209a74c83..b64bf1624a05 100644
--- a/arch/riscv/kernel/efi.c
+++ b/arch/riscv/kernel/efi.c
@@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = ptep_get(ptep);
unsigned long val;
if (md->attribute & EFI_MEMORY_RO) {
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 76ace1e0b46f..4236a69c35cb 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -11,7 +11,6 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
-#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/scs.h>
@@ -89,6 +88,7 @@ relocate_enable_mmu:
/* Compute satp for kernel page tables, but don't load it yet */
srl a2, a0, PAGE_SHIFT
la a1, satp_mode
+ XIP_FIXUP_OFFSET a1
REG_L a1, 0(a1)
or a2, a2, a1
@@ -265,10 +265,12 @@ SYM_CODE_START(_start_kernel)
la sp, _end + THREAD_SIZE
XIP_FIXUP_OFFSET sp
mv s0, a0
+ mv s1, a1
call __copy_data
- /* Restore a0 copy */
+ /* Restore a0 & a1 copy */
mv a0, s0
+ mv a1, s1
#endif
#ifndef CONFIG_XIP_KERNEL
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 58dd96a2a153..79dc81223238 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -3,12 +3,12 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
-#include <asm-generic/export.h>
#include <asm/ftrace.h>
.text
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index b4dd9ed6849e..d7ec69ac6910 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -4,12 +4,12 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/cfi_types.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
-#include <asm-generic/export.h>
#include <asm/ftrace.h>
.text
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index aac019ed63b1..862834bb1d64 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -894,7 +894,8 @@ void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR,
MODULES_END, GFP_KERNEL,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
+ PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 13ee7bf589a1..37e87fdcf6a0 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -14,6 +14,7 @@
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
+#include <asm/sections.h>
struct patch_insn {
void *addr;
@@ -25,6 +26,14 @@ struct patch_insn {
int riscv_patch_in_stop_machine = false;
#ifdef CONFIG_MMU
+
+static inline bool is_kernel_exittext(uintptr_t addr)
+{
+ return system_state < SYSTEM_RUNNING &&
+ addr >= (uintptr_t)__exittext_begin &&
+ addr < (uintptr_t)__exittext_end;
+}
+
/*
* The fix_to_virt(, idx) needs a const value (not a dynamic variable of
* reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses".
@@ -35,7 +44,7 @@ static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
uintptr_t uintaddr = (uintptr_t) addr;
struct page *page;
- if (core_kernel_text(uintaddr))
+ if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
page = phys_to_page(__pa_symbol(addr));
else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
page = vmalloc_to_page(addr);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 535a837de55d..2bf882804624 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -26,7 +26,6 @@
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
-#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 88b6220b2608..33dfb5078301 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -91,7 +91,7 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
err = __copy_to_user(&state->v_state, &current->thread.vstate,
offsetof(struct __riscv_v_ext_state, datap));
/* Copy the pointer datap itself. */
- err |= __put_user(datap, &state->v_state.datap);
+ err |= __put_user((__force void *)datap, &state->v_state.datap);
/* Copy the whole vector content to user space datap. */
err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize);
/* Copy magic to the user space after saving all vector context */
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 40420afbb1a0..45dd4035416e 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -81,7 +81,7 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_has_hotplug(cpu))
- cpu_ops[cpu]->cpu_stop();
+ cpu_ops->cpu_stop();
#endif
for(;;)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index d162bf339beb..519b6bd946e5 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -49,7 +49,6 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int cpuid;
- int ret;
unsigned int curr_cpuid;
init_cpu_topology();
@@ -66,11 +65,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for_each_possible_cpu(cpuid) {
if (cpuid == curr_cpuid)
continue;
- if (cpu_ops[cpuid]->cpu_prepare) {
- ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
- if (ret)
- continue;
- }
set_cpu_present(cpuid, true);
numa_store_cpu_info(cpuid);
}
@@ -125,18 +119,7 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
static void __init acpi_parse_and_init_cpus(void)
{
- int cpuid;
-
- cpu_set_ops(0);
-
acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0);
-
- for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
- if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
- cpu_set_ops(cpuid);
- set_cpu_possible(cpuid, true);
- }
- }
}
#else
#define acpi_parse_and_init_cpus(...) do { } while (0)
@@ -150,8 +133,6 @@ static void __init of_parse_and_init_cpus(void)
int cpuid = 1;
int rc;
- cpu_set_ops(0);
-
for_each_of_cpu_node(dn) {
rc = riscv_early_of_processor_hartid(dn, &hart);
if (rc < 0)
@@ -179,27 +160,28 @@ static void __init of_parse_and_init_cpus(void)
if (cpuid > nr_cpu_ids)
pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
cpuid, nr_cpu_ids);
-
- for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
- if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
- cpu_set_ops(cpuid);
- set_cpu_possible(cpuid, true);
- }
- }
}
void __init setup_smp(void)
{
+ int cpuid;
+
+ cpu_set_ops();
+
if (acpi_disabled)
of_parse_and_init_cpus();
else
acpi_parse_and_init_cpus();
+
+ for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)
+ if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+ set_cpu_possible(cpuid, true);
}
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
- if (cpu_ops[cpu]->cpu_start)
- return cpu_ops[cpu]->cpu_start(cpu, tidle);
+ if (cpu_ops->cpu_start)
+ return cpu_ops->cpu_start(cpu, tidle);
return -EOPNOTSUPP;
}
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
new file mode 100644
index 000000000000..a7c56b41efd2
--- /dev/null
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The hwprobe interface, for allowing userspace to probe to see which features
+ * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
+ * more details.
+ */
+#include <linux/syscalls.h>
+#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/hwprobe.h>
+#include <asm/sbi.h>
+#include <asm/switch_to.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/vector.h>
+#include <vdso/vsyscall.h>
+
+
+static void hwprobe_arch_id(struct riscv_hwprobe *pair,
+ const struct cpumask *cpus)
+{
+ u64 id = -1ULL;
+ bool first = true;
+ int cpu;
+
+ for_each_cpu(cpu, cpus) {
+ u64 cpu_id;
+
+ switch (pair->key) {
+ case RISCV_HWPROBE_KEY_MVENDORID:
+ cpu_id = riscv_cached_mvendorid(cpu);
+ break;
+ case RISCV_HWPROBE_KEY_MIMPID:
+ cpu_id = riscv_cached_mimpid(cpu);
+ break;
+ case RISCV_HWPROBE_KEY_MARCHID:
+ cpu_id = riscv_cached_marchid(cpu);
+ break;
+ }
+
+ if (first) {
+ id = cpu_id;
+ first = false;
+ }
+
+ /*
+ * If there's a mismatch for the given set, return -1 in the
+ * value.
+ */
+ if (id != cpu_id) {
+ id = -1ULL;
+ break;
+ }
+ }
+
+ pair->value = id;
+}
+
+static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
+ const struct cpumask *cpus)
+{
+ int cpu;
+ u64 missing = 0;
+
+ pair->value = 0;
+ if (has_fpu())
+ pair->value |= RISCV_HWPROBE_IMA_FD;
+
+ if (riscv_isa_extension_available(NULL, c))
+ pair->value |= RISCV_HWPROBE_IMA_C;
+
+ if (has_vector())
+ pair->value |= RISCV_HWPROBE_IMA_V;
+
+ /*
+ * Loop through and record extensions that 1) anyone has, and 2) anyone
+ * doesn't have.
+ */
+ for_each_cpu(cpu, cpus) {
+ struct riscv_isainfo *isainfo = &hart_isa[cpu];
+
+#define EXT_KEY(ext) \
+ do { \
+ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
+ pair->value |= RISCV_HWPROBE_EXT_##ext; \
+ else \
+ missing |= RISCV_HWPROBE_EXT_##ext; \
+ } while (false)
+
+ /*
+ * Only use EXT_KEY() for extensions which can be exposed to userspace,
+ * regardless of the kernel's configuration, as no other checks, besides
+ * presence in the hart_isa bitmap, are made.
+ */
+ EXT_KEY(ZBA);
+ EXT_KEY(ZBB);
+ EXT_KEY(ZBS);
+ EXT_KEY(ZICBOZ);
+ EXT_KEY(ZBC);
+
+ EXT_KEY(ZBKB);
+ EXT_KEY(ZBKC);
+ EXT_KEY(ZBKX);
+ EXT_KEY(ZKND);
+ EXT_KEY(ZKNE);
+ EXT_KEY(ZKNH);
+ EXT_KEY(ZKSED);
+ EXT_KEY(ZKSH);
+ EXT_KEY(ZKT);
+ EXT_KEY(ZIHINTNTL);
+ EXT_KEY(ZTSO);
+ EXT_KEY(ZACAS);
+ EXT_KEY(ZICOND);
+
+ if (has_vector()) {
+ EXT_KEY(ZVBB);
+ EXT_KEY(ZVBC);
+ EXT_KEY(ZVKB);
+ EXT_KEY(ZVKG);
+ EXT_KEY(ZVKNED);
+ EXT_KEY(ZVKNHA);
+ EXT_KEY(ZVKNHB);
+ EXT_KEY(ZVKSED);
+ EXT_KEY(ZVKSH);
+ EXT_KEY(ZVKT);
+ EXT_KEY(ZVFH);
+ EXT_KEY(ZVFHMIN);
+ }
+
+ if (has_fpu()) {
+ EXT_KEY(ZFH);
+ EXT_KEY(ZFHMIN);
+ EXT_KEY(ZFA);
+ }
+#undef EXT_KEY
+ }
+
+ /* Now turn off reporting features if any CPU is missing it. */
+ pair->value &= ~missing;
+}
+
+static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
+{
+ struct riscv_hwprobe pair;
+
+ hwprobe_isa_ext0(&pair, cpus);
+ return (pair.value & ext);
+}
+
+static u64 hwprobe_misaligned(const struct cpumask *cpus)
+{
+ int cpu;
+ u64 perf = -1ULL;
+
+ for_each_cpu(cpu, cpus) {
+ int this_perf = per_cpu(misaligned_access_speed, cpu);
+
+ if (perf == -1ULL)
+ perf = this_perf;
+
+ if (perf != this_perf) {
+ perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+ break;
+ }
+ }
+
+ if (perf == -1ULL)
+ return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+
+ return perf;
+}
+
+static void hwprobe_one_pair(struct riscv_hwprobe *pair,
+ const struct cpumask *cpus)
+{
+ switch (pair->key) {
+ case RISCV_HWPROBE_KEY_MVENDORID:
+ case RISCV_HWPROBE_KEY_MARCHID:
+ case RISCV_HWPROBE_KEY_MIMPID:
+ hwprobe_arch_id(pair, cpus);
+ break;
+ /*
+ * The kernel already assumes that the base single-letter ISA
+ * extensions are supported on all harts, and only supports the
+ * IMA base, so just cheat a bit here and tell that to
+ * userspace.
+ */
+ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
+ pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
+ break;
+
+ case RISCV_HWPROBE_KEY_IMA_EXT_0:
+ hwprobe_isa_ext0(pair, cpus);
+ break;
+
+ case RISCV_HWPROBE_KEY_CPUPERF_0:
+ pair->value = hwprobe_misaligned(cpus);
+ break;
+
+ case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
+ pair->value = 0;
+ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
+ pair->value = riscv_cboz_block_size;
+ break;
+
+ /*
+ * For forward compatibility, unknown keys don't fail the whole
+ * call, but get their element key set to -1 and value set to 0
+ * indicating they're unrecognized.
+ */
+ default:
+ pair->key = -1;
+ pair->value = 0;
+ break;
+ }
+}
+
+static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
+ size_t pair_count, size_t cpusetsize,
+ unsigned long __user *cpus_user,
+ unsigned int flags)
+{
+ size_t out;
+ int ret;
+ cpumask_t cpus;
+
+ /* Check the reserved flags. */
+ if (flags != 0)
+ return -EINVAL;
+
+ /*
+ * The interface supports taking in a CPU mask, and returns values that
+ * are consistent across that mask. Allow userspace to specify NULL and
+ * 0 as a shortcut to all online CPUs.
+ */
+ cpumask_clear(&cpus);
+ if (!cpusetsize && !cpus_user) {
+ cpumask_copy(&cpus, cpu_online_mask);
+ } else {
+ if (cpusetsize > cpumask_size())
+ cpusetsize = cpumask_size();
+
+ ret = copy_from_user(&cpus, cpus_user, cpusetsize);
+ if (ret)
+ return -EFAULT;
+
+ /*
+ * Userspace must provide at least one online CPU, without that
+ * there's no way to define what is supported.
+ */
+ cpumask_and(&cpus, &cpus, cpu_online_mask);
+ if (cpumask_empty(&cpus))
+ return -EINVAL;
+ }
+
+ for (out = 0; out < pair_count; out++, pairs++) {
+ struct riscv_hwprobe pair;
+
+ if (get_user(pair.key, &pairs->key))
+ return -EFAULT;
+
+ pair.value = 0;
+ hwprobe_one_pair(&pair, &cpus);
+ ret = put_user(pair.key, &pairs->key);
+ if (ret == 0)
+ ret = put_user(pair.value, &pairs->value);
+
+ if (ret)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
+ size_t pair_count, size_t cpusetsize,
+ unsigned long __user *cpus_user,
+ unsigned int flags)
+{
+ cpumask_t cpus, one_cpu;
+ bool clear_all = false;
+ size_t i;
+ int ret;
+
+ if (flags != RISCV_HWPROBE_WHICH_CPUS)
+ return -EINVAL;
+
+ if (!cpusetsize || !cpus_user)
+ return -EINVAL;
+
+ if (cpusetsize > cpumask_size())
+ cpusetsize = cpumask_size();
+
+ ret = copy_from_user(&cpus, cpus_user, cpusetsize);
+ if (ret)
+ return -EFAULT;
+
+ if (cpumask_empty(&cpus))
+ cpumask_copy(&cpus, cpu_online_mask);
+
+ cpumask_and(&cpus, &cpus, cpu_online_mask);
+
+ cpumask_clear(&one_cpu);
+
+ for (i = 0; i < pair_count; i++) {
+ struct riscv_hwprobe pair, tmp;
+ int cpu;
+
+ ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
+ if (ret)
+ return -EFAULT;
+
+ if (!riscv_hwprobe_key_is_valid(pair.key)) {
+ clear_all = true;
+ pair = (struct riscv_hwprobe){ .key = -1, };
+ ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
+ if (ret)
+ return -EFAULT;
+ }
+
+ if (clear_all)
+ continue;
+
+ tmp = (struct riscv_hwprobe){ .key = pair.key, };
+
+ for_each_cpu(cpu, &cpus) {
+ cpumask_set_cpu(cpu, &one_cpu);
+
+ hwprobe_one_pair(&tmp, &one_cpu);
+
+ if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
+ cpumask_clear_cpu(cpu, &cpus);
+
+ cpumask_clear_cpu(cpu, &one_cpu);
+ }
+ }
+
+ if (clear_all)
+ cpumask_clear(&cpus);
+
+ ret = copy_to_user(cpus_user, &cpus, cpusetsize);
+ if (ret)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+ size_t pair_count, size_t cpusetsize,
+ unsigned long __user *cpus_user,
+ unsigned int flags)
+{
+ if (flags & RISCV_HWPROBE_WHICH_CPUS)
+ return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+
+ return hwprobe_get_values(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+}
+
+#ifdef CONFIG_MMU
+
+static int __init init_hwprobe_vdso_data(void)
+{
+ struct vdso_data *vd = __arch_get_k_vdso_data();
+ struct arch_vdso_data *avd = &vd->arch_data;
+ u64 id_bitsmash = 0;
+ struct riscv_hwprobe pair;
+ int key;
+
+ /*
+ * Initialize vDSO data with the answers for the "all CPUs" case, to
+ * save a syscall in the common case.
+ */
+ for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
+ pair.key = key;
+ hwprobe_one_pair(&pair, cpu_online_mask);
+
+ WARN_ON_ONCE(pair.key < 0);
+
+ avd->all_cpu_hwprobe_values[key] = pair.value;
+ /*
+ * Smash together the vendor, arch, and impl IDs to see if
+ * they're all 0 or any negative.
+ */
+ if (key <= RISCV_HWPROBE_KEY_MIMPID)
+ id_bitsmash |= pair.value;
+ }
+
+ /*
+ * If the arch, vendor, and implementation ID are all the same across
+ * all harts, then assume all CPUs are the same, and allow the vDSO to
+ * answer queries for arbitrary masks. However if all values are 0 (not
+ * populated) or any value returns -1 (varies across CPUs), then the
+ * vDSO should defer to the kernel for exotic cpu masks.
+ */
+ avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+ return 0;
+}
+
+arch_initcall_sync(init_hwprobe_vdso_data);
+
+#endif /* CONFIG_MMU */
+
+SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
+ size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
+ cpus, unsigned int, flags)
+{
+ return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
+ cpus, flags);
+}
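[Editorial note: for the common values query, the interface keeps the "all online CPUs" shortcut of a NULL mask with zero size, as handled in hwprobe_get_values() above. A hedged one-liner from userspace:

	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

	/* flags == 0 selects the values path; NULL/0 means all online CPUs */
	syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0);
	/* pair.value now holds the common misaligned-access perf class */
]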
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index a2ca5b7756a5..f1c1416a9f1e 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -7,15 +7,7 @@
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
-#include <asm/cpufeature.h>
-#include <asm/hwprobe.h>
-#include <asm/sbi.h>
-#include <asm/vector.h>
-#include <asm/switch_to.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
-#include <vdso/vsyscall.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
@@ -77,283 +69,6 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
return 0;
}
-/*
- * The hwprobe interface, for allowing userspace to probe to see which features
- * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for more
- * details.
- */
-static void hwprobe_arch_id(struct riscv_hwprobe *pair,
- const struct cpumask *cpus)
-{
- u64 id = -1ULL;
- bool first = true;
- int cpu;
-
- for_each_cpu(cpu, cpus) {
- u64 cpu_id;
-
- switch (pair->key) {
- case RISCV_HWPROBE_KEY_MVENDORID:
- cpu_id = riscv_cached_mvendorid(cpu);
- break;
- case RISCV_HWPROBE_KEY_MIMPID:
- cpu_id = riscv_cached_mimpid(cpu);
- break;
- case RISCV_HWPROBE_KEY_MARCHID:
- cpu_id = riscv_cached_marchid(cpu);
- break;
- }
-
- if (first) {
- id = cpu_id;
- first = false;
- }
-
- /*
- * If there's a mismatch for the given set, return -1 in the
- * value.
- */
- if (id != cpu_id) {
- id = -1ULL;
- break;
- }
- }
-
- pair->value = id;
-}
-
-static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
- const struct cpumask *cpus)
-{
- int cpu;
- u64 missing = 0;
-
- pair->value = 0;
- if (has_fpu())
- pair->value |= RISCV_HWPROBE_IMA_FD;
-
- if (riscv_isa_extension_available(NULL, c))
- pair->value |= RISCV_HWPROBE_IMA_C;
-
- if (has_vector())
- pair->value |= RISCV_HWPROBE_IMA_V;
-
- /*
- * Loop through and record extensions that 1) anyone has, and 2) anyone
- * doesn't have.
- */
- for_each_cpu(cpu, cpus) {
- struct riscv_isainfo *isainfo = &hart_isa[cpu];
-
-#define EXT_KEY(ext) \
- do { \
- if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
- pair->value |= RISCV_HWPROBE_EXT_##ext; \
- else \
- missing |= RISCV_HWPROBE_EXT_##ext; \
- } while (false)
-
- /*
- * Only use EXT_KEY() for extensions which can be exposed to userspace,
- * regardless of the kernel's configuration, as no other checks, besides
- * presence in the hart_isa bitmap, are made.
- */
- EXT_KEY(ZBA);
- EXT_KEY(ZBB);
- EXT_KEY(ZBS);
- EXT_KEY(ZICBOZ);
-#undef EXT_KEY
- }
-
- /* Now turn off reporting features if any CPU is missing it. */
- pair->value &= ~missing;
-}
-
-static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
-{
- struct riscv_hwprobe pair;
-
- hwprobe_isa_ext0(&pair, cpus);
- return (pair.value & ext);
-}
-
-static u64 hwprobe_misaligned(const struct cpumask *cpus)
-{
- int cpu;
- u64 perf = -1ULL;
-
- for_each_cpu(cpu, cpus) {
- int this_perf = per_cpu(misaligned_access_speed, cpu);
-
- if (perf == -1ULL)
- perf = this_perf;
-
- if (perf != this_perf) {
- perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
- break;
- }
- }
-
- if (perf == -1ULL)
- return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
-
- return perf;
-}
-
-static void hwprobe_one_pair(struct riscv_hwprobe *pair,
- const struct cpumask *cpus)
-{
- switch (pair->key) {
- case RISCV_HWPROBE_KEY_MVENDORID:
- case RISCV_HWPROBE_KEY_MARCHID:
- case RISCV_HWPROBE_KEY_MIMPID:
- hwprobe_arch_id(pair, cpus);
- break;
- /*
- * The kernel already assumes that the base single-letter ISA
- * extensions are supported on all harts, and only supports the
- * IMA base, so just cheat a bit here and tell that to
- * userspace.
- */
- case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
- pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
- break;
-
- case RISCV_HWPROBE_KEY_IMA_EXT_0:
- hwprobe_isa_ext0(pair, cpus);
- break;
-
- case RISCV_HWPROBE_KEY_CPUPERF_0:
- pair->value = hwprobe_misaligned(cpus);
- break;
-
- case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
- pair->value = 0;
- if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
- pair->value = riscv_cboz_block_size;
- break;
-
- /*
- * For forward compatibility, unknown keys don't fail the whole
- * call, but get their element key set to -1 and value set to 0
- * indicating they're unrecognized.
- */
- default:
- pair->key = -1;
- pair->value = 0;
- break;
- }
-}
-
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
- size_t pair_count, size_t cpu_count,
- unsigned long __user *cpus_user,
- unsigned int flags)
-{
- size_t out;
- int ret;
- cpumask_t cpus;
-
- /* Check the reserved flags. */
- if (flags != 0)
- return -EINVAL;
-
- /*
- * The interface supports taking in a CPU mask, and returns values that
- * are consistent across that mask. Allow userspace to specify NULL and
- * 0 as a shortcut to all online CPUs.
- */
- cpumask_clear(&cpus);
- if (!cpu_count && !cpus_user) {
- cpumask_copy(&cpus, cpu_online_mask);
- } else {
- if (cpu_count > cpumask_size())
- cpu_count = cpumask_size();
-
- ret = copy_from_user(&cpus, cpus_user, cpu_count);
- if (ret)
- return -EFAULT;
-
- /*
- * Userspace must provide at least one online CPU, without that
- * there's no way to define what is supported.
- */
- cpumask_and(&cpus, &cpus, cpu_online_mask);
- if (cpumask_empty(&cpus))
- return -EINVAL;
- }
-
- for (out = 0; out < pair_count; out++, pairs++) {
- struct riscv_hwprobe pair;
-
- if (get_user(pair.key, &pairs->key))
- return -EFAULT;
-
- pair.value = 0;
- hwprobe_one_pair(&pair, &cpus);
- ret = put_user(pair.key, &pairs->key);
- if (ret == 0)
- ret = put_user(pair.value, &pairs->value);
-
- if (ret)
- return -EFAULT;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_MMU
-
-static int __init init_hwprobe_vdso_data(void)
-{
- struct vdso_data *vd = __arch_get_k_vdso_data();
- struct arch_vdso_data *avd = &vd->arch_data;
- u64 id_bitsmash = 0;
- struct riscv_hwprobe pair;
- int key;
-
- /*
- * Initialize vDSO data with the answers for the "all CPUs" case, to
- * save a syscall in the common case.
- */
- for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
- pair.key = key;
- hwprobe_one_pair(&pair, cpu_online_mask);
-
- WARN_ON_ONCE(pair.key < 0);
-
- avd->all_cpu_hwprobe_values[key] = pair.value;
- /*
- * Smash together the vendor, arch, and impl IDs to see if
- * they're all 0 or any negative.
- */
- if (key <= RISCV_HWPROBE_KEY_MIMPID)
- id_bitsmash |= pair.value;
- }
-
- /*
- * If the arch, vendor, and implementation ID are all the same across
- * all harts, then assume all CPUs are the same, and allow the vDSO to
- * answer queries for arbitrary masks. However if all values are 0 (not
- * populated) or any value returns -1 (varies across CPUs), then the
- * vDSO should defer to the kernel for exotic cpu masks.
- */
- avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
- return 0;
-}
-
-arch_initcall_sync(init_hwprobe_vdso_data);
-
-#endif /* CONFIG_MMU */
-
-SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
- size_t, pair_count, size_t, cpu_count, unsigned long __user *,
- cpus, unsigned int, flags)
-{
- return do_riscv_hwprobe(pairs, pair_count, cpu_count,
- cpus, flags);
-}
-
/* Not defined using SYSCALL_DEFINE0 to avoid error injection */
asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused)
{
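
[editor's note] The hwprobe code removed above is not deleted outright: per the diffstat it moves into the new arch/riscv/kernel/sys_hwprobe.c. The userspace contract visible in do_riscv_hwprobe() is unchanged: a NULL cpuset with size 0 is a shortcut for "all online CPUs", and flags must be 0 for a plain value query. A minimal sketch of a caller, assuming installed kernel headers new enough to provide <asm/hwprobe.h> and __NR_riscv_hwprobe:

#include <asm/hwprobe.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct riscv_hwprobe pairs[] = {
		{ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
	};

	/* NULL/0 cpuset is the "all online CPUs" shortcut; flags must be 0 */
	if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
		return 1;

	printf("base behavior: %llx, IMA extensions: %llx\n",
	       (unsigned long long)pairs[0].value,
	       (unsigned long long)pairs[1].value);
	return 0;
}
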
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 5255f8134aef..8ded225e8c5b 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -319,7 +319,7 @@ static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
{
if (user_mode(regs)) {
- return __get_user(*r_val, addr);
+ return __get_user(*r_val, (u8 __user *)addr);
} else {
*r_val = *addr;
return 0;
@@ -329,7 +329,7 @@ static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
if (user_mode(regs)) {
- return __put_user(val, addr);
+ return __put_user(val, (u8 __user *)addr);
} else {
*addr = val;
return 0;
@@ -343,7 +343,7 @@ static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
if (user_mode(regs)) { \
__ret = __get_user(insn, insn_addr); \
} else { \
- insn = *insn_addr; \
+ insn = *(__force u16 *)insn_addr; \
__ret = 0; \
} \
\
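
[editor's note] The three hunks above only add address-space casts. The misaligned-access handler reaches the faulting instruction through a pointer that is a user address when the trap came from user mode and a kernel address otherwise, so the user-mode branch needs an explicit (u8 __user *) or (__force u16 *) cast to keep sparse's address-space checking honest; code generation is unchanged. Restating the pattern as a sketch (load_byte is a hypothetical name, mirroring load_u8 above):

/* one pointer, two address spaces, selected at runtime */
static inline int load_byte(struct pt_regs *regs, const u8 *addr, u8 *val)
{
	if (user_mode(regs))
		return __get_user(*val, (u8 __user *)addr); /* user access */

	*val = *addr; /* plain kernel dereference */
	return 0;
}
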
diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
index cadf725ef798..1e926e4b5881 100644
--- a/arch/riscv/kernel/vdso/hwprobe.c
+++ b/arch/riscv/kernel/vdso/hwprobe.c
@@ -3,26 +3,22 @@
* Copyright 2023 Rivos, Inc
*/
+#include <linux/string.h>
#include <linux/types.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
- size_t cpu_count, unsigned long *cpus,
+ size_t cpusetsize, unsigned long *cpus,
unsigned int flags);
-/* Add a prototype to avoid -Wmissing-prototypes warning. */
-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
- size_t cpu_count, unsigned long *cpus,
- unsigned int flags);
-
-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
- size_t cpu_count, unsigned long *cpus,
- unsigned int flags)
+static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
+ size_t cpusetsize, unsigned long *cpus,
+ unsigned int flags)
{
const struct vdso_data *vd = __arch_get_vdso_data();
const struct arch_vdso_data *avd = &vd->arch_data;
- bool all_cpus = !cpu_count && !cpus;
+ bool all_cpus = !cpusetsize && !cpus;
struct riscv_hwprobe *p = pairs;
struct riscv_hwprobe *end = pairs + pair_count;
@@ -33,7 +29,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
* masks.
*/
if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
- return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags);
+ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
/* This is something we can handle, fill out the pairs. */
while (p < end) {
@@ -50,3 +46,71 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
return 0;
}
+
+static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count,
+ size_t cpusetsize, unsigned long *cpus,
+ unsigned int flags)
+{
+ const struct vdso_data *vd = __arch_get_vdso_data();
+ const struct arch_vdso_data *avd = &vd->arch_data;
+ struct riscv_hwprobe *p = pairs;
+ struct riscv_hwprobe *end = pairs + pair_count;
+ unsigned char *c = (unsigned char *)cpus;
+ bool empty_cpus = true;
+ bool clear_all = false;
+ int i;
+
+ if (!cpusetsize || !cpus)
+ return -EINVAL;
+
+ for (i = 0; i < cpusetsize; i++) {
+ if (c[i]) {
+ empty_cpus = false;
+ break;
+ }
+ }
+
+ if (empty_cpus || flags != RISCV_HWPROBE_WHICH_CPUS || !avd->homogeneous_cpus)
+ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
+
+ while (p < end) {
+ if (riscv_hwprobe_key_is_valid(p->key)) {
+ struct riscv_hwprobe t = {
+ .key = p->key,
+ .value = avd->all_cpu_hwprobe_values[p->key],
+ };
+
+ if (!riscv_hwprobe_pair_cmp(&t, p))
+ clear_all = true;
+ } else {
+ clear_all = true;
+ p->key = -1;
+ p->value = 0;
+ }
+ p++;
+ }
+
+ if (clear_all) {
+ for (i = 0; i < cpusetsize; i++)
+ c[i] = 0;
+ }
+
+ return 0;
+}
+
+/* Add a prototype to avoid -Wmissing-prototypes warning. */
+int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+ size_t cpusetsize, unsigned long *cpus,
+ unsigned int flags);
+
+int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+ size_t cpusetsize, unsigned long *cpus,
+ unsigned int flags)
+{
+ if (flags & RISCV_HWPROBE_WHICH_CPUS)
+ return riscv_vdso_get_cpus(pairs, pair_count, cpusetsize,
+ cpus, flags);
+
+ return riscv_vdso_get_values(pairs, pair_count, cpusetsize,
+ cpus, flags);
+}
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 50767647fbc6..8c3daa1b0531 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -29,10 +29,12 @@ SECTIONS
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(PAGE_SIZE)
/* we have to discard exit text and such at runtime, not link time */
+ __exittext_begin = .;
.exit.text :
{
EXIT_TEXT
}
+ __exittext_end = .;
.text : {
_text = .;
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 492dd4b8f3d6..002ca58dd998 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -69,10 +69,12 @@ SECTIONS
__soc_builtin_dtb_table_end = .;
}
/* we have to discard exit text and such at runtime, not link time */
+ __exittext_begin = .;
.exit.text :
{
EXIT_TEXT
}
+ __exittext_end = .;
__init_text_end = .;
. = ALIGN(SECTION_ALIGN);
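
[editor's note] Both linker scripts now bracket .exit.text with __exittext_begin/__exittext_end, and the diffstat shows matching one-line changes to asm/sections.h and to kernel/patch.c. Since exit text is discarded at runtime rather than link time, instruction patching has to treat those addresses as valid only while booting; plausibly the symbols are consumed in patch.c by a helper along these lines (a sketch under that assumption, not the verbatim kernel code):

extern char __exittext_begin[], __exittext_end[];

static bool is_exit_text(unsigned long addr)
{
	/* .exit.text only exists until the system finishes booting */
	return system_state < SYSTEM_RUNNING &&
	       addr >= (unsigned long)__exittext_begin &&
	       addr < (unsigned long)__exittext_end;
}
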
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 068c74593871..a9e2fd7245e1 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
*ptep_level = current_level;
ptep = (pte_t *)kvm->arch.pgd;
ptep = &ptep[gstage_pte_index(addr, current_level)];
- while (ptep && pte_val(*ptep)) {
+ while (ptep && pte_val(ptep_get(ptep))) {
if (gstage_pte_leaf(ptep)) {
*ptep_level = current_level;
*ptepp = ptep;
@@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
if (current_level) {
current_level--;
*ptep_level = current_level;
- ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
ptep = &ptep[gstage_pte_index(addr, current_level)];
} else {
ptep = NULL;
@@ -149,25 +149,25 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
if (gstage_pte_leaf(ptep))
return -EEXIST;
- if (!pte_val(*ptep)) {
+ if (!pte_val(ptep_get(ptep))) {
if (!pcache)
return -ENOMEM;
next_ptep = kvm_mmu_memory_cache_alloc(pcache);
if (!next_ptep)
return -ENOMEM;
- *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
- __pgprot(_PAGE_TABLE));
+ set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
+ __pgprot(_PAGE_TABLE)));
} else {
if (gstage_pte_leaf(ptep))
return -EEXIST;
- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
}
current_level--;
ptep = &next_ptep[gstage_pte_index(addr, current_level)];
}
- *ptep = *new_pte;
+ set_pte(ptep, *new_pte);
if (gstage_pte_leaf(ptep))
gstage_remote_tlb_flush(kvm, current_level, addr);
@@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
BUG_ON(addr & (page_size - 1));
- if (!pte_val(*ptep))
+ if (!pte_val(ptep_get(ptep)))
return;
if (ptep_level && !gstage_pte_leaf(ptep)) {
- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
next_ptep_level = ptep_level - 1;
ret = gstage_level_to_page_size(next_ptep_level,
&next_page_size);
@@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
if (op == GSTAGE_OP_CLEAR)
set_pte(ptep, __pte(0));
else if (op == GSTAGE_OP_WP)
- set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
+ set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
gstage_remote_tlb_flush(kvm, ptep_level, addr);
}
}
@@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
&ptep, &ptep_level))
return false;
- return pte_young(*ptep);
+ return pte_young(ptep_get(ptep));
}
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
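
[editor's note] This file, like the mm/ changes below, converts every direct dereference of a page-table entry (*ptep, *pmdp, ...) to the generic accessors ptep_get(), pmdp_get(), pudp_get(), p4dp_get() and pgdp_get(). Those wrap READ_ONCE(), so each walk reads the entry exactly once and without tearing, and they give the architecture a single override point (useful on RISC-V, e.g. for Svnapot contiguous PTEs). The generic fallbacks have this shape (a sketch of include/linux/pgtable.h, trimmed to two levels):

static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}

static inline pmd_t pmdp_get(pmd_t *pmdp)
{
	return READ_ONCE(*pmdp);
}
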
diff --git a/arch/riscv/lib/clear_page.S b/arch/riscv/lib/clear_page.S
index b22de1231144..20ff03f5b0f2 100644
--- a/arch/riscv/lib/clear_page.S
+++ b/arch/riscv/lib/clear_page.S
@@ -4,9 +4,9 @@
*/
#include <linux/linkage.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/alternative-macros.h>
-#include <asm-generic/export.h>
#include <asm/hwcap.h>
#include <asm/insn-def.h>
#include <asm/page.h>
diff --git a/arch/riscv/lib/tishift.S b/arch/riscv/lib/tishift.S
index ef90075c4b0a..c8294bf72c06 100644
--- a/arch/riscv/lib/tishift.S
+++ b/arch/riscv/lib/tishift.S
@@ -4,7 +4,7 @@
*/
#include <linux/linkage.h>
-#include <asm-generic/export.h>
+#include <linux/export.h>
SYM_FUNC_START(__lshrti3)
beqz a2, .L1
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index 3ab438f30d13..a9d356d6c03c 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -1,5 +1,5 @@
#include <linux/linkage.h>
-#include <asm-generic/export.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
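
[editor's note] clear_page.S, tishift.S and uaccess.S all drop the deprecated <asm-generic/export.h> in favour of <linux/export.h>, which can be included from assembly directly. The export idiom in .S files is unchanged; as a sketch with a hypothetical symbol:

#include <linux/linkage.h>
#include <linux/export.h>

SYM_FUNC_START(example_func)	/* hypothetical, for illustration only */
	ret
SYM_FUNC_END(example_func)
EXPORT_SYMBOL(example_func)
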
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 3a4dfc8babcf..2c869f8026a8 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,10 +13,9 @@ endif
KCOV_INSTRUMENT_init.o := n
obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
obj-y += cacheflush.o
obj-y += context.o
-obj-y += pgtable.o
obj-y += pmem.o
ifeq ($(CONFIG_MMU),y)
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 081339ddf47e..3ba1d4dde5dd 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -136,24 +136,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
pgd = (pgd_t *)pfn_to_virt(pfn) + index;
pgd_k = init_mm.pgd + index;
- if (!pgd_present(*pgd_k)) {
+ if (!pgd_present(pgdp_get(pgd_k))) {
no_context(regs, addr);
return;
}
- set_pgd(pgd, *pgd_k);
+ set_pgd(pgd, pgdp_get(pgd_k));
p4d_k = p4d_offset(pgd_k, addr);
- if (!p4d_present(*p4d_k)) {
+ if (!p4d_present(p4dp_get(p4d_k))) {
no_context(regs, addr);
return;
}
pud_k = pud_offset(p4d_k, addr);
- if (!pud_present(*pud_k)) {
+ if (!pud_present(pudp_get(pud_k))) {
no_context(regs, addr);
return;
}
- if (pud_leaf(*pud_k))
+ if (pud_leaf(pudp_get(pud_k)))
goto flush_tlb;
/*
@@ -161,11 +161,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* to copy individual PTEs
*/
pmd_k = pmd_offset(pud_k, addr);
- if (!pmd_present(*pmd_k)) {
+ if (!pmd_present(pmdp_get(pmd_k))) {
no_context(regs, addr);
return;
}
- if (pmd_leaf(*pmd_k))
+ if (pmd_leaf(pmdp_get(pmd_k)))
goto flush_tlb;
/*
@@ -175,7 +175,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* silently loop forever.
*/
pte_k = pte_offset_kernel(pmd_k, addr);
- if (!pte_present(*pte_k)) {
+ if (!pte_present(ptep_get(pte_k))) {
no_context(regs, addr);
return;
}
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index b52f0210481f..431596c0e20e 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
}
if (sz == PMD_SIZE) {
- if (want_pmd_share(vma, addr) && pud_none(*pud))
+ if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud)))
pte = huge_pmd_share(mm, vma, addr, pud);
else
pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
- if (!pgd_present(*pgd))
+ if (!pgd_present(pgdp_get(pgd)))
return NULL;
p4d = p4d_offset(pgd, addr);
- if (!p4d_present(*p4d))
+ if (!p4d_present(p4dp_get(p4d)))
return NULL;
pud = pud_offset(p4d, addr);
@@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
/* must be pud huge, non-present or none */
return (pte_t *)pud;
- if (!pud_present(*pud))
+ if (!pud_present(pudp_get(pud)))
return NULL;
pmd = pmd_offset(pud, addr);
@@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
/* must be pmd huge, non-present or none */
return (pte_t *)pmd;
- if (!pmd_present(*pmd))
+ if (!pmd_present(pmdp_get(pmd)))
return NULL;
for_each_napot_order(order) {
@@ -293,7 +293,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_t *ptep,
unsigned long sz)
{
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = ptep_get(ptep);
int i, pte_num;
if (!pte_napot(pte)) {
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 2e011cbddf3a..a65937336cdc 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -174,6 +174,9 @@ void __init mem_init(void)
/* Limit the memory size via mem. */
static phys_addr_t memory_limit;
+#ifdef CONFIG_XIP_KERNEL
+#define memory_limit (*(phys_addr_t *)XIP_FIXUP(&memory_limit))
+#endif /* CONFIG_XIP_KERNEL */
static int __init early_mem(char *p)
{
@@ -952,7 +955,7 @@ static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
* setup_vm_final installs the linear mapping. For 32-bit kernel, as the
* kernel is mapped in the linear mapping, that makes no difference.
*/
- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
+ dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#endif
dtb_early_pa = dtb_pa;
@@ -1055,9 +1058,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif
kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
- kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
#ifdef CONFIG_XIP_KERNEL
+ kernel_map.page_offset = PAGE_OFFSET_L3;
kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
@@ -1067,6 +1070,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
+ kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
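
[editor's note] Several XIP fixes here: dtb_early_va drops an XIP_FIXUP on dtb_pa (a physical address outside the kernel image, so the fixup did not belong), kernel_map.page_offset is pinned to PAGE_OFFSET_L3 for XIP kernels (apparently because XIP only runs with three-level paging), and memory_limit gains the standard XIP shadowing idiom so that writes land in the RAM copy of the variable instead of the read-only flash image. The idiom, sketched with a hypothetical variable:

/* after the #define, every reference to example_limit transparently
 * dereferences the writable RAM copy rather than the XIP flash image */
static phys_addr_t example_limit;
#ifdef CONFIG_XIP_KERNEL
#define example_limit	(*(phys_addr_t *)XIP_FIXUP(&example_limit))
#endif
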
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 5e39dcf23fdb..e96251853037 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
phys_addr_t phys_addr;
pte_t *ptep, *p;
- if (pmd_none(*pmd)) {
+ if (pmd_none(pmdp_get(pmd))) {
p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
ptep = pte_offset_kernel(pmd, vaddr);
do {
- if (pte_none(*ptep)) {
+ if (pte_none(ptep_get(ptep))) {
phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
pmd_t *pmdp, *p;
unsigned long next;
- if (pud_none(*pud)) {
+ if (pud_none(pudp_get(pud))) {
p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
do {
next = pmd_addr_end(vaddr, end);
- if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+ if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
+ (next - vaddr) >= PMD_SIZE) {
phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
if (phys_addr) {
set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
pud_t *pudp, *p;
unsigned long next;
- if (p4d_none(*p4d)) {
+ if (p4d_none(p4dp_get(p4d))) {
p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d,
do {
next = pud_addr_end(vaddr, end);
- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
+ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+ (next - vaddr) >= PUD_SIZE) {
phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
if (phys_addr) {
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
p4d_t *p4dp, *p;
unsigned long next;
- if (pgd_none(*pgd)) {
+ if (pgd_none(pgdp_get(pgd))) {
p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
do {
next = p4d_addr_end(vaddr, end);
- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+ (next - vaddr) >= P4D_SIZE) {
phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
if (phys_addr) {
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+ if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
if (phys_addr) {
@@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp,
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
pudp = base_pud + pud_index(vaddr);
}
@@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp,
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
p4dp = base_p4d + p4d_index(vaddr);
}
@@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp,
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
pudp = base_pud + pud_index(vaddr);
}
do {
next = pud_addr_end(vaddr, end);
- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
(next - vaddr) >= PUD_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp,
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
p4dp = base_p4d + p4d_index(vaddr);
}
do {
next = p4d_addr_end(vaddr, end);
- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
(next - vaddr) >= P4D_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp,
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+ if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
do {
next = pud_addr_end(vaddr, end);
- if (pud_none(*pud_k)) {
+ if (pud_none(pudp_get(pud_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
do {
next = p4d_addr_end(vaddr, end);
- if (p4d_none(*p4d_k)) {
+ if (p4d_none(p4dp_get(p4d_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_none(*pgd_k)) {
+ if (pgd_none(pgdp_get(pgd_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@@ -451,7 +454,7 @@ static void __init create_tmp_mapping(void)
/* Copy the last p4d since it is shared with the kernel mapping. */
if (pgtable_l5_enabled) {
- ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+ ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
@@ -462,7 +465,7 @@ static void __init create_tmp_mapping(void)
/* Copy the last pud since it is shared with the kernel mapping. */
if (pgtable_l4_enabled) {
- ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
+ ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index fc5fc4f785c4..410056a50aa9 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -29,7 +29,7 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- p4d_t val = READ_ONCE(*p4d);
+ p4d_t val = p4dp_get(p4d);
if (p4d_leaf(val)) {
val = __p4d(set_pageattr_masks(p4d_val(val), walk));
@@ -42,7 +42,7 @@ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pud_t val = READ_ONCE(*pud);
+ pud_t val = pudp_get(pud);
if (pud_leaf(val)) {
val = __pud(set_pageattr_masks(pud_val(val), walk));
@@ -55,7 +55,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pmd_t val = READ_ONCE(*pmd);
+ pmd_t val = pmdp_get(pmd);
if (pmd_leaf(val)) {
val = __pmd(set_pageattr_masks(pmd_val(val), walk));
@@ -68,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pte_t val = READ_ONCE(*pte);
+ pte_t val = ptep_get(pte);
val = __pte(set_pageattr_masks(pte_val(val), walk));
set_pte(pte, val);
@@ -108,10 +108,10 @@ static int __split_linear_mapping_pmd(pud_t *pudp,
vaddr <= (vaddr & PMD_MASK) && end >= next)
continue;
- if (pmd_leaf(*pmdp)) {
+ if (pmd_leaf(pmdp_get(pmdp))) {
struct page *pte_page;
- unsigned long pfn = _pmd_pfn(*pmdp);
- pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+ unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
+ pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
pte_t *ptep_new;
int i;
@@ -148,10 +148,10 @@ static int __split_linear_mapping_pud(p4d_t *p4dp,
vaddr <= (vaddr & PUD_MASK) && end >= next)
continue;
- if (pud_leaf(*pudp)) {
+ if (pud_leaf(pudp_get(pudp))) {
struct page *pmd_page;
- unsigned long pfn = _pud_pfn(*pudp);
- pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+ unsigned long pfn = _pud_pfn(pudp_get(pudp));
+ pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
pmd_t *pmdp_new;
int i;
@@ -197,10 +197,10 @@ static int __split_linear_mapping_p4d(pgd_t *pgdp,
vaddr <= (vaddr & P4D_MASK) && end >= next)
continue;
- if (p4d_leaf(*p4dp)) {
+ if (p4d_leaf(p4dp_get(p4dp))) {
struct page *pud_page;
- unsigned long pfn = _p4d_pfn(*p4dp);
- pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+ unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
+ pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
pud_t *pudp_new;
int i;
@@ -305,8 +305,13 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
goto unlock;
}
} else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
- lm_start = (unsigned long)lm_alias(start);
- lm_end = (unsigned long)lm_alias(end);
+ if (is_kernel_mapping(start)) {
+ lm_start = (unsigned long)lm_alias(start);
+ lm_end = (unsigned long)lm_alias(end);
+ } else {
+ lm_start = start;
+ lm_end = end;
+ }
ret = split_linear_mapping(lm_start, lm_end);
if (ret)
@@ -378,7 +383,7 @@ int set_direct_map_invalid_noflush(struct page *page)
int set_direct_map_default_noflush(struct page *page)
{
return __set_memory((unsigned long)page_address(page), 1,
- PAGE_KERNEL, __pgprot(0));
+ PAGE_KERNEL, __pgprot(_PAGE_EXEC));
}
#ifdef CONFIG_DEBUG_PAGEALLOC
@@ -406,29 +411,29 @@ bool kernel_page_present(struct page *page)
pte_t *pte;
pgd = pgd_offset_k(addr);
- if (!pgd_present(*pgd))
+ if (!pgd_present(pgdp_get(pgd)))
return false;
- if (pgd_leaf(*pgd))
+ if (pgd_leaf(pgdp_get(pgd)))
return true;
p4d = p4d_offset(pgd, addr);
- if (!p4d_present(*p4d))
+ if (!p4d_present(p4dp_get(p4d)))
return false;
- if (p4d_leaf(*p4d))
+ if (p4d_leaf(p4dp_get(p4d)))
return true;
pud = pud_offset(p4d, addr);
- if (!pud_present(*pud))
+ if (!pud_present(pudp_get(pud)))
return false;
- if (pud_leaf(*pud))
+ if (pud_leaf(pudp_get(pud)))
return true;
pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd))
+ if (!pmd_present(pmdp_get(pmd)))
return false;
- if (pmd_leaf(*pmd))
+ if (pmd_leaf(pmdp_get(pmd)))
return true;
pte = pte_offset_kernel(pmd, addr);
- return pte_present(*pte);
+ return pte_present(ptep_get(pte));
}
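
[editor's note] Two behavioral fixes in pageattr.c. First, __set_memory() takes a set mask and a clear mask, and restoring the direct map's default permissions must actively clear _PAGE_EXEC: the old __pgprot(0) clear mask left a previously-executable page executable. Second, lm_alias() is only meaningful for kernel-image addresses, so linear-mapping addresses are now split in place instead of being run through the alias. The mask convention, sketched as it is used by the two set_direct_map helpers (fragments assume the context of pageattr.c, where __set_memory() is defined):

/* make a direct-map page invalid (not present) */
__set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT));

/* restore defaults: set PAGE_KERNEL, strip any leftover exec bit */
__set_memory(addr, 1, PAGE_KERNEL, __pgprot(_PAGE_EXEC));
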
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index fef4e7328e49..ef887efcb679 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -5,6 +5,47 @@
#include <linux/kernel.h>
#include <linux/pgtable.h>
+int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(ptep_get(ptep), entry))
+ __set_pte_at(ptep, entry);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
+int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ if (!pte_young(ptep_get(ptep)))
+ return 0;
+ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
+
+#ifdef CONFIG_64BIT
+pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ if (pgtable_l4_enabled)
+ return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
+
+ return (pud_t *)p4d;
+}
+
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+ if (pgtable_l5_enabled)
+ return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);
+
+ return (p4d_t *)pgd;
+}
+#endif
+
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
int pud_clear_huge(pud_t *pud)
{
- if (!pud_leaf(READ_ONCE(*pud)))
+ if (!pud_leaf(pudp_get(pud)))
return 0;
pud_clear(pud);
return 1;
@@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud)
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
- pmd_t *pmd = pud_pgtable(*pud);
+ pmd_t *pmd = pud_pgtable(pudp_get(pud));
int i;
pud_clear(pud);
@@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
int pmd_clear_huge(pmd_t *pmd)
{
- if (!pmd_leaf(READ_ONCE(*pmd)))
+ if (!pmd_leaf(pmdp_get(pmd)))
return 0;
pmd_clear(pmd);
return 1;
@@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd)
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
- pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));
pmd_clear(pmd);
@@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- VM_BUG_ON(pmd_trans_huge(*pmdp));
+ VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
/*
* When leaf PTE entries (regular pages) are collapsed into a leaf
* PMD entry (huge page), a valid non-leaf PTE is converted into a