Diffstat (limited to 'arch/arm64/kernel/mte.c')
-rw-r--r--  arch/arm64/kernel/mte.c | 180
1 file changed, 93 insertions(+), 87 deletions(-)
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 36f51b0e438a..9d314a3bad3b 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/prctl.h>
@@ -22,9 +23,7 @@
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
-u64 gcr_kernel_excl __ro_after_init;
-
-static bool report_fault_once = true;
+static DEFINE_PER_CPU_READ_MOSTLY(u64, mte_tcf_preferred);
 
 #ifdef CONFIG_KASAN_HW_TAGS
 /* Whether the MTE asynchronous mode is enabled. */
@@ -101,26 +100,6 @@ int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }
 
-void mte_init_tags(u64 max_tag)
-{
-	static bool gcr_kernel_excl_initialized;
-
-	if (!gcr_kernel_excl_initialized) {
-		/*
-		 * The format of the tags in KASAN is 0xFF and in MTE is 0xF.
-		 * This conversion extracts an MTE tag from a KASAN tag.
-		 */
-		u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
-					     max_tag), 0);
-
-		gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
-		gcr_kernel_excl_initialized = true;
-	}
-
-	/* Enable the kernel exclude mask for random tags generation. */
-	write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
-}
-
 static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
 {
 	/* Enable MTE Sync Mode for EL1. */
@@ -160,16 +139,6 @@ void mte_enable_kernel_async(void)
 }
 #endif
 
-void mte_set_report_once(bool state)
-{
-	WRITE_ONCE(report_fault_once, state);
-}
-
-bool mte_report_once(void)
-{
-	return READ_ONCE(report_fault_once);
-}
-
 #ifdef CONFIG_KASAN_HW_TAGS
 void mte_check_tfsr_el1(void)
 {
@@ -193,14 +162,26 @@ void mte_check_tfsr_el1(void)
 }
 #endif
 
-static void set_gcr_el1_excl(u64 excl)
+static void mte_update_sctlr_user(struct task_struct *task)
 {
-	current->thread.gcr_user_excl = excl;
-
 	/*
-	 * SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
-	 * by mte_set_user_gcr() in kernel_exit,
+	 * This must be called with preemption disabled and can only be called
+	 * on the current or next task since the CPU must match where the thread
+	 * is going to run. The caller is responsible for calling
+	 * update_sctlr_el1() later in the same preemption disabled block.
 	 */
+	unsigned long sctlr = task->thread.sctlr_user;
+	unsigned long mte_ctrl = task->thread.mte_ctrl;
+	unsigned long pref, resolved_mte_tcf;
+
+	pref = __this_cpu_read(mte_tcf_preferred);
+	resolved_mte_tcf = (mte_ctrl & pref) ? pref : mte_ctrl;
+	sctlr &= ~SCTLR_EL1_TCF0_MASK;
+	if (resolved_mte_tcf & MTE_CTRL_TCF_ASYNC)
+		sctlr |= SCTLR_EL1_TCF0_ASYNC;
+	else if (resolved_mte_tcf & MTE_CTRL_TCF_SYNC)
+		sctlr |= SCTLR_EL1_TCF0_SYNC;
+	task->thread.sctlr_user = sctlr;
 }
 
 void mte_thread_init_user(void)
@@ -212,15 +193,14 @@ void mte_thread_init_user(void)
 	dsb(ish);
 	write_sysreg_s(0, SYS_TFSRE0_EL1);
 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
-	/* disable tag checking */
-	set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
-			   SCTLR_EL1_TCF0_NONE);
-	/* reset tag generation mask */
-	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
+	/* disable tag checking and reset tag generation mask */
+	set_mte_ctrl(current, 0);
 }
 
 void mte_thread_switch(struct task_struct *next)
 {
+	mte_update_sctlr_user(next);
+
 	/*
 	 * Check if an async tag exception occurred at EL1.
 	 *
@@ -248,44 +228,25 @@ void mte_suspend_enter(void)
 	mte_check_tfsr_el1();
 }
 
-void mte_suspend_exit(void)
-{
-	if (!system_supports_mte())
-		return;
-
-	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl);
-	isb();
-}
-
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
-	u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
-	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
-		       SYS_GCR_EL1_EXCL_MASK;
+	u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+			SYS_GCR_EL1_EXCL_MASK) << MTE_CTRL_GCR_USER_EXCL_SHIFT;
 
 	if (!system_supports_mte())
 		return 0;
 
-	switch (arg & PR_MTE_TCF_MASK) {
-	case PR_MTE_TCF_NONE:
-		sctlr |= SCTLR_EL1_TCF0_NONE;
-		break;
-	case PR_MTE_TCF_SYNC:
-		sctlr |= SCTLR_EL1_TCF0_SYNC;
-		break;
-	case PR_MTE_TCF_ASYNC:
-		sctlr |= SCTLR_EL1_TCF0_ASYNC;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (task != current) {
-		task->thread.sctlr_user = sctlr;
-		task->thread.gcr_user_excl = gcr_excl;
-	} else {
-		set_task_sctlr_el1(sctlr);
-		set_gcr_el1_excl(gcr_excl);
+	if (arg & PR_MTE_TCF_ASYNC)
+		mte_ctrl |= MTE_CTRL_TCF_ASYNC;
+	if (arg & PR_MTE_TCF_SYNC)
+		mte_ctrl |= MTE_CTRL_TCF_SYNC;
+
+	task->thread.mte_ctrl = mte_ctrl;
+	if (task == current) {
+		preempt_disable();
+		mte_update_sctlr_user(task);
+		update_sctlr_el1(task->thread.sctlr_user);
+		preempt_enable();
 	}
 
 	return 0;
@@ -294,24 +255,18 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 long get_mte_ctrl(struct task_struct *task)
 {
 	unsigned long ret;
-	u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
+	u64 mte_ctrl = task->thread.mte_ctrl;
+	u64 incl = (~mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) &
+		   SYS_GCR_EL1_EXCL_MASK;
 
 	if (!system_supports_mte())
 		return 0;
 
 	ret = incl << PR_MTE_TAG_SHIFT;
-
-	switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
-	case SCTLR_EL1_TCF0_NONE:
-		ret |= PR_MTE_TCF_NONE;
-		break;
-	case SCTLR_EL1_TCF0_SYNC:
-		ret |= PR_MTE_TCF_SYNC;
-		break;
-	case SCTLR_EL1_TCF0_ASYNC:
+	if (mte_ctrl & MTE_CTRL_TCF_ASYNC)
 		ret |= PR_MTE_TCF_ASYNC;
-		break;
-	}
+	if (mte_ctrl & MTE_CTRL_TCF_SYNC)
+		ret |= PR_MTE_TCF_SYNC;
 
 	return ret;
 }
@@ -450,3 +405,54 @@ int mte_ptrace_copy_tags(struct task_struct *child, long request,
 
 	return ret;
 }
+
+static ssize_t mte_tcf_preferred_show(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	switch (per_cpu(mte_tcf_preferred, dev->id)) {
+	case MTE_CTRL_TCF_ASYNC:
+		return sysfs_emit(buf, "async\n");
+	case MTE_CTRL_TCF_SYNC:
+		return sysfs_emit(buf, "sync\n");
+	default:
+		return sysfs_emit(buf, "???\n");
+	}
+}
+
+static ssize_t mte_tcf_preferred_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	u64 tcf;
+
+	if (sysfs_streq(buf, "async"))
+		tcf = MTE_CTRL_TCF_ASYNC;
+	else if (sysfs_streq(buf, "sync"))
+		tcf = MTE_CTRL_TCF_SYNC;
+	else
+		return -EINVAL;
+
+	device_lock(dev);
+	per_cpu(mte_tcf_preferred, dev->id) = tcf;
+	device_unlock(dev);
+
+	return count;
+}
+static DEVICE_ATTR_RW(mte_tcf_preferred);
+
+static int register_mte_tcf_preferred_sysctl(void)
+{
+	unsigned int cpu;
+
+	if (!system_supports_mte())
+		return 0;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(mte_tcf_preferred, cpu) = MTE_CTRL_TCF_ASYNC;
+		device_create_file(get_cpu_device(cpu),
+				   &dev_attr_mte_tcf_preferred);
+	}
+
+	return 0;
+}
+subsys_initcall(register_mte_tcf_preferred_sysctl);
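
Note on the uapi-visible change in set_mte_ctrl()/get_mte_ctrl() above: PR_MTE_TCF_SYNC and PR_MTE_TCF_ASYNC are now independent bits rather than values of an enum, so a task may request both modes at once; mte_update_sctlr_user() then picks the CPU's preferred mode when the task allows it and falls back to the task's own setting otherwise. A minimal userspace sketch, not part of this commit; the prctl constants are the standard uapi ones, with the fallback defines included only for illustration on older headers:

/* Request both MTE tag check fault modes; the kernel resolves the
 * effective mode per CPU via mte_tcf_preferred. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL		/* fallbacks for older uapi headers */
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif
#ifndef PR_MTE_TCF_SYNC
#define PR_MTE_TCF_SYNC		(1UL << 1)
#define PR_MTE_TCF_ASYNC	(1UL << 2)
#define PR_MTE_TAG_SHIFT	3
#endif

int main(void)
{
	/* Enable tagged addresses, allow sync and async tag check faults,
	 * and include tags 1-15 in random tag generation. */
	unsigned long ctrl = PR_TAGGED_ADDR_ENABLE |
			     PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC |
			     (0xfffeUL << PR_MTE_TAG_SHIFT);

	if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0))
		perror("PR_SET_TAGGED_ADDR_CTRL");
	return 0;
}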
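The attribute registered at the end of the diff appears per CPU as /sys/devices/system/cpu/cpu<N>/mte_tcf_preferred, accepts the strings "async" or "sync", and defaults to "async" at boot (set by register_mte_tcf_preferred_sysctl() above). A hypothetical sketch of switching cpu0 to prefer synchronous checking, assuming the standard CPU device sysfs layout; equivalent to: echo sync > /sys/devices/system/cpu/cpu0/mte_tcf_preferred

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0/mte_tcf_preferred";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);	/* absent if the system lacks MTE support */
		return 1;
	}
	fputs("sync\n", f);	/* or "async" */
	return fclose(f) ? 1 : 0;
}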