author     Catalin Marinas <catalin.marinas@arm.com>  2024-03-12 20:00:20 +0000
committer  Catalin Marinas <catalin.marinas@arm.com>  2024-03-13 10:53:20 +0000
commit     69ebc0182406541f0be0f086cdfff13ac56e7385 (patch)
tree       30d949dc857a11ebd5ebd5920b49cbc46edd65f6 /arch/arm64/kernel
parent     f1bbc4e9cfa4c0f29883171e9d01c01cbe94becc (diff)
Revert "arm64: mm: add support for WXN memory translation attribute"
This reverts commit 50e3ed0f93f4f62ed2aa83de5db6cb84ecdd5707.

The SCTLR_EL1.WXN control forces execute-never when a page has write
permissions. While the idea of hardening such write/exec combinations is
good, with permissions indirection enabled (FEAT_PIE) this control becomes
RES0. FEAT_PIE introduces a slightly different form of WXN which only has
an effect when the base permission is RWX and the write is toggled by the
permission overlay (FEAT_POE, not yet supported by the arm64 kernel).
Revert the patch for now.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/ZfGESD3a91lxH367@arm.com
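[Editor's note] For reference, a minimal sketch of the control being
reverted, assuming the kernel's BIT() macro and the arm64 read_sysreg()
helper; wxn_active() is an illustrative name, not a kernel API. WXN is
bit 19 of SCTLR_EL1, and with FEAT_PIE enabled the architecture treats
that bit as RES0, which is why the feature is being dropped here:

	#include <linux/bits.h>
	#include <asm/sysreg.h>

	/* Sketch: SCTLR_ELx_WXN mirrors the definition in asm/sysreg.h. */
	#define SCTLR_ELx_WXN	(BIT(19))

	/* Hypothetical helper: true when writable pages are forced XN. */
	static inline bool wxn_active(void)
	{
		/* Under FEAT_PIE this bit is RES0 and always reads as 0. */
		return read_sysreg(sctlr_el1) & SCTLR_ELx_WXN;
	}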
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/pi/idreg-override.c |  4
-rw-r--r--  arch/arm64/kernel/pi/map_kernel.c     | 23
2 files changed, 1 insertion, 26 deletions
diff --git a/arch/arm64/kernel/pi/idreg-override.c b/arch/arm64/kernel/pi/idreg-override.c
index bccfee34f62f..aad399796e81 100644
--- a/arch/arm64/kernel/pi/idreg-override.c
+++ b/arch/arm64/kernel/pi/idreg-override.c
@@ -189,7 +189,6 @@ static const struct ftr_set_desc sw_features __prel64_initconst = {
FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
FIELD("rodataoff", ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF, NULL),
- FIELD("nowxn", ARM64_SW_FEATURE_OVERRIDE_NOWXN, NULL),
{}
},
};
@@ -222,9 +221,8 @@ static const struct {
{ "arm64.nomops", "id_aa64isar2.mops=0" },
{ "arm64.nomte", "id_aa64pfr1.mte=0" },
{ "nokaslr", "arm64_sw.nokaslr=1" },
- { "rodata=off", "arm64_sw.rodataoff=1 arm64_sw.nowxn=1" },
+ { "rodata=off", "arm64_sw.rodataoff=1" },
{ "arm64.nolva", "id_aa64mmfr2.varange=0" },
- { "arm64.nowxn", "arm64_sw.nowxn=1" },
};
static int __init parse_hexdigit(const char *p, u64 *v)
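[Editor's note] The hunks above trim the aliases[] table that maps
friendly command-line options onto raw feature-override strings (after
this revert, "rodata=off" expands to "arm64_sw.rodataoff=1" only). A
minimal sketch of that expansion step, assuming the table's alias/feature
field names from idreg-override.c; expand_alias() is a hypothetical
helper, not the kernel's own parser, which feeds the expansion back
through its option matcher:

	/*
	 * Sketch only: look up a command-line word in the aliases[]
	 * table defined above and return its override string, or NULL
	 * if it should be parsed as a direct override instead.
	 */
	static const char *expand_alias(const char *opt)
	{
		for (int i = 0; i < ARRAY_SIZE(aliases); i++)
			if (!strcmp(opt, aliases[i].alias))
				return aliases[i].feature;
		return NULL;
	}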
diff --git a/arch/arm64/kernel/pi/map_kernel.c b/arch/arm64/kernel/pi/map_kernel.c
index cac1e1f63c44..5fa08e13e17e 100644
--- a/arch/arm64/kernel/pi/map_kernel.c
+++ b/arch/arm64/kernel/pi/map_kernel.c
@@ -132,25 +132,6 @@ static void __init map_kernel(u64 kaslr_offset, u64 va_offset, int root_level)
idmap_cpu_replace_ttbr1(swapper_pg_dir);
}
-static void noinline __section(".idmap.text") disable_wxn(void)
-{
- u64 sctlr = read_sysreg(sctlr_el1) & ~SCTLR_ELx_WXN;
-
- /*
- * We cannot safely clear the WXN bit while the MMU and caches are on,
- * so turn the MMU off, flush the TLBs and turn it on again but with
- * the WXN bit cleared this time.
- */
- asm(" msr sctlr_el1, %0 ;"
- " isb ;"
- " tlbi vmalle1 ;"
- " dsb nsh ;"
- " isb ;"
- " msr sctlr_el1, %1 ;"
- " isb ;"
- :: "r"(sctlr & ~SCTLR_ELx_M), "r"(sctlr));
-}
-
static void noinline __section(".idmap.text") set_ttbr0_for_lpa2(u64 ttbr)
{
u64 sctlr = read_sysreg(sctlr_el1);
@@ -248,10 +229,6 @@ asmlinkage void __init early_map_kernel(u64 boot_status, void *fdt)
if (va_bits > VA_BITS_MIN)
sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits));
- if (IS_ENABLED(CONFIG_ARM64_WXN) &&
- arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_NOWXN))
- disable_wxn();
-
/*
* The virtual KASLR displacement modulo 2MiB is decided by the
* physical placement of the image, as otherwise, we might not be able