Diffstat (limited to 'arch/x86/mm/pkeys.c')
-rw-r--r--  arch/x86/mm/pkeys.c | 142
1 file changed, 134 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index e8c474451928..f88ce0e5efd9 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -11,6 +11,7 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
+#include <linux/debugfs.h> /* debugfs_create_u32() */
#include <linux/mm_types.h> /* mm_struct, vma, etc... */
#include <linux/pkeys.h> /* PKEY_* */
#include <uapi/asm-generic/mman-common.h>
@@ -21,8 +22,19 @@
int __execute_only_pkey(struct mm_struct *mm)
{
+ bool need_to_set_mm_pkey = false;
+ int execute_only_pkey = mm->context.execute_only_pkey;
int ret;
+ /* Do we need to assign a pkey for mm's execute-only maps? */
+ if (execute_only_pkey == -1) {
+ /* Go allocate one to use, which might fail */
+ execute_only_pkey = mm_pkey_alloc(mm);
+ if (execute_only_pkey < 0)
+ return -1;
+ need_to_set_mm_pkey = true;
+ }
+
/*
* We do not want to go through the relatively costly
* dance to set PKRU if we do not need to. Check it
@@ -32,22 +44,33 @@ int __execute_only_pkey(struct mm_struct *mm)
* can make fpregs inactive.
*/
preempt_disable();
- if (fpregs_active() &&
- !__pkru_allows_read(read_pkru(), PKEY_DEDICATED_EXECUTE_ONLY)) {
+ if (!need_to_set_mm_pkey &&
+ fpregs_active() &&
+ !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
preempt_enable();
- return PKEY_DEDICATED_EXECUTE_ONLY;
+ return execute_only_pkey;
}
preempt_enable();
- ret = arch_set_user_pkey_access(current, PKEY_DEDICATED_EXECUTE_ONLY,
+
+ /*
+ * Set up PKRU so that it denies access for everything
+ * other than execution.
+ */
+ ret = arch_set_user_pkey_access(current, execute_only_pkey,
PKEY_DISABLE_ACCESS);
/*
* If the PKRU-set operation failed somehow, free the pkey
* and return -1, effectively disabling execute-only support.
*/
- if (ret)
- return 0;
+ if (ret) {
+ mm_set_pkey_free(mm, execute_only_pkey);
+ return -1;
+ }
- return PKEY_DEDICATED_EXECUTE_ONLY;
+ /* We got one, store it and use it from here on out */
+ if (need_to_set_mm_pkey)
+ mm->context.execute_only_pkey = execute_only_pkey;
+ return execute_only_pkey;
}
static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
@@ -55,7 +78,7 @@ static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
/* Do this check first since the vm_flags should be hot */
if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
return false;
- if (vma_pkey(vma) != PKEY_DEDICATED_EXECUTE_ONLY)
+ if (vma_pkey(vma) != vma->vm_mm->context.execute_only_pkey)
return false;
return true;
@@ -99,3 +122,106 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
*/
return vma_pkey(vma);
}
+
+#define PKRU_AD_KEY(pkey) (PKRU_AD_BIT << ((pkey) * PKRU_BITS_PER_PKEY))
+
+/*
+ * Make the default PKRU value (at execve() time) as restrictive
+ * as possible. This ensures that any threads clone()'d early
+ * in the process's lifetime will not accidentally get access
+ * to data which is pkey-protected later on.
+ */
+u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
+ PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
+ PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |
+ PKRU_AD_KEY(10) | PKRU_AD_KEY(11) | PKRU_AD_KEY(12) |
+ PKRU_AD_KEY(13) | PKRU_AD_KEY(14) | PKRU_AD_KEY(15);
+
+/*
+ * Called from the FPU code when creating a fresh set of FPU
+ * registers. This is called from a very specific context where
+ * we know the FPU registers are safe for use and we can use PKRU
+ * directly. The fact that PKRU is only available when we are
+ * using eagerfpu mode makes this possible.
+ */
+void copy_init_pkru_to_fpregs(void)
+{
+ u32 init_pkru_value_snapshot = READ_ONCE(init_pkru_value);
+ /*
+ * Any write to PKRU takes it out of the XSAVE 'init
+ * state' which increases context switch cost. Avoid
+ * writing 0 when PKRU was already 0.
+ */
+ if (!init_pkru_value_snapshot && !read_pkru())
+ return;
+ /*
+ * Override the PKRU state that came from 'init_fpstate'
+ * with the baseline from the process.
+ */
+ write_pkru(init_pkru_value_snapshot);
+}
+
+static ssize_t init_pkru_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ unsigned int len;
+
+ len = sprintf(buf, "0x%x\n", init_pkru_value);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t init_pkru_write_file(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ char buf[32];
+ ssize_t len;
+ u32 new_init_pkru;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ /* Make the buffer a valid string that we can not overrun */
+ buf[len] = '\0';
+ if (kstrtouint(buf, 0, &new_init_pkru))
+ return -EINVAL;
+
+ /*
+ * Don't allow insane settings that will blow the system
+ * up immediately if someone attempts to disable access
+ * or writes to pkey 0.
+ */
+ if (new_init_pkru & (PKRU_AD_BIT|PKRU_WD_BIT))
+ return -EINVAL;
+
+ WRITE_ONCE(init_pkru_value, new_init_pkru);
+ return count;
+}
+
+static const struct file_operations fops_init_pkru = {
+ .read = init_pkru_read_file,
+ .write = init_pkru_write_file,
+ .llseek = default_llseek,
+};
+
+static int __init create_init_pkru_value(void)
+{
+ debugfs_create_file("init_pkru", S_IRUSR | S_IWUSR,
+ arch_debugfs_dir, NULL, &fops_init_pkru);
+ return 0;
+}
+late_initcall(create_init_pkru_value);
+
+static __init int setup_init_pkru(char *opt)
+{
+ u32 new_init_pkru;
+
+ if (kstrtouint(opt, 0, &new_init_pkru))
+ return 1;
+
+ WRITE_ONCE(init_pkru_value, new_init_pkru);
+
+ return 1;
+}
+__setup("init_pkru=", setup_init_pkru);