Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/core.c	55
-rw-r--r--	kernel/bpf/syscall.c	4
2 files changed, 49 insertions(+), 10 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1061968adcc1..a9e6c04d0f4a 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -598,6 +598,8 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
	bpf_fill_ill_insns(hdr, size);

	hdr->pages = size / PAGE_SIZE;
+	hdr->locked = 0;
+
	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
		     PAGE_SIZE - sizeof(*hdr));
	start = (get_random_int() % hole) & ~(alignment - 1);
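For context on the hunk above: bpf_jit_binary_alloc() starts the JITed image at a randomized, alignment-rounded offset inside its page allocation, and the new hdr->locked field starts out 0 (not yet read-only). Below is a standalone sketch of just the placement math; PAGE_SIZE, proglen, hdr_size, and alignment are assumed illustrative values, with rand() standing in for get_random_int():

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int proglen = 512, hdr_size = 32, alignment = 8;
	unsigned int size = 2 * PAGE_SIZE;	/* assumed two-page allocation */
	unsigned int hole, start;

	/* Slack between image end and allocation end, capped at one
	 * page, mirroring the min_t() computation above.
	 */
	hole = size - (proglen + hdr_size);
	if (hole > PAGE_SIZE - hdr_size)
		hole = PAGE_SIZE - hdr_size;

	/* Random start offset, rounded down to the alignment. */
	start = ((unsigned int)rand() % hole) & ~(alignment - 1);

	printf("hole=%u bytes, image starts at offset %u\n", hole, start);
	return 0;
}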
@@ -1448,6 +1450,33 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
	return 0;
}

+static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
+{
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+	int i, err;
+
+	for (i = 0; i < fp->aux->func_cnt; i++) {
+		err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
+		if (err)
+			return err;
+	}
+
+	return bpf_prog_check_pages_ro_single(fp);
+#endif
+	return 0;
+}
+
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+	fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
/**
* bpf_prog_select_runtime - select exec runtime for BPF program
* @fp: bpf_prog populated with internal BPF program
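Note that bpf_prog_check_pages_ro_single(), called by the new locked-pages check above, does not appear in this view: the diffstat is limited to 'kernel', and the helper belongs in include/linux/filter.h in this commit. A hedged sketch of the intended pairing (not the verbatim header code): bpf_prog_lock_ro() records in fp->locked whether set_memory_ro() succeeded, and the check helper maps a cleared flag to -ENOMEM so the loader can reject the program:

/* Sketch only; the real definitions sit in include/linux/filter.h. */
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	fp->locked = 1;
	if (set_memory_ro((unsigned long)fp, fp->pages))
		fp->locked = 0;
#endif
}

static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
{
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
	if (!fp->locked)
		return -ENOMEM;
#endif
	return 0;
}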
@@ -1458,13 +1487,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
*/
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+	/* In case of BPF to BPF calls, the verifier did all the prep
+	 * work with regard to JITing, etc.
+	 */
+	if (fp->bpf_func)
+		goto finalize;

-	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
-	fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+	bpf_prog_select_func(fp);

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during
@@ -1485,6 +1514,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
		if (*err)
			return fp;
	}
+
+finalize:
	bpf_prog_lock_ro(fp);

	/* The tail call compatibility check can only be done at
@@ -1493,7 +1524,17 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
	 * all eBPF JITs might immediately support all features.
	 */
	*err = bpf_check_tail_call(fp);
-
+	if (*err)
+		return fp;
+
+	/* Checkpoint: from this point onwards any cBPF -> eBPF or
+	 * native eBPF program is read-only. If we failed to change
+	 * the page attributes (e.g. allocation failure from
+	 * splitting large pages), then reject the whole program
+	 * in order to guarantee not ending up with any W+X pages
+	 * from BPF side in kernel.
+	 */
+	*err = bpf_prog_check_pages_ro_locked(fp);
	return fp;
}
EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
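The interpreters[] lookup moved into bpf_prog_select_func() above picks an interpreter entry point specialized for the program's stack depth, in 32-byte steps. A small userspace sketch of just the index math (the stack depths are chosen arbitrarily for illustration):

#include <stdio.h>

/* Mirrors interpreters[(round_up(stack_depth, 32) / 32) - 1] with
 * stack_depth clamped to at least 1, as in bpf_prog_select_func().
 */
static unsigned int interp_index(unsigned int stack_depth)
{
	if (stack_depth < 1)
		stack_depth = 1;
	return (stack_depth + 31) / 32 - 1;	/* round_up(x, 32) / 32 - 1 */
}

int main(void)
{
	unsigned int depths[] = { 1, 32, 33, 512 };
	unsigned int i;

	for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
		printf("stack depth %3u -> interpreters[%u]\n",
		       depths[i], interp_index(depths[i]));
	return 0;
}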
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0f62692fe635..35dc466641f2 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1353,9 +1353,7 @@ static int bpf_prog_load(union bpf_attr *attr)
	if (err < 0)
		goto free_used_maps;

-	/* eBPF program is ready to be JITed */
-	if (!prog->bpf_func)
-		prog = bpf_prog_select_runtime(prog, &err);
+	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;
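With the syscall.c guard removed, bpf_prog_select_runtime() is now called unconditionally and handles the already-JITed case (subprograms prepared by the verifier for BPF-to-BPF calls) internally. A condensed, non-verbatim control-flow sketch of the function after this patch, with the JIT and constant-blinding details elided:

/* Condensed sketch, not the kernel function verbatim. */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	if (fp->bpf_func)		/* verifier already JITed it */
		goto finalize;

	bpf_prog_select_func(fp);	/* interpreter or ret0 stub */
	/* ... JIT compilation / constant blinding, may set *err ... */

finalize:
	bpf_prog_lock_ro(fp);		/* best-effort set_memory_ro() */

	*err = bpf_check_tail_call(fp);
	if (*err)
		return fp;

	/* Reject outright if any image could not be made read-only,
	 * so no W+X BPF pages survive in the kernel.
	 */
	*err = bpf_prog_check_pages_ro_locked(fp);
	return fp;
}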