author    Daniel Borkmann <daniel@iogearbox.net>    2019-08-16 23:05:36 +0100
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2019-08-25 10:50:03 +0200
commit    a1fe647042affe713a17243cd10e9b25f3d83948 (patch)
tree      d978d1cec50075dfadaf1a35bdec895adb62f631 /kernel
parent    b3c69acfc7cea96c68b9af8af9ff13415839bbec (diff)
bpf: add bpf_jit_limit knob to restrict unpriv allocations
commit ede95a63b5e84ddeea6b0c473b36ab8bfd8c6ce3 upstream.

Rick reported that the BPF JIT could potentially fill the entire module
space with BPF programs from unprivileged users, which would prevent
later attempts to load normal kernel modules or privileged BPF programs,
for example. If the JIT was enabled but failed to generate the image,
then before commit 290af86629b2 ("bpf: introduce BPF_JIT_ALWAYS_ON
config") we would always fall back to the BPF interpreter. Nowadays,
when CONFIG_BPF_JIT_ALWAYS_ON is set, the load instead aborts with a
failure, since the BPF interpreter has been compiled out.

Add a global limit and enforce it for unprivileged users, such that with
the BPF interpreter compiled out we fail once the limit has been
reached, or we fall back to the BPF interpreter earlier without using
module memory if the latter was compiled in. In a next step, fair share
among unprivileged users can be resolved, in particular for the case
where we would fail hard once the limit is reached.

Fixes: 290af86629b2 ("bpf: introduce BPF_JIT_ALWAYS_ON config")
Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
Co-Developed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Jann Horn <jannh@google.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: LKML <linux-kernel@vger.kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
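The enforcement added below follows a charge-then-check pattern:
optimistically add the requested pages to a global counter, then roll the
charge back if the new total exceeds the budget and the caller lacks the
privilege to overrun it. A minimal user-space C sketch of the same pattern
(the function names and the tiny 4-page budget here are illustrative, not
kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_long jit_current;         /* stand-in for bpf_jit_current */
    static const long jit_limit_pages = 4;  /* tiny budget, for demonstration */

    /* Charge then check, as in bpf_jit_charge_modmem(): the add happens
     * first, and only an unprivileged caller that pushed the total over
     * the limit is rolled back and refused. */
    static int charge(long pages, bool privileged)
    {
            long new_total = atomic_fetch_add(&jit_current, pages) + pages;

            if (new_total > jit_limit_pages && !privileged) {
                    atomic_fetch_sub(&jit_current, pages); /* undo the charge */
                    return -1;                             /* kernel: -EPERM */
            }
            return 0;
    }

    static void uncharge(long pages)        /* as in bpf_jit_uncharge_modmem() */
    {
            atomic_fetch_sub(&jit_current, pages);
    }

    int main(void)
    {
            printf("unpriv 3 pages: %d\n", charge(3, false)); /*  0: within budget */
            printf("unpriv 3 pages: %d\n", charge(3, false)); /* -1: over budget   */
            printf("priv   3 pages: %d\n", charge(3, true));  /*  0: may overrun   */
            uncharge(3);
            uncharge(3);
            return 0;
    }

On kernels carrying the full upstream patch, the budget itself is tunable
as the net.core.bpf_jit_limit sysctl; that wiring lives under net/core/
and is outside this diffstat, which is limited to kernel/.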
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c  49
1 file changed, 46 insertions(+), 3 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 661fb837b168..2c40159038ef 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -290,10 +290,13 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
}

#ifdef CONFIG_BPF_JIT
+# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000)
+
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden __read_mostly;
int bpf_jit_kallsyms __read_mostly;
+int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT;

static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -489,27 +492,64 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
return ret;
}

+static atomic_long_t bpf_jit_current;
+
+#if defined(MODULES_VADDR)
+static int __init bpf_jit_charge_init(void)
+{
+ /* Only used as heuristic here to derive limit. */
+ bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
+ PAGE_SIZE), INT_MAX);
+ return 0;
+}
+pure_initcall(bpf_jit_charge_init);
+#endif
+
+static int bpf_jit_charge_modmem(u32 pages)
+{
+ if (atomic_long_add_return(pages, &bpf_jit_current) >
+ (bpf_jit_limit >> PAGE_SHIFT)) {
+ if (!capable(CAP_SYS_ADMIN)) {
+ atomic_long_sub(pages, &bpf_jit_current);
+ return -EPERM;
+ }
+ }
+
+ return 0;
+}
+
+static void bpf_jit_uncharge_modmem(u32 pages)
+{
+ atomic_long_sub(pages, &bpf_jit_current);
+}
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
struct bpf_binary_header *hdr;
- unsigned int size, hole, start;
+ u32 size, hole, start, pages;

/* Most of BPF filters are really small, but if some of them
* fill a page, allow at least 128 extra bytes to insert a
* random section of illegal instructions.
*/
size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
+ pages = size / PAGE_SIZE;
+
+ if (bpf_jit_charge_modmem(pages))
+ return NULL;
hdr = module_alloc(size);
- if (hdr == NULL)
+ if (!hdr) {
+ bpf_jit_uncharge_modmem(pages);
return NULL;
+ }

/* Fill space with illegal/arch-dep instructions. */
bpf_fill_ill_insns(hdr, size);
- hdr->pages = size / PAGE_SIZE;
+ hdr->pages = pages;
hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
PAGE_SIZE - sizeof(*hdr));
start = (get_random_int() % hole) & ~(alignment - 1);
@@ -522,7 +562,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,

void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
+ u32 pages = hdr->pages;
+
module_memfree(hdr);
+ bpf_jit_uncharge_modmem(pages);
}

/* This symbol is only overridden by archs that have different
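As a sanity check on the constants in this patch: with 4 KiB pages,
BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000) comes to roughly 156 MiB, and
where MODULES_VADDR is defined, bpf_jit_charge_init() replaces it with a
quarter of the architecture's module area, rounded up to a page and
clamped to INT_MAX; bpf_jit_charge_modmem() then compares page counts,
i.e. against bpf_jit_limit >> PAGE_SHIFT. A stand-alone sketch of that
arithmetic; the 1520 MiB x86-64 module mapping size is an assumption used
only for illustration:

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    #define PAGE_SIZE 4096ULL  /* assumption: 4 KiB pages */

    /* Mirror of the kernel's round_up() for power-of-two alignment. */
    static uint64_t round_up_pow2(uint64_t x, uint64_t align)
    {
            return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
            /* Default used when MODULES_VADDR is not defined. */
            uint64_t def = PAGE_SIZE * 40000;
            printf("default limit: %llu bytes (~%llu MiB)\n",
                   (unsigned long long)def,
                   (unsigned long long)(def >> 20));

            /* Heuristic: a quarter of the module area, page-aligned,
             * clamped to INT_MAX (1520 MiB assumed for x86-64). */
            uint64_t modules_len = 1520ULL << 20;
            uint64_t limit = round_up_pow2(modules_len >> 2, PAGE_SIZE);
            if (limit > INT_MAX)
                    limit = INT_MAX;
            printf("derived limit: %llu bytes (~%llu MiB)\n",
                   (unsigned long long)limit,
                   (unsigned long long)(limit >> 20));
            return 0;
    }

Under those assumptions the derived limit works out to about 380 MiB,
comfortably below the INT_MAX clamp, so the clamp only matters on
configurations with a very large module area.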