author		Stanislav Fomichev <sdf@google.com>	2019-07-29 14:51:10 -0700
committer	Alexei Starovoitov <ast@kernel.org>	2019-08-01 13:55:52 -0700
commit		9babe825da769cfbd7080f95e6bddbe98ce6b95d (patch)
tree		a801f1b825ea6f3eef8cdf1a9306984f91426747 /kernel/bpf/cgroup.c
parent		a98bf57391a24a68ec8381b9d35b60c2bee79150 (diff)
bpf: always allocate at least 16 bytes for setsockopt hook
Since we always allocate memory, allocate just a little bit more
for the BPF program in case it needs to override the user input
with a bigger value. The canonical example is TCP_CONGESTION,
where the user-supplied string may be too short to be overwritten
in place ("nv" -> "bbr" or "cubic").

16 bytes are chosen to match TCP_CA_NAME_MAX and can be extended
in the future if needed.
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
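
As an illustration of the use case, here is a minimal sketch of a cgroup/setsockopt BPF program (not part of this commit; the program name and locally defined constants are assumptions) that rewrites a short TCP_CONGESTION value to "cubic". The longer string only fits because the kernel copy of optval is now at least 16 bytes:

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch, not from this commit: a cgroup/setsockopt program
 * that replaces whatever congestion-control name userspace passed
 * (e.g. the 2-byte "nv") with "cubic".  The write only fits because
 * the kernel now allocates at least 16 bytes (TCP_CA_NAME_MAX).
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SOL_TCP		6
#define TCP_CONGESTION	13

SEC("cgroup/setsockopt")
int override_cc(struct bpf_sockopt *ctx)
{
	char cubic[] = "cubic";
	__u8 *optval_end = ctx->optval_end;
	__u8 *optval = ctx->optval;

	if (ctx->level != SOL_TCP || ctx->optname != TCP_CONGESTION)
		return 1;	/* not ours, let the kernel proceed */

	/* Bounds check required by the verifier; it can only succeed
	 * if the kernel buffer is big enough for the longer string.
	 */
	if (optval + sizeof(cubic) > optval_end)
		return 0;	/* reject (EPERM) instead of writing OOB */

	__builtin_memcpy(optval, cubic, sizeof(cubic));
	ctx->optlen = sizeof(cubic);

	return 1;	/* continue to the kernel with the new value */
}

char _license[] SEC("license") = "GPL";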
Diffstat (limited to 'kernel/bpf/cgroup.c')
-rw-r--r--	kernel/bpf/cgroup.c	17
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 0a00eaca6fae..6a6a154cfa7b 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -964,7 +964,6 @@ static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
 		return -ENOMEM;
 
 	ctx->optval_end = ctx->optval + max_optlen;
-	ctx->optlen = max_optlen;
 
 	return 0;
 }
@@ -984,7 +983,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 		.level = *level,
 		.optname = *optname,
 	};
-	int ret;
+	int ret, max_optlen;
 
 	/* Opportunistic check to see whether we have any BPF program
 	 * attached to the hook so we don't waste time allocating
@@ -994,10 +993,18 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
 		return 0;
 
-	ret = sockopt_alloc_buf(&ctx, *optlen);
+	/* Allocate a bit more than the initial user buffer for
+	 * BPF program. The canonical use case is overriding
+	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
+	 */
+	max_optlen = max_t(int, 16, *optlen);
+
+	ret = sockopt_alloc_buf(&ctx, max_optlen);
 	if (ret)
 		return ret;
 
+	ctx.optlen = *optlen;
+
 	if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
 		ret = -EFAULT;
 		goto out;
@@ -1016,7 +1023,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 	if (ctx.optlen == -1) {
 		/* optlen set to -1, bypass kernel */
 		ret = 1;
-	} else if (ctx.optlen > *optlen || ctx.optlen < -1) {
+	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
 		/* optlen is out of bounds */
 		ret = -EFAULT;
 	} else {
@@ -1063,6 +1070,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 	if (ret)
 		return ret;
 
+	ctx.optlen = max_optlen;
+
 	if (!retval) {
 		/* If kernel getsockopt finished successfully,
 		 * copy whatever was returned to the user back
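
For context, a hypothetical userspace sequence (not from the patch) that exercises this path, assuming a program like the sketch above is attached to the socket's cgroup: the 2-byte "nv" input is accepted, the hook rewrites it, and the socket ends up using "cubic".

/* Hypothetical userspace demo, not part of this commit.  The user
 * passes only 2 bytes; the cgroup/setsockopt hook can still grow the
 * value because the kernel copy of optval is now at least 16 bytes.
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
	char buf[16] = {};		/* TCP_CA_NAME_MAX */
	socklen_t len = sizeof(buf);
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Only strlen("nv") == 2 bytes of user input. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "nv", 2))
		perror("setsockopt");

	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, buf, &len))
		perror("getsockopt");

	printf("congestion control in use: %s\n", buf);	/* expect "cubic" */
	return 0;
}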