path: root/net/bpf
author	Stanislav Fomichev <sdf@google.com>	2019-02-19 10:54:17 -0800
committer	Daniel Borkmann <daniel@iogearbox.net>	2019-02-25 22:21:22 +0100
commit	a439184d515fbf4805f57d11fa5dfd4524d2c0eb (patch)
tree	a5e9805e70b42c76bb3ce9f7a79a77f1c22d163a /net/bpf
parent	fd92d6648feb83888e40f0aa2f74e1d9bbd2ad7d (diff)
bpf/test_run: fix unkillable BPF_PROG_TEST_RUN for flow dissector
Syzbot found out that running BPF_PROG_TEST_RUN with repeat=0xffffffff makes the process unkillable. The problem is that when CONFIG_PREEMPT is enabled, we never see need_resched() return true. This is due to the fact that preempt_enable() (which we do in bpf_test_run_one on each iteration) now handles resched if it's needed.

Let's disable preemption for the whole run, not per test. In this case we can properly see whether resched is needed. Let's also properly return -EINTR to userspace in case of a signal interrupt.

This is a follow-up to a recently fixed issue in bpf_test_run; see commit df1a2cb7c74b ("bpf/test_run: fix unkillable BPF_PROG_TEST_RUN").

Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
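To make the mechanism concrete, here is a condensed, hypothetical kernel-style sketch of the loop shape the patch moves to. It is not the upstream hunk itself: the function name test_run_loop_sketch is invented, and BPF_PROG_RUN() stands in for the flow-dissector call __skb_flow_bpf_dissect(). Holding preemption off across iterations is what lets need_resched() become observable inside the loop under CONFIG_PREEMPT, and a pending signal now aborts the run with -EINTR instead of leaving the task unkillable.

#include <linux/filter.h>
#include <linux/ktime.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

static int test_run_loop_sketch(struct bpf_prog *prog, struct sk_buff *skb,
				u32 repeat, u32 *retval)
{
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	/* Hold RCU and preemption across the whole run.  Because
	 * preempt_enable() is no longer called between iterations, a
	 * pending reschedule stays pending, so need_resched() below can
	 * actually observe it under CONFIG_PREEMPT.
	 */
	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		*retval = BPF_PROG_RUN(prog, skb); /* one test iteration */

		/* Abort the run on a signal and report -EINTR. */
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		/* Yield voluntarily: drop the locks, reschedule, and
		 * reacquire before the next iteration.
		 */
		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	(void)time_spent; /* the real code averages this into a duration */
	return ret;
}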
Diffstat (limited to 'net/bpf')
-rw-r--r--	net/bpf/test_run.c	26
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 2c5172b33209..619655db8d9e 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -293,31 +293,45 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 	if (!repeat)
 		repeat = 1;
 
+	rcu_read_lock();
+	preempt_disable();
 	time_start = ktime_get_ns();
 	for (i = 0; i < repeat; i++) {
-		preempt_disable();
-		rcu_read_lock();
 		retval = __skb_flow_bpf_dissect(prog, skb,
 						&flow_keys_dissector,
 						&flow_keys);
-		rcu_read_unlock();
-		preempt_enable();
+
+		if (signal_pending(current)) {
+			preempt_enable();
+			rcu_read_unlock();
+
+			ret = -EINTR;
+			goto out;
+		}
 
 		if (need_resched()) {
-			if (signal_pending(current))
-				break;
 			time_spent += ktime_get_ns() - time_start;
+			preempt_enable();
+			rcu_read_unlock();
+
 			cond_resched();
+
+			rcu_read_lock();
+			preempt_disable();
 			time_start = ktime_get_ns();
 		}
 	}
 	time_spent += ktime_get_ns() - time_start;
+	preempt_enable();
+	rcu_read_unlock();
+
 	do_div(time_spent, repeat);
 	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
 
 	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
 			      retval, duration);
+out:
 	kfree_skb(skb);
 	kfree(sk);
 	return ret;
 }
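For context, the failure mode is easy to drive from userspace. Below is a minimal reproducer sketch using the raw bpf(2) syscall; the helper name bpf_test_run_repeat is invented, prog_fd is assumed to be an already-loaded BPF_PROG_TYPE_FLOW_DISSECTOR program, and data/size a packet buffer the dissector can parse. Before this fix, repeat=0xffffffff kept the task spinning in the kernel with signals ignored; with the fix, delivering a signal makes the syscall return -1 with errno set to EINTR.

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Drive BPF_PROG_TEST_RUN with the repeat count from the syzbot report. */
static long bpf_test_run_repeat(int prog_fd, void *data, uint32_t size)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (uint64_t)(uintptr_t)data;
	attr.test.data_size_in = size;
	attr.test.repeat = 0xffffffff;	/* pre-fix: unkillable loop */

	/* With this patch, a pending signal makes the call fail with
	 * errno EINTR instead of leaving the task stuck in the kernel.
	 */
	return syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
}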