author	Brendan Jackman <jackmanb@google.com>	2021-04-29 13:05:10 +0000
committer	Andrii Nakryiko <andrii@kernel.org>	2021-05-03 09:54:12 -0700
commit	2a30f9440640c418bcfbea9b2b344d268b58e0a2 (patch)
tree	c49a61cb379ced5ebd365645b999382e1399779a /tools
parent	801c6058d14a82179a7ee17a4b532cac6fad067f (diff)
download	linux-2a30f9440640c418bcfbea9b2b344d268b58e0a2.tar.gz
	linux-2a30f9440640c418bcfbea9b2b344d268b58e0a2.tar.bz2
	linux-2a30f9440640c418bcfbea9b2b344d268b58e0a2.zip
libbpf: Fix signed overflow in ringbuf_process_ring
One of our benchmarks running in (Google-internal) CI pushes data through the
ringbuf faster than userspace is able to consume it. In this case it seems
we're actually able to get >INT_MAX entries in a single ring_buffer__consume()
call. ASAN detected that cnt overflows in this case.

Fix by using a 64-bit counter internally and then capping the result to
INT_MAX before converting to the int return type. Do the same for
ring_buffer__poll().

Fixes: bf99c936f947 ("libbpf: Add BPF ring buffer support")
Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210429130510.1621665-1-jackmanb@google.com
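As a standalone sketch of the accumulate-and-cap pattern this patch applies
(not libbpf code; consume_one_ring() is a hypothetical stand-in for the
per-ring record count that ringbuf_process_ring() returns):

#include <limits.h>
#include <stdint.h>

/* hypothetical: records consumed from ring i, or negative error */
int64_t consume_one_ring(int i);

int consume_all(int ring_cnt)
{
	/* 64-bit accumulator: the total across rings may exceed INT_MAX */
	int64_t res = 0;
	int i;

	for (i = 0; i < ring_cnt; i++) {
		int64_t err = consume_one_ring(i);

		if (err < 0)
			return (int)err; /* propagate callback error */
		res += err; /* would be signed overflow (UB) in a plain int */
	}
	/* cap before narrowing back to the int return type */
	return res > INT_MAX ? INT_MAX : (int)res;
}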
Diffstat (limited to 'tools')
-rw-r--r--	tools/lib/bpf/ringbuf.c	30
1 file changed, 21 insertions, 9 deletions
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index e7a8d847161f..1d80ad4e0de8 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -202,9 +202,11 @@ static inline int roundup_len(__u32 len)
 	return (len + 7) / 8 * 8;
 }
 
-static int ringbuf_process_ring(struct ring *r)
+static int64_t ringbuf_process_ring(struct ring *r)
 {
-	int *len_ptr, len, err, cnt = 0;
+	int *len_ptr, len, err;
+	/* 64-bit to avoid overflow in case of extreme application behavior */
+	int64_t cnt = 0;
 	unsigned long cons_pos, prod_pos;
 	bool got_new_data;
 	void *sample;
@@ -244,12 +246,14 @@ done:
 }
 
 /* Consume available ring buffer(s) data without event polling.
- * Returns number of records consumed across all registered ring buffers, or
- * negative number if any of the callbacks return error.
+ * Returns number of records consumed across all registered ring buffers (or
+ * INT_MAX, whichever is less), or negative number if any of the callbacks
+ * return error.
  */
 int ring_buffer__consume(struct ring_buffer *rb)
 {
-	int i, err, res = 0;
+	int64_t err, res = 0;
+	int i;
 
 	for (i = 0; i < rb->ring_cnt; i++) {
 		struct ring *ring = &rb->rings[i];
@@ -259,18 +263,24 @@ int ring_buffer__consume(struct ring_buffer *rb)
 			return err;
 		res += err;
 	}
+	if (res > INT_MAX)
+		return INT_MAX;
 	return res;
 }
 
 /* Poll for available data and consume records, if any are available.
- * Returns number of records consumed, or negative number, if any of the
- * registered callbacks returned error.
+ * Returns number of records consumed (or INT_MAX, whichever is less), or
+ * negative number, if any of the registered callbacks returned error.
  */
 int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
 {
-	int i, cnt, err, res = 0;
+	int i, cnt;
+	int64_t err, res = 0;
 
 	cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
+	if (cnt < 0)
+		return -errno;
+
 	for (i = 0; i < cnt; i++) {
 		__u32 ring_id = rb->events[i].data.fd;
 		struct ring *ring = &rb->rings[ring_id];
@@ -280,7 +290,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
 			return err;
 		res += err;
 	}
-	return cnt < 0 ? -errno : res;
+	if (res > INT_MAX)
+		return INT_MAX;
+	return res;
 }
 
/* Get an fd that can be used to sleep until data is available in the ring(s) */
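
For context, a hedged caller-side sketch of the API whose doc comments change
above (assumes map_fd refers to a BPF_MAP_TYPE_RINGBUF map already loaded into
the kernel; error handling trimmed for brevity):

#include <bpf/libbpf.h>
#include <stdio.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	/* returning a negative value aborts consumption with that error */
	return 0;
}

int drain(int map_fd)
{
	struct ring_buffer *rb;
	int n;

	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -1;

	/* With this fix, n saturates at INT_MAX instead of overflowing when
	 * more than INT_MAX records are consumed in one call; a negative n
	 * means a callback or epoll_wait() failed. */
	n = ring_buffer__poll(rb, 100 /* timeout_ms */);
	printf("consumed %d records\n", n);

	ring_buffer__free(rb);
	return n;
}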