author	Magnus Karlsson <magnus.karlsson@intel.com>	2019-12-19 13:39:21 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2021-05-22 10:59:48 +0200
commit	6b8fec241d19b00ba99d84ad4caa0f3c708f6246 (patch)
tree	c1e3c2ca36bfadfce875ba57a6b2b556414fe46c
parent	57ef65b34fc5ac05095f4facf746d78cce49bbb0 (diff)
xsk: Simplify detection of empty and full rings
[ Upstream commit 11cc2d21499cabe7e7964389634ed1de3ee91d33 ]

In order to set the correct return flags for poll, the xsk code has to
check if the Rx queue is empty and if the Tx queue is full. This code
was unnecessarily large and complex as it used the functions that are
used to update the local state from the global state (xskq_nb_free and
xskq_nb_avail). Since we are not doing this nor updating any data
dependent on this state, we can simplify the functions. Another benefit
from this is that we can also simplify the xskq_nb_free and
xskq_nb_avail functions in a later commit.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/1576759171-28550-3-git-send-email-magnus.karlsson@intel.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--	net/xdp/xsk_queue.h	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index fe96c0d039f2..cf7cbb5dd918 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -245,12 +245,15 @@ static inline void xskq_produce_flush_desc(struct xsk_queue *q)
 
 static inline bool xskq_full_desc(struct xsk_queue *q)
 {
-	return xskq_nb_avail(q, q->nentries) == q->nentries;
+	/* No barriers needed since data is not accessed */
+	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
+		q->nentries;
 }
 
 static inline bool xskq_empty_desc(struct xsk_queue *q)
 {
-	return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries;
+	/* No barriers needed since data is not accessed */
+	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
 }
 
 void xskq_set_umem(struct xsk_queue *q, struct xdp_umem_props *umem_props);
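
For readers unfamiliar with this ring layout, the sketch below is a minimal userspace model (not kernel code) of the full/empty checks the patch introduces: the producer and consumer are free-running unsigned counters, so the ring is full when they differ by exactly nentries and empty when they are equal. The struct and function names (demo_ring, demo_ring_full, demo_ring_empty) are made up for illustration; only the producer/consumer/nentries comparison mirrors the patch.

/*
 * Illustrative sketch only, not kernel code: a userspace model of the
 * full/empty checks added by this patch. Producer and consumer are
 * free-running u32 counters, so "full" means they differ by exactly the
 * ring size and "empty" means they are equal.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint32_t producer;	/* bumped by the producer for each entry */
	uint32_t consumer;	/* bumped by the consumer for each entry */
	uint32_t nentries;	/* ring size */
};

static bool demo_ring_full(const struct demo_ring *r)
{
	/* Unsigned subtraction handles counter wrap-around correctly. */
	return r->producer - r->consumer == r->nentries;
}

static bool demo_ring_empty(const struct demo_ring *r)
{
	return r->consumer == r->producer;
}

int main(void)
{
	struct demo_ring r = { .producer = 0, .consumer = 0, .nentries = 4 };

	printf("empty=%d full=%d\n", demo_ring_empty(&r), demo_ring_full(&r));

	r.producer += 4;	/* produce four entries: ring is now full */
	printf("empty=%d full=%d\n", demo_ring_empty(&r), demo_ring_full(&r));

	r.consumer += 4;	/* consume them all: ring is empty again */
	printf("empty=%d full=%d\n", demo_ring_empty(&r), demo_ring_full(&r));

	return 0;
}

In the kernel itself the counters live in memory shared with the other side of the ring, so the patch reads them through READ_ONCE(); as the added comments note, no memory barriers are needed because these checks do not access the data behind the descriptors.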