author     Björn Töpel <bjorn.topel@intel.com>        2019-12-19 07:10:02 +0100
committer  Alexei Starovoitov <ast@kernel.org>        2019-12-19 21:09:43 -0800
commit     e312b9e706ed6d94f6cc9088fcd9fbd81de4525c
tree       9e7eeaba755c57e74ed2c8707ddedfad909e91af /net/core
parent     fb5aacdf3603ccbafe1da74eecd132eb4a31e53f
xsk: Make xskmap flush_list common for all map instances
The xskmap flush list is used to track entries that need to be flushed
via the xdp_do_flush_map() function. This list used to be per-map, but
there is really no reason for that. Instead, make the flush list global
for all xskmaps, which simplifies __xsk_map_flush() and xsk_map_alloc().
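
For reference, the shape of such a global flush list is a per-CPU list head
shared by all xskmap instances: __xsk_map_redirect() queues a socket on the
current CPU's list and __xsk_map_flush() walks it. The sketch below is only an
illustration of that idea, not the actual net/xdp/xsk.c code; the
xskmap_flush_list, flush_node, xsk_rcv() and xsk_flush() names are assumptions.

	/*
	 * Illustrative sketch only -- not the kernel's net/xdp/xsk.c code.
	 * One flush list per CPU, shared by every xskmap instance; the names
	 * xskmap_flush_list, flush_node, xsk_rcv() and xsk_flush() are assumed.
	 */
	#include <linux/list.h>
	#include <linux/percpu.h>
	#include <net/xdp_sock.h>

	static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

	/* Called from __bpf_tx_xdp_map(): receive and queue for a later flush. */
	int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
	{
		struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
		int err;

		err = xsk_rcv(xs, xdp);			/* assumed receive helper */
		if (err)
			return err;

		if (list_empty(&xs->flush_node))	/* assumed member of struct xdp_sock */
			list_add(&xs->flush_node, flush_list);
		return 0;
	}

	/* Called from xdp_do_flush_map(): no map argument is needed any more. */
	void __xsk_map_flush(void)
	{
		struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
		struct xdp_sock *xs, *tmp;

		list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
			xsk_flush(xs);			/* assumed flush helper */
			__list_del_clearprev(&xs->flush_node);
		}
	}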
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20191219061006.21980-5-bjorn.topel@gmail.com
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/filter.c | 9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index a411f7835dee..c51678c473c5 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3511,8 +3511,7 @@ err:
 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 			    struct bpf_map *map,
-			    struct xdp_buff *xdp,
-			    u32 index)
+			    struct xdp_buff *xdp)
 {
 	int err;
 
@@ -3537,7 +3536,7 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
 	case BPF_MAP_TYPE_XSKMAP: {
 		struct xdp_sock *xs = fwd;
 
-		err = __xsk_map_redirect(map, xdp, xs);
+		err = __xsk_map_redirect(xs, xdp);
 		return err;
 	}
 	default:
@@ -3562,7 +3561,7 @@ void xdp_do_flush_map(void)
 			__cpu_map_flush(map);
 			break;
 		case BPF_MAP_TYPE_XSKMAP:
-			__xsk_map_flush(map);
+			__xsk_map_flush();
 			break;
 		default:
 			break;
@@ -3619,7 +3618,7 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 	if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
 		xdp_do_flush_map();
 
-	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
+	err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
 	if (unlikely(err))
 		goto err;
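
The driver-facing calling pattern is unchanged by this patch: a driver still
calls xdp_do_redirect() per packet and xdp_do_flush_map() once at the end of
its NAPI poll; only the BPF_MAP_TYPE_XSKMAP branch of the flush now walks the
shared list instead of a per-map one. A minimal sketch of that pattern follows;
all foo_* helpers and the ring layout are made up for illustration.

	/* Illustrative driver-side pattern only; all foo_* names are hypothetical. */
	#include <linux/filter.h>
	#include <linux/netdevice.h>
	#include <net/xdp.h>

	static int foo_napi_poll(struct napi_struct *napi, int budget)
	{
		struct foo_rx_ring *ring = container_of(napi, struct foo_rx_ring, napi);
		int done = 0;

		while (done < budget && foo_rx_desc_ready(ring)) {
			struct xdp_buff xdp;

			foo_fill_xdp_buff(ring, &xdp);	/* hypothetical: descriptor -> xdp_buff */

			switch (bpf_prog_run_xdp(ring->xdp_prog, &xdp)) {
			case XDP_REDIRECT:
				/* May queue an xdp_sock on the (now global) flush list. */
				xdp_do_redirect(ring->netdev, &xdp, ring->xdp_prog);
				break;
			default:
				foo_drop(ring, &xdp);	/* hypothetical */
				break;
			}
			done++;
		}

		/* One flush per poll; with this patch the XSKMAP case is __xsk_map_flush(). */
		xdp_do_flush_map();
		return done;
	}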