author	Björn Töpel <bjorn.topel@intel.com>	2021-03-08 12:29:06 +0100
committer	Daniel Borkmann <daniel@iogearbox.net>	2021-03-10 01:06:34 +0100
commit	e6a4750ffe9d701c4d55212b14b615e63571d235 (patch)
tree	0e807fb18dde8e4d6c7eb0495f25be9183bfdb2c /net/xdp
parent	11d39cfeecfc9d92a5faa2a55c228e796478e0cb (diff)
bpf, xdp: Make bpf_redirect_map() a map operation
Currently the bpf_redirect_map() implementation dispatches to the correct map-lookup function via a switch-statement. To avoid the dispatching, this change adds bpf_redirect_map() as a map operation. Each map provides its own bpf_redirect_map() version, and the correct function is automatically selected by the BPF verifier.

A nice side-effect of the code movement is that the map lookup functions are now local to the map implementation files, which removes one additional function call.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/bpf/20210308112907.559576-2-bjorn.topel@gmail.com
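As context for how this dispatch is exercised (not part of the patch itself), a minimal XDP program that redirects into an XSKMAP might look as follows. The map name, map size, and the use of the RX queue index as key are illustrative assumptions; after this change, the verifier binds the bpf_redirect_map() call to the map's own map_redirect operation (xsk_map_redirect below) instead of going through a runtime switch on the map type.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);	/* illustrative size */
	__type(key, __u32);
	__type(value, __u32);
} xsks_map SEC(".maps");		/* hypothetical map name */

SEC("xdp")
int xdp_redirect_xsk(struct xdp_md *ctx)
{
	/* Key the XSKMAP by RX queue index (a common convention).
	 * The bpf_redirect_map() call below is what the verifier now
	 * resolves to xsk_map_redirect()/__xsk_map_lookup_elem()
	 * from this patch; XDP_PASS is the fallback action if the
	 * lookup fails. */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
}

char _license[] SEC("license") = "GPL";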
Diffstat (limited to 'net/xdp')
-rw-r--r--	net/xdp/xskmap.c	16
1 file changed, 16 insertions, 0 deletions
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 113fd9017203..fbeb4870f798 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -125,6 +125,16 @@ static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
return insn - insn_buf;
}
+static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
+{
+ struct xsk_map *m = container_of(map, struct xsk_map, map);
+
+ if (key >= map->max_entries)
+ return NULL;
+
+ return READ_ONCE(m->xsk_map[key]);
+}
+
static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
WARN_ON_ONCE(!rcu_read_lock_held());
@@ -215,6 +225,11 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
return 0;
}
+static int xsk_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+{
+ return __bpf_xdp_redirect_map(map, ifindex, flags, __xsk_map_lookup_elem);
+}
+
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
struct xdp_sock **map_entry)
{
@@ -247,4 +262,5 @@ const struct bpf_map_ops xsk_map_ops = {
.map_check_btf = map_check_no_btf,
.map_btf_name = "xsk_map",
.map_btf_id = &xsk_map_btf_id,
+ .map_redirect = xsk_map_redirect,
};
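For completeness, the entry that __xsk_map_lookup_elem() returns is installed from user space by updating the XSKMAP with an AF_XDP socket file descriptor. A hedged sketch using libbpf follows; the function name, the file-descriptor arguments, and keying by queue id are assumptions, not part of this patch.

#include <bpf/bpf.h>

/* Insert an AF_XDP socket into the XSKMAP so that a later
 * bpf_redirect_map() -> xsk_map_redirect() -> __xsk_map_lookup_elem()
 * chain can find it at redirect time. */
int register_xsk(int xsks_map_fd, __u32 queue_id, int xsk_fd)
{
	/* key: RX queue index, value: the AF_XDP socket's fd */
	return bpf_map_update_elem(xsks_map_fd, &queue_id, &xsk_fd, BPF_ANY);
}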