authorToke Høiland-Jørgensen <toke@redhat.com>2019-06-28 11:12:34 +0200
committerDaniel Borkmann <daniel@iogearbox.net>2019-06-29 01:31:09 +0200
commit43e74c0267a35d6f5127218054b2d80c7fe801f5 (patch)
tree8d2445e0be167139975629e9eb604c3506ee0164 /net/core
parent4b55cf290dc6bd3a9e5da26d1ad60e77aa88c8cf (diff)
bpf_xdp_redirect_map: Perform map lookup in eBPF helper
The bpf_redirect_map() helper used by XDP programs doesn't return any indication of whether it can successfully redirect to the map index it was given. Instead, BPF programs have to track this themselves, leading to programs using duplicate maps to track which entries are populated in the devmap.

This patch fixes this by moving the map lookup into the bpf_redirect_map() helper, which makes it possible to return failure to the eBPF program. The lower bits of the flags argument are used as the return code, which means that existing users who pass a '0' flag argument will get XDP_ABORTED. With this, a BPF program can check the return code from the helper call and react by, for instance, substituting a different redirect. This works for any type of map used for redirect.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Acked-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
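To illustrate the new semantics, here is a minimal sketch of an XDP program that checks the helper's return code and substitutes a different redirect on failure, one of the use cases the commit message describes. The map names (tx_port, backup_port), their sizes, and the use of rx_queue_index as the key are illustrative assumptions, not part of this patch.

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: compile with clang -O2 -target bpf against libbpf headers. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical devmaps holding egress ifindexes; not part of the patch. */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} tx_port SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} backup_port SEC(".maps");

SEC("xdp")
int xdp_redirect_fallback(struct xdp_md *ctx)
{
	__u32 key = ctx->rx_queue_index;
	int action;

	/* With this patch the lookup happens inside the helper; on an empty
	 * map entry the lower bits of the flags argument are returned
	 * instead of XDP_REDIRECT.  Passing 0 means a failed lookup yields
	 * XDP_ABORTED, matching what existing callers will now observe.
	 */
	action = bpf_redirect_map(&tx_port, key, 0);
	if (action == XDP_REDIRECT)
		return action;

	/* Entry not populated: substitute a different redirect.  XDP_PASS
	 * as the flags means that if this lookup also fails, the packet is
	 * handed to the normal network stack rather than aborted.
	 */
	return bpf_redirect_map(&backup_port, 0, XDP_PASS);
}

char _license[] SEC("license") = "GPL";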
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/filter.c  |  32
1 file changed, 18 insertions, 14 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index b4a062379bb9..4836264f82ee 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3605,17 +3605,13 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_redirect_info *ri)
{
u32 index = ri->tgt_index;
- void *fwd = NULL;
+ void *fwd = ri->tgt_value;
int err;
ri->tgt_index = 0;
+ ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL);
- fwd = __xdp_map_lookup_elem(map, index);
- if (unlikely(!fwd)) {
- err = -EINVAL;
- goto err;
- }
if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
xdp_do_flush_map();
@@ -3652,18 +3648,13 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
u32 index = ri->tgt_index;
- void *fwd = NULL;
+ void *fwd = ri->tgt_value;
int err = 0;
ri->tgt_index = 0;
+ ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL);
- fwd = __xdp_map_lookup_elem(map, index);
- if (unlikely(!fwd)) {
- err = -EINVAL;
- goto err;
- }
-
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
struct bpf_dtab_netdev *dst = fwd;
@@ -3732,6 +3723,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
ri->flags = flags;
ri->tgt_index = ifindex;
+ ri->tgt_value = NULL;
WRITE_ONCE(ri->map, NULL);
return XDP_REDIRECT;
@@ -3750,9 +3742,21 @@ BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
- if (unlikely(flags))
+ /* Lower bits of the flags are used as return code on lookup failure */
+ if (unlikely(flags > XDP_TX))
return XDP_ABORTED;
+ ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
+ if (unlikely(!ri->tgt_value)) {
+ /* If the lookup fails we want to clear out the state in the
+ * redirect_info struct completely, so that if an eBPF program
+ * performs multiple lookups, the last one always takes
+ * precedence.
+ */
+ WRITE_ONCE(ri->map, NULL);
+ return flags;
+ }
+
ri->flags = flags;
ri->tgt_index = ifindex;
WRITE_ONCE(ri->map, map);
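Note that because the lower bits of flags now double as the return code on lookup failure, the helper rejects any flags value above XDP_TX with XDP_ABORTED; the usable fallback actions are thus XDP_ABORTED, XDP_DROP, XDP_PASS and XDP_TX, and passing 0 preserves the XDP_ABORTED behaviour described above for existing callers (as in the sketch following the commit message).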