Diffstat (limited to 'net/xdp')

-rw-r--r--	net/xdp/xsk.c           |  7 +------
-rw-r--r--	net/xdp/xsk.h           |  1 -
-rw-r--r--	net/xdp/xsk_buff_pool.c |  6 +++---
-rw-r--r--	net/xdp/xsk_diag.c      | 14 ++++++--------
-rw-r--r--	net/xdp/xskmap.c        |  5 -----

5 files changed, 10 insertions(+), 23 deletions(-)
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 5eb6662f562a..3895697f8540 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -33,12 +33,6 @@

 static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
-{
-	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
-		(xs->pool->fq || READ_ONCE(xs->fq_tmp));
-}
-
 void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
 {
 	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
@@ -717,6 +711,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 					  dev, qid);
 		if (err) {
 			xp_destroy(xs->pool);
+			xs->pool = NULL;
 			sockfd_put(sock);
 			goto out_unlock;
 		}
diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h
index da1f73e43924..b9e896cee5bb 100644
--- a/net/xdp/xsk.h
+++ b/net/xdp/xsk.h
@@ -39,7 +39,6 @@ static inline struct xdp_sock *xdp_sk(struct sock *sk)
 	return (struct xdp_sock *)sk;
 }

-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
 void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
 			     struct xdp_sock **map_entry);
 int xsk_map_inc(struct xsk_map *map);
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 795d7c81c0ca..e63fadd000db 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -287,7 +287,7 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_devi
 		return NULL;

 	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
-	if (!dma_map) {
+	if (!dma_map->dma_pages) {
 		kfree(dma_map);
 		return NULL;
 	}
@@ -296,7 +296,7 @@ static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_devi
 	dma_map->dev = dev;
 	dma_map->dma_need_sync = false;
 	dma_map->dma_pages_cnt = nr_pages;
-	refcount_set(&dma_map->users, 0);
+	refcount_set(&dma_map->users, 1);
 	list_add(&dma_map->list, &umem->xsk_dma_list);
 	return dma_map;
 }
@@ -369,7 +369,6 @@ static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_
 	pool->dev = dma_map->dev;
 	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
 	pool->dma_need_sync = dma_map->dma_need_sync;
-	refcount_inc(&dma_map->users);
 	memcpy(pool->dma_pages, dma_map->dma_pages,
 	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

@@ -390,6 +389,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 	if (err)
 		return err;

+	refcount_inc(&dma_map->users);
 	return 0;
 }
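The xsk_buff_pool.c hunks above switch the shared DMA map to a conventional refcount lifecycle: the count now starts at 1 for the creator's own reference (instead of 0), and the extra reference for an attaching pool is taken in xp_dma_map() only after all fallible setup has succeeded, rather than inside xp_init_dma_info(). The first hunk also fixes the allocation check to test dma_map->dma_pages, the buffer just allocated, instead of re-testing dma_map. Below is a minimal userspace C sketch of that discipline; the names (dma_map_sketch, map_create, map_attach, map_put) are hypothetical stand-ins for the kernel's refcount_t helpers, not kernel APIs.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct dma_map_sketch {
	atomic_int users;	/* starts at 1: the creator's reference */
	int *pages;		/* the resource the count protects */
};

static struct dma_map_sketch *map_create(size_t nr_pages)
{
	struct dma_map_sketch *map = calloc(1, sizeof(*map));

	if (!map)
		return NULL;
	map->pages = calloc(nr_pages, sizeof(*map->pages));
	if (!map->pages) {	/* test the member, not the struct */
		free(map);
		return NULL;
	}
	atomic_init(&map->users, 1);	/* creator holds the first reference */
	return map;
}

static bool map_attach(struct dma_map_sketch *map)
{
	/* any fallible setup runs first; on failure, return without
	 * having touched the count */
	atomic_fetch_add(&map->users, 1);	/* reference taken only on success */
	return true;
}

static void map_put(struct dma_map_sketch *map)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&map->users, 1) == 1) {
		free(map->pages);
		free(map);
	}
}

int main(void)
{
	struct dma_map_sketch *map = map_create(16);

	if (!map)
		return 1;
	map_attach(map);	/* users == 2 */
	map_put(map);		/* a user detaches: users == 1 */
	map_put(map);		/* creator drops its reference: freed */
	return 0;
}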
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 5bd8ea9d206a..c014217f5fa7 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -59,22 +59,20 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
 	du.num_pages = umem->npgs;
 	du.chunk_size = umem->chunk_size;
 	du.headroom = umem->headroom;
-	du.ifindex = pool->netdev ? pool->netdev->ifindex : 0;
-	du.queue_id = pool->queue_id;
+	du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
+	du.queue_id = pool ? pool->queue_id : 0;
 	du.flags = 0;
 	if (umem->zc)
 		du.flags |= XDP_DU_F_ZEROCOPY;
 	du.refs = refcount_read(&umem->users);

 	err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
-
-	if (!err && pool->fq)
+	if (!err && pool && pool->fq)
 		err = xsk_diag_put_ring(pool->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
-	if (!err && pool->cq) {
-		err = xsk_diag_put_ring(pool->cq, XDP_DIAG_UMEM_COMPLETION_RING,
-					nlskb);
-	}
+	if (!err && pool && pool->cq)
+		err = xsk_diag_put_ring(pool->cq,
+					XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
 	return err;
 }
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 2a4fd6677155..0c5df593bc56 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -185,11 +185,6 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,

 	xs = (struct xdp_sock *)sock->sk;

-	if (!xsk_is_setup_for_bpf_map(xs)) {
-		sockfd_put(sock);
-		return -EOPNOTSUPP;
-	}
-
 	map_entry = &m->xsk_map[i];
 	node = xsk_map_node_alloc(m, map_entry);
 	if (IS_ERR(node)) {
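Taken together, the xsk.c and xsk_diag.c hunks close a window on a stale pointer: once the bind error path clears xs->pool after xp_destroy(), readers such as the diag code can test the pointer for NULL instead of dereferencing whatever xp_destroy() left behind, which is presumably also why the blanket xsk_is_setup_for_bpf_map() gate could be dropped from xskmap.c. A small self-contained C sketch of the same two-sided pattern, with purely illustrative names (bind_sketch, diag_sketch, and the structs are not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

struct pool_sketch { int queue_id; };
struct sock_sketch { struct pool_sketch *pool; };

static int bind_sketch(struct sock_sketch *xs, int fail)
{
	xs->pool = calloc(1, sizeof(*xs->pool));
	if (!xs->pool)
		return -1;
	if (fail) {		/* e.g. assigning the device failed */
		free(xs->pool);
		xs->pool = NULL;	/* leave no dangling pointer behind */
		return -1;
	}
	return 0;
}

static int diag_sketch(const struct sock_sketch *xs)
{
	const struct pool_sketch *pool = xs->pool;

	/* check pool before every dereference, as xsk_diag_put_umem() now does */
	return pool ? pool->queue_id : 0;
}

int main(void)
{
	struct sock_sketch xs = { 0 };

	bind_sketch(&xs, 1);				/* bind fails, pool is cleared */
	printf("queue_id=%d\n", diag_sketch(&xs));	/* safe: prints 0 */
	return 0;
}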