author		Yunsheng Lin <linyunsheng@huawei.com>	2019-12-05 10:12:28 +0800
committer	David S. Miller <davem@davemloft.net>	2019-12-05 14:42:43 -0800
commit		d1a37dedcfcf2c01daff5281c3c378876a04e2f4
tree		6feb0871314133d0fd9ba248e323775d69750b2f /drivers/net/ethernet/hisilicon/hns3
parent		2a597eff2437d21841a1e87ffa536ab69dbffdcf
net: hns3: fix a use after free problem in hns3_nic_maybe_stop_tx()
Currently, hns3_nic_maybe_stop_tx() uses skb_copy() to linearize a
SKB when the BD number required by the SKB does not meet the hardware
limitation, i.e. it linearizes the SKB by allocating a new linearized
SKB and freeing the old one. If hns3_nic_maybe_stop_tx() then returns
-EBUSY because there is not enough space in the ring to send the
linearized SKB to the hardware, sch_direct_xmit() still holds a
reference to the old SKB and tries to retransmit it when
dev_hard_start_xmit() returns TX_BUSY, which may cause a
use-after-free problem.
This patch fixes it by using __skb_linearize() to linearize the
SKB in hns3_nic_maybe_stop_tx().
Fixes: 51e8439f3496 ("net: hns3: add 8 BD limit for tx flow")
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
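
For illustration only, here is a minimal, hypothetical sketch (the helper
names are made up and this is not the driver code) contrasting the two
linearization strategies. The copy-based variant frees the SKB that the
qdisc layer may still hold a pointer to, while __skb_linearize() pulls the
data into the existing SKB, so a later retry after -EBUSY still sees a
valid pointer.

/* Simplified, hypothetical sketch of the two approaches described above. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Old pattern: allocate a linear copy and free the original SKB.
 * If the caller (e.g. sch_direct_xmit()) keeps the old pointer and
 * retries after a later -EBUSY, it dereferences freed memory.
 */
static int linearize_by_copy(struct sk_buff **out_skb)
{
	struct sk_buff *new_skb = skb_copy(*out_skb, GFP_ATOMIC);

	if (!new_skb)
		return -ENOMEM;

	dev_kfree_skb_any(*out_skb);	/* old SKB is gone; external pointers are stale */
	*out_skb = new_skb;
	return 0;
}

/* New pattern: linearize the same SKB in place; nothing is freed here,
 * so the caller's pointer stays valid whatever happens next.
 */
static int linearize_in_place(struct sk_buff *skb)
{
	return __skb_linearize(skb) ? -ENOMEM : 0;
}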
Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3')
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3_enet.c	| 19
1 file changed, 6 insertions(+), 13 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index e2730319bd43..69545dd6c938 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1288,31 +1288,24 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
 
 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 				  struct net_device *netdev,
-				  struct sk_buff **out_skb)
+				  struct sk_buff *skb)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
-	struct sk_buff *skb = *out_skb;
 	unsigned int bd_num;
 
 	bd_num = hns3_tx_bd_num(skb, bd_size);
 	if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
-		struct sk_buff *new_skb;
-
 		if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
 		    !hns3_skb_need_linearized(skb, bd_size, bd_num))
 			goto out;
 
-		/* manual split the send packet */
-		new_skb = skb_copy(skb, GFP_ATOMIC);
-		if (!new_skb)
+		if (__skb_linearize(skb))
 			return -ENOMEM;
 
-		dev_kfree_skb_any(skb);
-		*out_skb = new_skb;
-		bd_num = hns3_tx_bd_count(new_skb->len);
-		if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
-		    (!skb_is_gso(new_skb) &&
+		bd_num = hns3_tx_bd_count(skb->len);
+		if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
+		    (!skb_is_gso(skb) &&
 		     bd_num > HNS3_MAX_NON_TSO_BD_NUM))
 			return -ENOMEM;
 
@@ -1415,7 +1408,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Prefetch the data used later */
 	prefetch(skb->data);
 
-	ret = hns3_nic_maybe_stop_tx(ring, netdev, &skb);
+	ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
 	if (unlikely(ret <= 0)) {
 		if (ret == -EBUSY) {
 			u64_stats_update_begin(&ring->syncp);
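
As a design note: __skb_linearize() keeps the same struct sk_buff and only
pulls the paged fragments into its linear data area (the head may be
reallocated internally, but the sk_buff that the qdisc layer points to
survives), which is why the -EBUSY retry path is safe after this patch.
For reference, the helper is a thin wrapper in include/linux/skbuff.h,
roughly as below (quoted from memory, so check your tree for the exact
definition):

static inline int __skb_linearize(struct sk_buff *skb)
{
	/* Pull all fragment data into the linear area; NULL means the
	 * allocation failed and the SKB is left unlinearized (but intact).
	 */
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

The trade-off is that linearizing a heavily fragmented SKB still copies its
data, much as skb_copy() did, but it no longer changes which SKB the caller
owns.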