-rw-r--r--	drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c	58
1 file changed, 35 insertions, 23 deletions
diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
index 26017fe9df93..801ba0733e28 100644
--- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c
@@ -718,6 +718,23 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
 	return 0;
 }
 
+/* Free buffers acquired from the buffer pool or which were meant to
+ * be released in the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+	struct device *dev = priv->net_dev->dev.parent;
+	void *vaddr;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+		dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+				 DMA_BIDIRECTIONAL);
+		skb_free_frag(vaddr);
+	}
+}
+
 /* Perform a single release command to add buffers
  * to the specified buffer pool
  */
@@ -727,7 +744,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
 	void *buf;
 	dma_addr_t addr;
-	int i;
+	int i, err;
 
 	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
 		/* Allocate buffer visible to WRIOP + skb shared info +
@@ -754,22 +771,27 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
 	}
 
 release_bufs:
-	/* In case the portal is busy, retry until successful.
-	 * The buffer release function would only fail if the QBMan portal
-	 * was busy, which implies portal contention (i.e. more CPUs than
-	 * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
-	 * there is little we can realistically do, short of giving up -
-	 * in which case we'd risk depleting the buffer pool and never again
-	 * receiving the Rx interrupt which would kick-start the refill logic.
-	 * So just keep retrying, at the risk of being moved to ksoftirqd.
-	 */
-	while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
+	/* In case the portal is busy, retry until successful */
+	while ((err = dpaa2_io_service_release(NULL, bpid,
+					       buf_array, i)) == -EBUSY)
 		cpu_relax();
+
+	/* If release command failed, clean up and bail out;
+	 * not much else we can do about it
+	 */
+	if (err) {
+		free_bufs(priv, buf_array, i);
+		return 0;
+	}
+
 	return i;
 
 err_map:
 	skb_free_frag(buf);
 err_alloc:
+	/* If we managed to allocate at least some buffers,
+	 * release them to hardware
+	 */
 	if (i)
 		goto release_bufs;
 
@@ -811,10 +833,8 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
  */
 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
 {
-	struct device *dev = priv->net_dev->dev.parent;
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-	void *vaddr;
-	int ret, i;
+	int ret;
 
 	do {
 		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
@@ -823,15 +843,7 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
 			netdev_err(priv->net_dev,
 				   "dpaa2_io_service_acquire() failed\n");
 			return;
 		}
-		for (i = 0; i < ret; i++) {
-			/* Same logic as on regular Rx path */
-			vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
-						   buf_array[i]);
-			dma_unmap_single(dev, buf_array[i],
-					 DPAA2_ETH_RX_BUF_SIZE,
-					 DMA_FROM_DEVICE);
-			skb_free_frag(vaddr);
-		}
+		free_bufs(priv, buf_array, ret);
 	} while (ret);
 }
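For reference, the retry-then-bail pattern this patch introduces around dpaa2_io_service_release() can be sketched in isolation as below. This is only an illustration: release_stub(), free_bufs_stub() and BUFS_PER_CMD are hypothetical userspace stand-ins for the driver's dpaa2_io_service_release(), free_bufs() and DPAA2_ETH_BUFS_PER_CMD, so the example builds on its own and is not the actual kernel code.

#include <errno.h>
#include <stdio.h>

#define BUFS_PER_CMD 7	/* stand-in for DPAA2_ETH_BUFS_PER_CMD */

/* Hypothetical stub for dpaa2_io_service_release(): report a busy
 * portal twice, then let the release go through.
 */
static int release_stub(unsigned long long *buf_array, int count)
{
	static int busy_left = 2;

	(void)buf_array;
	(void)count;
	if (busy_left-- > 0)
		return -EBUSY;
	return 0;
}

/* Hypothetical stub for free_bufs(): the driver would unmap and free
 * each buffer here; the sketch only reports what would happen.
 */
static void free_bufs_stub(unsigned long long *buf_array, int count)
{
	(void)buf_array;
	printf("freeing %d buffers after a hard release failure\n", count);
}

int main(void)
{
	unsigned long long buf_array[BUFS_PER_CMD] = { 0 };
	int count = BUFS_PER_CMD;
	int err;

	/* Busy portal: keep retrying, like the driver's cpu_relax() loop */
	while ((err = release_stub(buf_array, count)) == -EBUSY)
		;

	if (err) {
		/* Any other error is final: free the buffers instead of
		 * leaking them.
		 */
		free_bufs_stub(buf_array, count);
		return 0;
	}

	printf("released %d buffers to the pool\n", count);
	return 0;
}

The point of the change is visible in the loop condition: only -EBUSY is retried, while any other return value falls through to the cleanup path rather than spinning forever.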