author	Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>	2022-06-24 17:33:01 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-07-29 17:25:19 +0200
commit	2918419c06088f6709ceb543feb01752779ade4c (patch)
tree	fa22b35bacbb7a2a7222dc03eb0f2ec1deb4dfa5 /drivers/net
parent	a77a75a0e7f397550ab039f96115103e78dd5c69 (diff)
iavf: Fix handling of dummy receive descriptors
[ Upstream commit a9f49e0060301a9bfebeca76739158d0cf91cdf6 ]

Fix a memory leak caused by not handling dummy receive descriptors properly. iavf_get_rx_buffer() now sets the rx_buffer return value for dummy receive descriptors. Without this patch, when the hardware writes a dummy descriptor, iavf would not free the page allocated for the previous receive buffer. This is an unlikely event but can still happen.

[Jesse: massaged commit message]

Fixes: efa14c398582 ("iavf: allow null RX descriptors")
Signed-off-by: Przemyslaw Patynowski <przemyslawx.patynowski@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Konrad Jankowski <konrad0.jankowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
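For context, a simplified sketch of the receive clean-up step that this return value feeds into. It is illustrative only: iavf_get_rx_buffer() and the zero-size (dummy descriptor) case come from the diff below, while iavf_construct_skb() and iavf_put_rx_buffer() are named on the assumption that the driver's usual build-then-release flow applies; this is not a verbatim excerpt of the clean-up loop.

	/* Simplified receive clean-up step (illustrative, not verbatim). */
	rx_buffer = iavf_get_rx_buffer(rx_ring, size);

	/* Only build or extend an skb when the descriptor carried data. */
	if (size)
		skb = iavf_construct_skb(rx_ring, rx_buffer, size);

	/*
	 * The buffer is then released or recycled back to the ring. Before
	 * this fix, a dummy (zero-size) descriptor made rx_buffer NULL here,
	 * so the release step had nothing to operate on and the page attached
	 * to that ring slot was never freed. Returning the tracked rx_buffer
	 * instead lets it be recycled like any other buffer.
	 */
	iavf_put_rx_buffer(rx_ring, rx_buffer);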
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/intel/iavf/iavf_txrx.c	5
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 3525eab8e9f9..5448ed0e0357 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1250,11 +1250,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
 {
 	struct iavf_rx_buffer *rx_buffer;
 
-	if (!size)
-		return NULL;
-
 	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
 	prefetchw(rx_buffer->page);
+	if (!size)
+		return rx_buffer;
 
 	/* we are reusing so sync this buffer for CPU use */
 	dma_sync_single_range_for_cpu(rx_ring->dev,
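Putting the hunk together, the lookup path after this change reads roughly as follows. This is a sketch reconstructed from the context lines above; the second parameter name (size) and the tail of the function after the DMA sync are assumed or elided rather than shown in the hunk.

static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
						 const unsigned int size)
{
	struct iavf_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* Dummy descriptors carry no data (size == 0); return the tracked
	 * buffer anyway so the caller can still release or recycle its page.
	 */
	if (!size)
		return rx_buffer;

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma,
				      rx_buffer->page_offset, size,
				      DMA_FROM_DEVICE);

	/* ... remainder of the function unchanged ... */
	return rx_buffer;
}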