path: root/drivers/net/hyperv
author	Michael Kelley <mikelley@microsoft.com>	2023-01-30 19:33:06 -0800
committer	Paolo Abeni <pabeni@redhat.com>	2023-02-02 09:15:30 +0100
commit	99f1c46011cc0feb47d4f4f7bee70a0341442d14 (patch)
tree	4d7c52e00b3aa9199dda31fe7c18c22c492de488 /drivers/net/hyperv
parent	917d5e04d4dd2bbbf36fc6976ba442e284ccc42d (diff)
hv_netvsc: Fix missed pagebuf entries in netvsc_dma_map/unmap()
netvsc_dma_map() and netvsc_dma_unmap() currently check the cp_partial flag and adjust the page_count so that pagebuf entries for the RNDIS portion of the message are skipped when it has already been copied into a send buffer. But this adjustment has already been made by code in netvsc_send(). The duplicate adjustment causes some pagebuf entries to not be mapped. In a normal VM, this doesn't break anything because the mapping doesn't change the PFN. But in a Confidential VM, dma_map_single() does bounce buffering and provides a different PFN. Failing to do the mapping causes the wrong PFN to be passed to Hyper-V, and various errors ensue.

Fix this by removing the duplicate adjustment in netvsc_dma_map() and netvsc_dma_unmap().

Fixes: 846da38de0e8 ("net: netvsc: Add Isolation VM support for netvsc driver")
Cc: stable@vger.kernel.org
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Link: https://lore.kernel.org/r/1675135986-254490-1-git-send-email-mikelley@microsoft.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
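The arithmetic behind the bug can be shown with a small standalone sketch (plain userspace C, not kernel code; the struct, the field values, and the 5-entry/2-entry split are hypothetical and chosen only to illustrate the double subtraction):

#include <stdio.h>

/* Hypothetical stand-in for the hv_netvsc_packet fields involved. */
struct pkt {
	unsigned int page_buf_cnt;  /* pagebuf entries describing the packet */
	unsigned int rmsg_pgcnt;    /* entries holding the RNDIS message */
	int cp_partial;             /* RNDIS part already copied to send buffer */
};

int main(void)
{
	struct pkt p = { .page_buf_cnt = 5, .rmsg_pgcnt = 2, .cp_partial = 1 };

	/* netvsc_send() has already skipped the copied RNDIS entries. */
	if (p.cp_partial)
		p.page_buf_cnt -= p.rmsg_pgcnt;                 /* 5 -> 3 */

	/* The old netvsc_dma_map()/netvsc_dma_unmap() subtracted again... */
	unsigned int old_count = p.cp_partial ?
		p.page_buf_cnt - p.rmsg_pgcnt : p.page_buf_cnt; /* 3 -> 1 */

	/* ...while the fixed code uses the already-adjusted count. */
	unsigned int new_count = p.page_buf_cnt;                /* 3 */

	printf("before fix: %u of %u entries mapped, %u missed\n",
	       old_count, new_count, new_count - old_count);
	printf("after fix:  %u of %u entries mapped\n", new_count, new_count);
	return 0;
}

On a normal VM the missed entries go unnoticed because mapping does not change the PFN, but on a Confidential VM dma_map_single() bounce-buffers the data and returns a different PFN, so any entry left unmapped hands Hyper-V a stale PFN.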
Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--	drivers/net/hyperv/netvsc.c	| 9
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9352dad58996..e02d1e3ef672 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -987,9 +987,6 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 void netvsc_dma_unmap(struct hv_device *hv_dev,
 		      struct hv_netvsc_packet *packet)
 {
-	u32 page_count = packet->cp_partial ?
-		packet->page_buf_cnt - packet->rmsg_pgcnt :
-		packet->page_buf_cnt;
 	int i;
 
 	if (!hv_is_isolation_supported())
@@ -998,7 +995,7 @@ void netvsc_dma_unmap(struct hv_device *hv_dev,
 	if (!packet->dma_range)
 		return;
 
-	for (i = 0; i < page_count; i++)
+	for (i = 0; i < packet->page_buf_cnt; i++)
 		dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
 				 packet->dma_range[i].mapping_size,
 				 DMA_TO_DEVICE);
@@ -1028,9 +1025,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
 			  struct hv_netvsc_packet *packet,
 			  struct hv_page_buffer *pb)
 {
-	u32 page_count = packet->cp_partial ?
-		packet->page_buf_cnt - packet->rmsg_pgcnt :
-		packet->page_buf_cnt;
+	u32 page_count = packet->page_buf_cnt;
 	dma_addr_t dma;
 	int i;