author    | Ronak Doshi <doshir@vmware.com>         | 2020-05-28 14:53:21 -0700
committer | David S. Miller <davem@davemloft.net>   | 2020-05-28 16:26:48 -0700
commit    | dacce2be33124df3c71f979ac47e3d6354a41125 (patch)
tree      | 5e56b2fef4ec703f990cc9ff42cefea97939a7d5 /drivers/net/vmxnet3/vmxnet3_drv.c
parent    | d3a8a9e5c3b334d443e97daa59bb95c0b69f4794 (diff)
vmxnet3: add geneve and vxlan tunnel offload support
The vmxnet3 version 3 device supports checksum/TSO offload, so vNIC-to-pNIC
traffic can leverage hardware checksum/TSO offloads. However, vmxnet3 does
not support checksum/TSO offload for Geneve/VXLAN encapsulated packets. For
a vNIC configured with an overlay, the guest stack must therefore first
segment the inner packet, compute the inner checksum for each segment, and
encapsulate each segment before transmitting the packet via the vNIC. This
results in a significant performance penalty.
This patch enhances vmxnet3 to support Geneve/VXLAN TSO as well as checksum
offload.
Signed-off-by: Ronak Doshi <doshir@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
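
For readers who want the transmit-side idea before diving into the diff, the
sketch below restates it in isolation: when the device supports encapsulation
offload (version 4 or later) and the skb carries a tunnel, header parsing must
switch from the outer to the inner transport offsets so that the copied header
region covers the inner TCP header. This is a minimal illustration under
assumptions, not the driver's code: the names hdr_extent and tso_hdr_extent
are hypothetical (the real logic lives in vmxnet3_parse_hdr in the diff
below), while the skb helpers are the standard kernel ones the patch uses.

    /* Hypothetical stand-alone sketch; not the driver's exact code. */
    #include <linux/skbuff.h>
    #include <net/tcp.h>

    struct hdr_extent {                 /* illustrative only */
            unsigned int l4_offset;     /* start of the (inner) L4 header */
            unsigned int l4_hdr_size;   /* length of the (inner) TCP header */
            unsigned int copy_size;     /* header bytes copied for the tx ring */
    };

    static void tso_hdr_extent(const struct sk_buff *skb, bool encap_offload,
                               struct hdr_extent *ext)
    {
            if (encap_offload && skb->encapsulation) {
                    /* Geneve/VXLAN: offsets are taken from the inner packet */
                    ext->l4_offset = skb_inner_transport_offset(skb);
                    ext->l4_hdr_size = inner_tcp_hdrlen(skb);
            } else {
                    ext->l4_offset = skb_transport_offset(skb);
                    ext->l4_hdr_size = tcp_hdrlen(skb);
            }
            ext->copy_size = ext->l4_offset + ext->l4_hdr_size;
    }

The outer-UDP handling (setting the oco bit when the tunnel checksum is
requested) and the VMXNET3_OM_ENCAP descriptor mode are separate parts of the
patch and are not shown in this sketch.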
Diffstat (limited to 'drivers/net/vmxnet3/vmxnet3_drv.c')
-rw-r--r-- | drivers/net/vmxnet3/vmxnet3_drv.c | 120
1 file changed, 98 insertions(+), 22 deletions(-)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 4ea7a40ada88..171d4b1d1d04 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -842,12 +842,22 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	u8 protocol = 0;
 
 	if (ctx->mss) {	/* TSO */
-		ctx->eth_ip_hdr_size = skb_transport_offset(skb);
-		ctx->l4_hdr_size = tcp_hdrlen(skb);
-		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
+		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+			ctx->l4_offset = skb_inner_transport_offset(skb);
+			ctx->l4_hdr_size = inner_tcp_hdrlen(skb);
+			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+		} else {
+			ctx->l4_offset = skb_transport_offset(skb);
+			ctx->l4_hdr_size = tcp_hdrlen(skb);
+			ctx->copy_size = ctx->l4_offset + ctx->l4_hdr_size;
+		}
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
+			/* For encap packets, skb_checksum_start_offset refers
+			 * to inner L4 offset. Thus, below works for encap as
+			 * well as non-encap case
+			 */
+			ctx->l4_offset = skb_checksum_start_offset(skb);
 
 			if (ctx->ipv4) {
 				const struct iphdr *iph = ip_hdr(skb);
@@ -871,10 +881,10 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 				break;
 			}
 
-			ctx->copy_size = min(ctx->eth_ip_hdr_size +
+			ctx->copy_size = min(ctx->l4_offset +
 					ctx->l4_hdr_size, skb->len);
 		} else {
-			ctx->eth_ip_hdr_size = 0;
+			ctx->l4_offset = 0;
 			ctx->l4_hdr_size = 0;
 			/* copy as much as allowed */
 			ctx->copy_size = min_t(unsigned int,
@@ -930,6 +940,25 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 
 
 static void
+vmxnet3_prepare_inner_tso(struct sk_buff *skb,
+			  struct vmxnet3_tx_ctx *ctx)
+{
+	struct tcphdr *tcph = inner_tcp_hdr(skb);
+	struct iphdr *iph = inner_ip_hdr(skb);
+
+	if (ctx->ipv4) {
+		iph->check = 0;
+		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
+						 IPPROTO_TCP, 0);
+	} else if (ctx->ipv6) {
+		struct ipv6hdr *iph = inner_ipv6_hdr(skb);
+
+		tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
+					       IPPROTO_TCP, 0);
+	}
+}
+
+static void
 vmxnet3_prepare_tso(struct sk_buff *skb,
 		    struct vmxnet3_tx_ctx *ctx)
 {
@@ -987,6 +1016,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	/* Use temporary descriptor to avoid touching bits multiple times */
 	union Vmxnet3_GenericDesc tempTxDesc;
 #endif
+	struct udphdr *udph;
 
 	count = txd_estimate(skb);
 
@@ -1003,7 +1033,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 			}
 			tq->stats.copy_skb_header++;
 		}
-		vmxnet3_prepare_tso(skb, &ctx);
+		if (skb->encapsulation) {
+			vmxnet3_prepare_inner_tso(skb, &ctx);
+		} else {
+			vmxnet3_prepare_tso(skb, &ctx);
+		}
 	} else {
 		if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
 
@@ -1026,14 +1060,14 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 	BUG_ON(ret <= 0 && ctx.copy_size != 0);
 	/* hdrs parsed, check against other limits */
 	if (ctx.mss) {
-		if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
+		if (unlikely(ctx.l4_offset + ctx.l4_hdr_size >
 			     VMXNET3_MAX_TX_BUF_SIZE)) {
 			tq->stats.drop_oversized_hdr++;
 			goto drop_pkt;
 		}
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (unlikely(ctx.eth_ip_hdr_size +
+			if (unlikely(ctx.l4_offset +
 				     skb->csum_offset >
 				     VMXNET3_MAX_CSUM_OFFSET)) {
 				tq->stats.drop_oversized_hdr++;
@@ -1080,16 +1114,34 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 #endif
 	tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
 	if (ctx.mss) {
-		gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
-		gdesc->txd.om = VMXNET3_OM_TSO;
-		gdesc->txd.msscof = ctx.mss;
+		if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
+			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+			gdesc->txd.om = VMXNET3_OM_ENCAP;
+			gdesc->txd.msscof = ctx.mss;
+
+			udph = udp_hdr(skb);
+			if (udph->check)
+				gdesc->txd.oco = 1;
+		} else {
+			gdesc->txd.hlen = ctx.l4_offset + ctx.l4_hdr_size;
+			gdesc->txd.om = VMXNET3_OM_TSO;
+			gdesc->txd.msscof = ctx.mss;
+		}
 		num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
 	} else {
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			gdesc->txd.hlen = ctx.eth_ip_hdr_size;
-			gdesc->txd.om = VMXNET3_OM_CSUM;
-			gdesc->txd.msscof = ctx.eth_ip_hdr_size +
-					    skb->csum_offset;
+			if (VMXNET3_VERSION_GE_4(adapter) &&
+			    skb->encapsulation) {
+				gdesc->txd.hlen = ctx.l4_offset +
+						  ctx.l4_hdr_size;
+				gdesc->txd.om = VMXNET3_OM_ENCAP;
+				gdesc->txd.msscof = 0;		/* Reserved */
+			} else {
+				gdesc->txd.hlen = ctx.l4_offset;
+				gdesc->txd.om = VMXNET3_OM_CSUM;
+				gdesc->txd.msscof = ctx.l4_offset +
+						    skb->csum_offset;
+			}
 		} else {
 			gdesc->txd.om = 0;
 			gdesc->txd.msscof = 0;
@@ -1168,13 +1220,21 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
 		    (le32_to_cpu(gdesc->dword[3]) &
 		     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-			BUG_ON(gdesc->rcd.frg);
+			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+				     !(le32_to_cpu(gdesc->dword[0]) &
+				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+			WARN_ON_ONCE(gdesc->rcd.frg &&
+				     !(le32_to_cpu(gdesc->dword[0]) &
+				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
 		} else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
 					     (1 << VMXNET3_RCD_TUC_SHIFT))) {
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
-			BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
-			BUG_ON(gdesc->rcd.frg);
+			WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
+				     !(le32_to_cpu(gdesc->dword[0]) &
+				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
+			WARN_ON_ONCE(gdesc->rcd.frg &&
+				     !(le32_to_cpu(gdesc->dword[0]) &
+				     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
 		} else {
 			if (gdesc->rcd.csum) {
 				skb->csum = htons(gdesc->rcd.csum);
@@ -2429,6 +2489,10 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 	if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
 
+	if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
+					 NETIF_F_GSO_UDP_TUNNEL_CSUM))
+		devRead->misc.uptFeatures |= UPT1_F_RXINNEROFLD;
+
 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
 	devRead->misc.queueDescLen = cpu_to_le32(
@@ -2561,8 +2625,8 @@ vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
 	union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
 	unsigned long flags;
 
-	if (!VMXNET3_VERSION_GE_4(adapter))
-		return;
+	if (!VMXNET3_VERSION_GE_4(adapter))
+		return;
 
 	spin_lock_irqsave(&adapter->cmd_lock, flags);
 
@@ -3073,6 +3137,18 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
 		NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
 		NETIF_F_LRO;
+
+	if (VMXNET3_VERSION_GE_4(adapter)) {
+		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+				NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+		netdev->hw_enc_features = NETIF_F_SG | NETIF_F_RXCSUM |
+			NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
+			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 |
+			NETIF_F_LRO | NETIF_F_GSO_UDP_TUNNEL |
+			NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	}
+
 	if (dma64)
 		netdev->hw_features |= NETIF_F_HIGHDMA;
 	netdev->vlan_features = netdev->hw_features &