author	Raed Salem <raeds@nvidia.com>	2021-12-02 17:49:01 +0200
committer	Saeed Mahameed <saeedm@nvidia.com>	2022-02-01 20:59:43 -0800
commit	de47db0cf7f4a9c555ad204e06baa70b50a70d08 (patch)
tree	8bdd313ed6030914f042f6c9d155c03165936c47 /drivers
parent	5352859b3bfa0ca188b2f1d2c1436fddc781e3b6 (diff)
net/mlx5e: IPsec: Fix tunnel mode crypto offload for non TCP/UDP traffic
The IPsec tunnel mode crypto offload software parser (SWP) setting in the data path currently always sets the inner L4 offset, regardless of the encapsulated L4 header type and whether such a header exists at all. This breaks non-TCP/UDP traffic.

Set the SWP inner L4 offset only when the IPsec tunnel encapsulated L4 header protocol is TCP or UDP.

While at it, fix the inner IP protocol read used to set the MLX5_ETH_WQE_SWP_INNER_L4_UDP flag, to address the case where the inner IP header is IPv6.

Fixes: f1267798c980 ("net/mlx5: Fix checksum issue of VXLAN and IPsec crypto offload")
Signed-off-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Maor Dickman <maord@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
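For reference, a minimal user-space sketch of the decision logic introduced by this patch. The struct, flag value, and function name below are simplified stand-ins invented for illustration, not the driver's real types; only the IPPROTO_* constants come from system headers. The point is that the inner L4 offset is written only when the encapsulated protocol is TCP or UDP, and the UDP flag is keyed on the saved inner protocol rather than on the inner IPv4 header.

/* Stand-alone model of the fixed SWP tunnel-mode logic (hypothetical names). */
#include <netinet/in.h>   /* IPPROTO_TCP, IPPROTO_UDP, IPPROTO_ICMP */
#include <stdio.h>

#define SWP_INNER_L4_UDP  (1u << 0)   /* stand-in for MLX5_ETH_WQE_SWP_INNER_L4_UDP */

struct swp_state {
	unsigned int inner_l4_offset;   /* in 2-byte units, 0 = not set */
	unsigned int flags;
};

/* Set the inner L4 offset only for TCP/UDP inner payloads,
 * and the UDP flag only for UDP. */
static void set_inner_l4_swp(struct swp_state *swp, unsigned char inner_ipproto,
			     unsigned int inner_transport_offset)
{
	switch (inner_ipproto) {
	case IPPROTO_UDP:
		swp->flags |= SWP_INNER_L4_UDP;
		/* fall through */
	case IPPROTO_TCP:
		/* IP | ESP | IP | [TCP | UDP] */
		swp->inner_l4_offset = inner_transport_offset / 2;
		break;
	default:
		/* e.g. ICMP inside the tunnel: leave the offset untouched */
		break;
	}
}

int main(void)
{
	struct swp_state udp = {0}, icmp = {0};

	set_inner_l4_swp(&udp, IPPROTO_UDP, 48);    /* hypothetical offsets */
	set_inner_l4_swp(&icmp, IPPROTO_ICMP, 48);

	printf("UDP:  offset=%u flags=%#x\n", udp.inner_l4_offset, udp.flags);
	printf("ICMP: offset=%u flags=%#x\n", icmp.inner_l4_offset, icmp.flags);
	return 0;
}

The fallthrough from the UDP case into the TCP case mirrors the patch: UDP additionally sets the UDP flag, while both protocols get the inner L4 offset; anything else leaves it unset.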
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c	13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 2db9573a3fe6..b56fea142c24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 	/* Tunnel mode */
 	if (mode == XFRM_MODE_TUNNEL) {
 		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
-		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
 		if (xo->proto == IPPROTO_IPV6)
 			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-		if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+
+		switch (xo->inner_ipproto) {
+		case IPPROTO_UDP:
 			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+			fallthrough;
+		case IPPROTO_TCP:
+			/* IP | ESP | IP | [TCP | UDP] */
+			eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+			break;
+		default:
+			break;
+		}
 		return;
 	}
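For orientation, a rough worked example of the offset arithmetic. The packet layout below is hypothetical (header sizes, in particular the ESP header plus IV length, depend on the SA and cipher), and the division by 2 in the code suggests the SWP offset fields are expressed in 2-byte units:

  Ethernet (14) | outer IPv4 (20) | ESP hdr + IV (16) | inner IPv4 (20) | UDP

  skb_inner_network_offset()   = 14 + 20 + 16 = 50 bytes -> swp_inner_l3_offset = 25
  skb_inner_transport_offset() = 50 + 20      = 70 bytes -> swp_inner_l4_offset = 35

With this fix, swp_inner_l4_offset is written only when the inner protocol is TCP or UDP; for other inner protocols (e.g. ICMP) the field is left untouched, and MLX5_ETH_WQE_SWP_INNER_L4_UDP is derived from xo->inner_ipproto, so it is also correct when the inner header is IPv6.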