Diffstat (limited to 'drivers/net/ethernet/intel/iavf')
 drivers/net/ethernet/intel/iavf/Makefile        |    3
 drivers/net/ethernet/intel/iavf/iavf.h          |   22
 drivers/net/ethernet/intel/iavf/iavf_adv_rss.c  |  218
 drivers/net/ethernet/intel/iavf/iavf_adv_rss.h  |   95
 drivers/net/ethernet/intel/iavf/iavf_ethtool.c  |  883
 drivers/net/ethernet/intel/iavf/iavf_fdir.c     |  779
 drivers/net/ethernet/intel/iavf/iavf_fdir.h     |  118
 drivers/net/ethernet/intel/iavf/iavf_main.c     |   62
 drivers/net/ethernet/intel/iavf/iavf_txrx.c     |   17
 drivers/net/ethernet/intel/iavf/iavf_virtchnl.c |  360
 10 files changed, 2543 insertions(+), 14 deletions(-)
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index c997063ed728..9c3e45c54d01 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -11,5 +11,6 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
-iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \
+iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
+ iavf_adv_rss.o \
iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 8a65525a7c0d..e8bd04100ecd 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -37,6 +37,8 @@
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
#include "iavf_txrx.h"
+#include "iavf_fdir.h"
+#include "iavf_adv_rss.h"
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
@@ -300,6 +302,10 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
+#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25)
+#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
+#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27)
+#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28)
/* OS defined structs */
struct net_device *netdev;
@@ -340,6 +346,10 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN)
#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+#define ADV_RSS_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
@@ -362,6 +372,14 @@ struct iavf_adapter {
/* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
+
+#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
+ u16 fdir_active_fltr;
+ struct list_head fdir_list_head;
+ spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
+
+ struct list_head adv_rss_list_head;
+ spinlock_t adv_rss_lock; /* protect the RSS management list */
};
@@ -432,6 +450,10 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+void iavf_add_fdir_filter(struct iavf_adapter *adapter);
+void iavf_del_fdir_filter(struct iavf_adapter *adapter);
+void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
+void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr);
#endif /* _IAVF_H_ */
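
A hedged sketch of how the four new IAVF_FLAG_AQ_* bits are meant to be consumed (illustrative only, not part of the patch; the real dispatcher is the driver's watchdog path in iavf_main.c, whose changes are counted in the diffstat but not shown in this excerpt):

static void example_process_new_aq_bits(struct iavf_adapter *adapter)
{
	/* Each bit defers one virtchnl request to the watchdog; the handlers
	 * declared above (implemented in iavf_virtchnl.c, not shown here)
	 * send the corresponding message to the PF.
	 */
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
		iavf_add_fdir_filter(adapter);
		return;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
		iavf_del_fdir_filter(adapter);
		return;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
		iavf_add_adv_rss_cfg(adapter);
		return;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG)
		iavf_del_adv_rss_cfg(adapter);
}
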
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
new file mode 100644
index 000000000000..6edbf134b73f
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Intel Corporation. */
+
+/* advanced RSS configuration ethtool support for iavf */
+
+#include "iavf.h"
+
+/**
+ * iavf_fill_adv_rss_ip4_hdr - fill the IPv4 RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_ip4_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_SA)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV4_DA)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+}
+
+/**
+ * iavf_fill_adv_rss_ip6_hdr - fill the IPv6 RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_ip6_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_SA)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_IPV6_DA)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+}
+
+/**
+ * iavf_fill_adv_rss_tcp_hdr - fill the TCP RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_tcp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+}
+
+/**
+ * iavf_fill_adv_rss_udp_hdr - fill the UDP RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_udp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+}
+
+/**
+ * iavf_fill_adv_rss_sctp_hdr - fill the SCTP RSS protocol header
+ * @hdr: the virtchnl message protocol header data structure
+ * @hash_flds: the RSS configuration protocol hash fields
+ */
+static void
+iavf_fill_adv_rss_sctp_hdr(struct virtchnl_proto_hdr *hdr, u64 hash_flds)
+{
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+
+ if (hash_flds & IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT)
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+}
+
+/**
+ * iavf_fill_adv_rss_cfg_msg - fill the RSS configuration into virtchnl message
+ * @rss_cfg: the virtchnl message to be filled with RSS configuration setting
+ * @packet_hdrs: the RSS configuration protocol header types
+ * @hash_flds: the RSS configuration protocol hash fields
+ *
+ * Returns 0 if the RSS configuration virtchnl message is filled successfully
+ */
+int
+iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
+ u32 packet_hdrs, u64 hash_flds)
+{
+ struct virtchnl_proto_hdrs *proto_hdrs = &rss_cfg->proto_hdrs;
+ struct virtchnl_proto_hdr *hdr;
+
+ rss_cfg->rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC;
+
+ proto_hdrs->tunnel_level = 0; /* always outer layer */
+
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L3) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4:
+ iavf_fill_adv_rss_ip4_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6:
+ iavf_fill_adv_rss_ip6_hdr(hdr, hash_flds);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ switch (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_L4) {
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_TCP:
+ iavf_fill_adv_rss_tcp_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_UDP:
+ iavf_fill_adv_rss_udp_hdr(hdr, hash_flds);
+ break;
+ case IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP:
+ iavf_fill_adv_rss_sctp_hdr(hdr, hash_flds);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_find_adv_rss_cfg_by_hdrs - find RSS configuration with header type
+ * @adapter: pointer to the VF adapter structure
+ * @packet_hdrs: protocol header type to find.
+ *
+ * Returns pointer to the advanced RSS configuration if found, otherwise NULL
+ */
+struct iavf_adv_rss *
+iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs)
+{
+ struct iavf_adv_rss *rss;
+
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
+ if (rss->packet_hdrs == packet_hdrs)
+ return rss;
+
+ return NULL;
+}
+
+/**
+ * iavf_print_adv_rss_cfg - print the advanced RSS configuration
+ * @adapter: pointer to the VF adapter structure
+ * @rss: pointer to the advanced RSS configuration to print
+ * @action: string describing how the RSS configuration is being handled
+ * @result: string describing the virtchnl result
+ *
+ * Print the advanced RSS configuration
+ **/
+void
+iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
+ const char *action, const char *result)
+{
+ u32 packet_hdrs = rss->packet_hdrs;
+ u64 hash_flds = rss->hash_flds;
+ static char hash_opt[300];
+ const char *proto;
+
+ if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_TCP)
+ proto = "TCP";
+ else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_UDP)
+ proto = "UDP";
+ else if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
+ proto = "SCTP";
+ else
+ return;
+
+ memset(hash_opt, 0, sizeof(hash_opt));
+
+ strcat(hash_opt, proto);
+ if (packet_hdrs & IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4)
+ strcat(hash_opt, "v4 ");
+ else
+ strcat(hash_opt, "v6 ");
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
+ strcat(hash_opt, "IP SA,");
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
+ strcat(hash_opt, "IP DA,");
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
+ strcat(hash_opt, "src port,");
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
+ strcat(hash_opt, "dst port,");
+
+ if (!action)
+ action = "";
+
+ if (!result)
+ result = "";
+
+ dev_info(&adapter->pdev->dev, "%s %s %s\n", action, hash_opt, result);
+}
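
A minimal, hypothetical caller of iavf_fill_adv_rss_cfg_msg() (not part of the patch), showing how a packet_hdrs/hash_flds pair selects the outer L3/L4 headers and the fields to hash on, here a TCP/IPv4 4-tuple:

static int example_build_tcp4_rss_msg(struct virtchnl_rss_cfg *cfg)
{
	u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 |
		   IAVF_ADV_RSS_FLOW_SEG_HDR_TCP;
	u64 flds = IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
		   IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
		   IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
		   IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;

	/* Fills cfg->proto_hdrs with an IPV4 entry (SRC/DST selected) and a
	 * TCP entry (SRC_PORT/DST_PORT selected); returns -EINVAL if either
	 * the L3 or the L4 header selection is missing.
	 */
	return iavf_fill_adv_rss_cfg_msg(cfg, hdrs, flds);
}
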
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
new file mode 100644
index 000000000000..4d3be11af7aa
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_adv_rss.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _IAVF_ADV_RSS_H_
+#define _IAVF_ADV_RSS_H_
+
+struct iavf_adapter;
+
+/* State of advanced RSS configuration */
+enum iavf_adv_rss_state_t {
+ IAVF_ADV_RSS_ADD_REQUEST, /* User requests to add RSS */
+ IAVF_ADV_RSS_ADD_PENDING, /* RSS pending add by the PF */
+ IAVF_ADV_RSS_DEL_REQUEST, /* Driver requests to delete RSS */
+ IAVF_ADV_RSS_DEL_PENDING, /* RSS pending delete by the PF */
+ IAVF_ADV_RSS_ACTIVE, /* RSS configuration is active */
+};
+
+enum iavf_adv_rss_flow_seg_hdr {
+ IAVF_ADV_RSS_FLOW_SEG_HDR_NONE = 0x00000000,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 = 0x00000001,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6 = 0x00000002,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_TCP = 0x00000004,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP = 0x00000008,
+ IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP = 0x00000010,
+};
+
+#define IAVF_ADV_RSS_FLOW_SEG_HDR_L3 \
+ (IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6)
+
+#define IAVF_ADV_RSS_FLOW_SEG_HDR_L4 \
+ (IAVF_ADV_RSS_FLOW_SEG_HDR_TCP | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_UDP | \
+ IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP)
+
+enum iavf_adv_rss_flow_field {
+ /* L3 */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA,
+ /* L4 */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT,
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT,
+
+ /* The total number of enums must not exceed 64 */
+ IAVF_ADV_RSS_FLOW_FIELD_IDX_MAX
+};
+
+#define IAVF_ADV_RSS_HASH_INVALID 0
+#define IAVF_ADV_RSS_HASH_FLD_IPV4_SA \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_SA)
+#define IAVF_ADV_RSS_HASH_FLD_IPV6_SA \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_SA)
+#define IAVF_ADV_RSS_HASH_FLD_IPV4_DA \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV4_DA)
+#define IAVF_ADV_RSS_HASH_FLD_IPV6_DA \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_IPV6_DA)
+#define IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_SRC_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_TCP_DST_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_SRC_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_UDP_DST_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_SRC_PORT)
+#define IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT \
+ BIT_ULL(IAVF_ADV_RSS_FLOW_FIELD_IDX_SCTP_DST_PORT)
+
+/* bookkeeping of advanced RSS configuration */
+struct iavf_adv_rss {
+ enum iavf_adv_rss_state_t state;
+ struct list_head list;
+
+ u32 packet_hdrs;
+ u64 hash_flds;
+
+ struct virtchnl_rss_cfg cfg_msg;
+};
+
+int
+iavf_fill_adv_rss_cfg_msg(struct virtchnl_rss_cfg *rss_cfg,
+ u32 packet_hdrs, u64 hash_flds);
+struct iavf_adv_rss *
+iavf_find_adv_rss_cfg_by_hdrs(struct iavf_adapter *adapter, u32 packet_hdrs);
+void
+iavf_print_adv_rss_cfg(struct iavf_adapter *adapter, struct iavf_adv_rss *rss,
+ const char *action, const char *result);
+#endif /* _IAVF_ADV_RSS_H_ */
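
A hedged usage sketch of the helpers declared above (illustrative, not part of the patch): lookups in adv_rss_list_head go through iavf_find_adv_rss_cfg_by_hdrs() while holding adapter->adv_rss_lock, mirroring how the ethtool path added below uses them:

static void example_report_tcp4_rss(struct iavf_adapter *adapter)
{
	u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4 |
		   IAVF_ADV_RSS_FLOW_SEG_HDR_TCP;
	struct iavf_adv_rss *rss;

	spin_lock_bh(&adapter->adv_rss_lock);
	rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
	if (rss)
		iavf_print_adv_rss_cfg(adapter, rss, "Found", NULL);
	spin_unlock_bh(&adapter->adv_rss_lock);
}
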
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index c93567f4d0f7..af43fbd8cb75 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -828,6 +828,872 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
}
/**
+ * iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
+ * flow type values
+ * @flow: filter type to be converted
+ *
+ * Returns the corresponding ethtool flow type.
+ */
+static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
+{
+ switch (flow) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ return TCP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ return UDP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ return SCTP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ return AH_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ return ESP_V4_FLOW;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ return IPV4_USER_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ return TCP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ return UDP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ return SCTP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ return AH_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ return ESP_V6_FLOW;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ return IPV6_USER_FLOW;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ return ETHER_FLOW;
+ default:
+ /* 0 is undefined ethtool flow */
+ return 0;
+ }
+}
+
+/**
+ * iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
+ * @eth: Ethtool flow type to be converted
+ *
+ * Returns flow enum
+ */
+static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
+{
+ switch (eth) {
+ case TCP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_TCP;
+ case UDP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_UDP;
+ case SCTP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_SCTP;
+ case AH_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_AH;
+ case ESP_V4_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_ESP;
+ case IPV4_USER_FLOW:
+ return IAVF_FDIR_FLOW_IPV4_OTHER;
+ case TCP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_TCP;
+ case UDP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_UDP;
+ case SCTP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_SCTP;
+ case AH_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_AH;
+ case ESP_V6_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_ESP;
+ case IPV6_USER_FLOW:
+ return IAVF_FDIR_FLOW_IPV6_OTHER;
+ case ETHER_FLOW:
+ return IAVF_FDIR_FLOW_NON_IP_L2;
+ default:
+ return IAVF_FDIR_FLOW_NONE;
+ }
+}
+
+/**
+ * iavf_is_mask_valid - check mask field set
+ * @mask: full mask to check
+ * @field: field for which mask should be valid
+ *
+ * If the mask is fully set return true. If it is not valid for field return
+ * false.
+ */
+static bool iavf_is_mask_valid(u64 mask, u64 field)
+{
+ return (mask & field) == field;
+}
+
+/**
+ * iavf_parse_rx_flow_user_data - deconstruct user-defined data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter for userdef data storage
+ *
+ * Returns 0 on success, negative error value on failure
+ */
+static int
+iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_flex_word *flex;
+ int i, cnt = 0;
+
+ if (!(fsp->flow_type & FLOW_EXT))
+ return 0;
+
+ for (i = 0; i < IAVF_FLEX_WORD_NUM; i++) {
+#define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0)
+#define IAVF_USERDEF_FLEX_OFFS_S 16
+#define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
+#define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0)
+ u32 value = be32_to_cpu(fsp->h_ext.data[i]);
+ u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
+
+ if (!value || !mask)
+ continue;
+
+ if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
+ return -EINVAL;
+
+ /* 504 is the maximum value for offsets, and the offset is measured
+ * from the start of the MAC header.
+ */
+#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
+ flex = &fltr->flex_words[cnt++];
+ flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
+ flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
+ IAVF_USERDEF_FLEX_OFFS_S;
+ if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
+ return -EINVAL;
+ }
+
+ fltr->flex_cnt = cnt;
+
+ return 0;
+}
+
+/**
+ * iavf_fill_rx_flow_ext_data - fill the additional data
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: pointer to Flow Director filter to get additional data
+ */
+static void
+iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
+ return;
+
+ fsp->flow_type |= FLOW_EXT;
+
+ memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
+ memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
+}
+
+/**
+ * iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
+ * @adapter: the VF adapter structure that contains filter list
+ * @cmd: ethtool command data structure to receive the filter data
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+static int
+iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct iavf_fdir_fltr *rule = NULL;
+ int ret = 0;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+
+ rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ if (!rule) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
+
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
+ fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
+ fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
+ fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
+ fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
+ fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
+ fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
+ break;
+ case IPV4_USER_FLOW:
+ fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
+ fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
+ fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
+ fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
+ fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
+ fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
+ fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
+ fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
+ fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
+ fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
+ memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
+ fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
+ fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
+ memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
+ fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
+ fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
+ fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
+ memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
+ fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
+ fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
+ break;
+ case ETHER_FLOW:
+ fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
+ fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ iavf_fill_rx_flow_ext_data(fsp, rule);
+
+ if (rule->action == VIRTCHNL_ACTION_DROP)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
+
+release_lock:
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return ret;
+}
+
+/**
+ * iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
+ * @adapter: the VF adapter structure containing the filter list
+ * @cmd: ethtool command data structure
+ * @rule_locs: ethtool array passed in from OS to receive filter IDs
+ *
+ * Returns 0 as expected for success by ethtool
+ */
+static int
+iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct iavf_fdir_fltr *fltr;
+ unsigned int cnt = 0;
+ int val = 0;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ cmd->data = IAVF_MAX_FDIR_FILTERS;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+
+ list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
+ if (cnt == cmd->rule_cnt) {
+ val = -EMSGSIZE;
+ goto release_lock;
+ }
+ rule_locs[cnt] = fltr->loc;
+ cnt++;
+ }
+
+release_lock:
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ if (!val)
+ cmd->rule_cnt = cnt;
+
+ return val;
+}
+
+/**
+ * iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @fsp: pointer to ethtool Rx flow specification
+ * @fltr: filter structure
+ */
+static int
+iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
+ struct iavf_fdir_fltr *fltr)
+{
+ u32 flow_type, q_index = 0;
+ enum virtchnl_action act;
+ int err;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ act = VIRTCHNL_ACTION_DROP;
+ } else {
+ q_index = fsp->ring_cookie;
+ if (q_index >= adapter->num_active_queues)
+ return -EINVAL;
+
+ act = VIRTCHNL_ACTION_QUEUE;
+ }
+
+ fltr->action = act;
+ fltr->loc = fsp->location;
+ fltr->q_index = q_index;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
+ sizeof(fltr->ext_data.usr_def));
+ memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
+ sizeof(fltr->ext_mask.usr_def));
+ }
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
+ fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
+ fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
+ fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
+ break;
+ case IPV4_USER_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+ fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
+ fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
+ fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
+ fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
+ fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
+ fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
+ fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+ break;
+ case ETHER_FLOW:
+ fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
+ fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
+ break;
+ default:
+ /* not doing un-parsed flow types */
+ return -EINVAL;
+ }
+
+ if (iavf_fdir_is_dup_fltr(adapter, fltr))
+ return -EEXIST;
+
+ err = iavf_parse_rx_flow_user_data(fsp, fltr);
+ if (err)
+ return err;
+
+ return iavf_fill_fdir_add_msg(adapter, fltr);
+}
+
+/**
+ * iavf_add_fdir_ethtool - add Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: command to add Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct iavf_fdir_fltr *fltr;
+ int count = 50;
+ int err;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return -EINVAL;
+
+ if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
+ IAVF_MAX_FDIR_FILTERS);
+ return -ENOSPC;
+ }
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
+ dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ return -EEXIST;
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
+ &adapter->crit_section)) {
+ if (--count == 0) {
+ kfree(fltr);
+ return -EINVAL;
+ }
+ udelay(1);
+ }
+
+ err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
+ if (err)
+ goto ret;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ iavf_fdir_list_add_fltr(adapter, fltr);
+ adapter->fdir_active_fltr++;
+ fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+
+ret:
+ if (err && fltr)
+ kfree(fltr);
+
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return err;
+}
+
+/**
+ * iavf_del_fdir_ethtool - delete Flow Director filter
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: command to delete Flow Director filter
+ *
+ * Returns 0 on success and negative values for failure
+ */
+static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct iavf_fdir_fltr *fltr = NULL;
+ int err = 0;
+
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
+ if (fltr) {
+ if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
+ fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ } else {
+ err = -EBUSY;
+ }
+ } else if (adapter->fdir_active_fltr) {
+ err = -EINVAL;
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+
+ return err;
+}
+
+/**
+ * iavf_adv_rss_parse_hdrs - parses headers from RSS hash input
+ * @cmd: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns intended
+ * header types for RSS configuration
+ */
+static u32 iavf_adv_rss_parse_hdrs(struct ethtool_rxnfc *cmd)
+{
+ u32 hdrs = IAVF_ADV_RSS_FLOW_SEG_HDR_NONE;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case UDP_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case SCTP_V4_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV4;
+ break;
+ case TCP_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_TCP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case UDP_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_UDP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ case SCTP_V6_FLOW:
+ hdrs |= IAVF_ADV_RSS_FLOW_SEG_HDR_SCTP |
+ IAVF_ADV_RSS_FLOW_SEG_HDR_IPV6;
+ break;
+ default:
+ break;
+ }
+
+ return hdrs;
+}
+
+/**
+ * iavf_adv_rss_parse_hash_flds - parses hash fields from RSS hash input
+ * @cmd: ethtool rxnfc command
+ *
+ * This function parses the rxnfc command and returns intended hash fields for
+ * RSS configuration
+ */
+static u64 iavf_adv_rss_parse_hash_flds(struct ethtool_rxnfc *cmd)
+{
+ u64 hfld = IAVF_ADV_RSS_HASH_INVALID;
+
+ if (cmd->data & RXH_IP_SRC || cmd->data & RXH_IP_DST) {
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if (cmd->data & RXH_IP_SRC)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_SA;
+ if (cmd->data & RXH_IP_DST)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV4_DA;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (cmd->data & RXH_IP_SRC)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_SA;
+ if (cmd->data & RXH_IP_DST)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_IPV6_DA;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (cmd->data & RXH_L4_B_0_1 || cmd->data & RXH_L4_B_2_3) {
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (cmd->data & RXH_L4_B_0_1)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT;
+ if (cmd->data & RXH_L4_B_2_3)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ if (cmd->data & RXH_L4_B_0_1)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT;
+ if (cmd->data & RXH_L4_B_2_3)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ if (cmd->data & RXH_L4_B_0_1)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT;
+ if (cmd->data & RXH_L4_B_2_3)
+ hfld |= IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return hfld;
+}
+
+/**
+ * iavf_set_adv_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ */
+static int
+iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct iavf_adv_rss *rss_old, *rss_new;
+ bool rss_new_add = false;
+ int count = 50, err = 0;
+ u64 hash_flds;
+ u32 hdrs;
+
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ hdrs = iavf_adv_rss_parse_hdrs(cmd);
+ if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
+ return -EINVAL;
+
+ hash_flds = iavf_adv_rss_parse_hash_flds(cmd);
+ if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
+ return -EINVAL;
+
+ rss_new = kzalloc(sizeof(*rss_new), GFP_KERNEL);
+ if (!rss_new)
+ return -ENOMEM;
+
+ if (iavf_fill_adv_rss_cfg_msg(&rss_new->cfg_msg, hdrs, hash_flds)) {
+ kfree(rss_new);
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
+ &adapter->crit_section)) {
+ if (--count == 0) {
+ kfree(rss_new);
+ return -EINVAL;
+ }
+
+ udelay(1);
+ }
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ rss_old = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
+ if (rss_old) {
+ if (rss_old->state != IAVF_ADV_RSS_ACTIVE) {
+ err = -EBUSY;
+ } else if (rss_old->hash_flds != hash_flds) {
+ rss_old->state = IAVF_ADV_RSS_ADD_REQUEST;
+ rss_old->hash_flds = hash_flds;
+ memcpy(&rss_old->cfg_msg, &rss_new->cfg_msg,
+ sizeof(rss_new->cfg_msg));
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
+ } else {
+ err = -EEXIST;
+ }
+ } else {
+ rss_new_add = true;
+ rss_new->state = IAVF_ADV_RSS_ADD_REQUEST;
+ rss_new->packet_hdrs = hdrs;
+ rss_new->hash_flds = hash_flds;
+ list_add_tail(&rss_new->list, &adapter->adv_rss_list_head);
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (!err)
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+
+ clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
+
+ if (!rss_new_add)
+ kfree(rss_new);
+
+ return err;
+}
+
+/**
+ * iavf_get_adv_rss_hash_opt - Retrieve hash fields for a given flow-type
+ * @adapter: pointer to the VF adapter structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ */
+static int
+iavf_get_adv_rss_hash_opt(struct iavf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct iavf_adv_rss *rss;
+ u64 hash_flds;
+ u32 hdrs;
+
+ if (!ADV_RSS_SUPPORT(adapter))
+ return -EOPNOTSUPP;
+
+ cmd->data = 0;
+
+ hdrs = iavf_adv_rss_parse_hdrs(cmd);
+ if (hdrs == IAVF_ADV_RSS_FLOW_SEG_HDR_NONE)
+ return -EINVAL;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ rss = iavf_find_adv_rss_cfg_by_hdrs(adapter, hdrs);
+ if (rss)
+ hash_flds = rss->hash_flds;
+ else
+ hash_flds = IAVF_ADV_RSS_HASH_INVALID;
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (hash_flds == IAVF_ADV_RSS_HASH_INVALID)
+ return -EINVAL;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_SA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_SA))
+ cmd->data |= (u64)RXH_IP_SRC;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_IPV4_DA |
+ IAVF_ADV_RSS_HASH_FLD_IPV6_DA))
+ cmd->data |= (u64)RXH_IP_DST;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_SRC_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_SRC_PORT))
+ cmd->data |= (u64)RXH_L4_B_0_1;
+
+ if (hash_flds & (IAVF_ADV_RSS_HASH_FLD_TCP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_UDP_DST_PORT |
+ IAVF_ADV_RSS_HASH_FLD_SCTP_DST_PORT))
+ cmd->data |= (u64)RXH_L4_B_2_3;
+
+ return 0;
+}
+
+/**
+ * iavf_set_rxnfc - command to set Rx flow rules.
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns 0 for success and negative values for errors
+ */
+static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = iavf_add_fdir_ethtool(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = iavf_del_fdir_ethtool(adapter, cmd);
+ break;
+ case ETHTOOL_SRXFH:
+ ret = iavf_set_adv_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
* iavf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -846,9 +1712,21 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = adapter->num_active_queues;
ret = 0;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (!FDIR_FLTR_SUPPORT(adapter))
+ break;
+ cmd->rule_cnt = adapter->fdir_active_fltr;
+ cmd->data = IAVF_MAX_FDIR_FILTERS;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
+ break;
case ETHTOOL_GRXFH:
- netdev_info(netdev,
- "RSS hash info is not available to vf, use pf.\n");
+ ret = iavf_get_adv_rss_hash_opt(adapter, cmd);
break;
default:
break;
@@ -1025,6 +1903,7 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.set_coalesce = iavf_set_coalesce,
.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
+ .set_rxnfc = iavf_set_rxnfc,
.get_rxnfc = iavf_get_rxnfc,
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
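
A self-contained sketch of the user-defined data layout parsed by iavf_parse_rx_flow_user_data() above (not part of the patch; the masks below are assumption-labelled copies of that function's local IAVF_USERDEF_FLEX_* definitions): each 32-bit user-def word carries the 16-bit value to match in its low half, and the byte offset of that value, measured from the start of the MAC header and capped at 504, in its high half:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

#define EXAMPLE_FLEX_WORD_M	GENMASK(15, 0)	/* mirrors IAVF_USERDEF_FLEX_WORD_M */
#define EXAMPLE_FLEX_OFFS_M	GENMASK(31, 16)	/* mirrors IAVF_USERDEF_FLEX_OFFS_M */

static void example_decode_user_def(u32 user_def, u16 *word, u16 *offset)
{
	*word = FIELD_GET(EXAMPLE_FLEX_WORD_M, user_def);
	*offset = FIELD_GET(EXAMPLE_FLEX_OFFS_M, user_def);	/* must not exceed 504 */
}
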
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
new file mode 100644
index 000000000000..6146203efd84
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Intel Corporation. */
+
+/* flow director ethtool support for iavf */
+
+#include "iavf.h"
+
+#define GTPU_PORT 2152
+#define NAT_T_ESP_PORT 4500
+#define PFCP_PORT 8805
+
+static const struct in6_addr ipv6_addr_full_mask = {
+ .in6_u = {
+ .u6_addr8 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
+ }
+};
+
+/**
+ * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
+ * @fltr: Flow Director filter data structure
+ */
+static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr)
+{
+ return sizeof(struct ethhdr) +
+ (fltr->ip_ver == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+ sizeof(struct udphdr);
+}
+
+/**
+ * iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the GTP-U protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+ struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */
+ u16 adj_offs, hdr_offs;
+ int i;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP);
+
+ adj_offs = iavf_pkt_udp_no_pay_len(fltr);
+
+ for (i = 0; i < fltr->flex_cnt; i++) {
+#define IAVF_GTPU_HDR_TEID_OFFS0 4
+#define IAVF_GTPU_HDR_TEID_OFFS1 6
+#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS 10
+#define IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK 0x00FF /* skip N_PDU */
+/* PDU Session Container Extension Header (PSC) */
+#define IAVF_GTPU_PSC_EXTHDR_TYPE 0x85
+#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS 13
+#define IAVF_GTPU_HDR_PSC_PDU_QFI_MASK 0x3F /* skip Type */
+#define IAVF_GTPU_EH_QFI_IDX 1
+
+ if (fltr->flex_words[i].offset < adj_offs)
+ return -EINVAL;
+
+ hdr_offs = fltr->flex_words[i].offset - adj_offs;
+
+ switch (hdr_offs) {
+ case IAVF_GTPU_HDR_TEID_OFFS0:
+ case IAVF_GTPU_HDR_TEID_OFFS1: {
+ __be16 *pay_word = (__be16 *)ghdr->buffer;
+
+ pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID);
+ }
+ break;
+ case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS:
+ if ((fltr->flex_words[i].word &
+ IAVF_GTPU_HDR_NEXT_EXTHDR_TYPE_MASK) !=
+ IAVF_GTPU_PSC_EXTHDR_TYPE)
+ return -EOPNOTSUPP;
+ if (!ehdr)
+ ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH);
+ break;
+ case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS:
+ if (!ehdr)
+ return -EINVAL;
+ ehdr->buffer[IAVF_GTPU_EH_QFI_IDX] =
+ fltr->flex_words[i].word &
+ IAVF_GTPU_HDR_PSC_PDU_QFI_MASK;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the PFCP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ u16 adj_offs, hdr_offs;
+ int i;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
+
+ adj_offs = iavf_pkt_udp_no_pay_len(fltr);
+
+ for (i = 0; i < fltr->flex_cnt; i++) {
+#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS 0
+ if (fltr->flex_words[i].offset < adj_offs)
+ return -EINVAL;
+
+ hdr_offs = fltr->flex_words[i].offset - adj_offs;
+
+ switch (hdr_offs) {
+ case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS:
+ hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the NAT-T-ESP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ u16 adj_offs, hdr_offs;
+ u32 spi = 0;
+ int i;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+ adj_offs = iavf_pkt_udp_no_pay_len(fltr);
+
+ for (i = 0; i < fltr->flex_cnt; i++) {
+#define IAVF_NAT_T_ESP_SPI_OFFS0 0
+#define IAVF_NAT_T_ESP_SPI_OFFS1 2
+ if (fltr->flex_words[i].offset < adj_offs)
+ return -EINVAL;
+
+ hdr_offs = fltr->flex_words[i].offset - adj_offs;
+
+ switch (hdr_offs) {
+ case IAVF_NAT_T_ESP_SPI_OFFS0:
+ spi |= fltr->flex_words[i].word << 16;
+ break;
+ case IAVF_NAT_T_ESP_SPI_OFFS1:
+ spi |= fltr->flex_words[i].word;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (!spi)
+ return -EOPNOTSUPP; /* the IKE header format with SPI 0 is not supported */
+
+ *(__be32 *)hdr->buffer = htonl(spi);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+
+ uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the UDP payload defined protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ int err;
+
+ switch (ntohs(fltr->ip_data.dst_port)) {
+ case GTPU_PORT:
+ err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs);
+ break;
+ case NAT_T_ESP_PORT:
+ err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs);
+ break;
+ case PFCP_PORT:
+ err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the IPv4 protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct iphdr *iph = (struct iphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+ if (fltr->ip_mask.tos == U8_MAX) {
+ iph->tos = fltr->ip_data.tos;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
+ }
+
+ if (fltr->ip_mask.proto == U8_MAX) {
+ iph->protocol = fltr->ip_data.proto;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+ }
+
+ if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) {
+ iph->saddr = fltr->ip_data.v4_addrs.src_ip;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+ }
+
+ if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) {
+ iph->daddr = fltr->ip_data.v4_addrs.dst_ip;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ }
+
+ fltr->ip_ver = 4;
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the IPv6 protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+ if (fltr->ip_mask.tclass == U8_MAX) {
+ iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
+ iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
+ }
+
+ if (fltr->ip_mask.proto == U8_MAX) {
+ iph->nexthdr = fltr->ip_data.proto;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+ }
+
+ if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
+ sizeof(struct in6_addr))) {
+ memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip,
+ sizeof(struct in6_addr));
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+ }
+
+ if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
+ sizeof(struct in6_addr))) {
+ memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip,
+ sizeof(struct in6_addr));
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+ }
+
+ fltr->ip_ver = 6;
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_tcp_hdr - fill the TCP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the TCP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct tcphdr *tcph = (struct tcphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+ if (fltr->ip_mask.src_port == htons(U16_MAX)) {
+ tcph->source = fltr->ip_data.src_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+ }
+
+ if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
+ tcph->dest = fltr->ip_data.dst_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_udp_hdr - fill the UDP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the UDP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct udphdr *udph = (struct udphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+ if (fltr->ip_mask.src_port == htons(U16_MAX)) {
+ udph->source = fltr->ip_data.src_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+ }
+
+ if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
+ udph->dest = fltr->ip_data.dst_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+ }
+
+ if (!fltr->flex_cnt)
+ return 0;
+
+ return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs);
+}
+
+/**
+ * iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the SCTP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct sctphdr *sctph = (struct sctphdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+ if (fltr->ip_mask.src_port == htons(U16_MAX)) {
+ sctph->source = fltr->ip_data.src_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+ }
+
+ if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
+ sctph->dest = fltr->ip_data.dst_port;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_ah_hdr - fill the AH protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the AH protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+ if (fltr->ip_mask.spi == htonl(U32_MAX)) {
+ ah->spi = fltr->ip_data.spi;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_esp_hdr - fill the ESP protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the ESP protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+ if (fltr->ip_mask.spi == htonl(U32_MAX)) {
+ esph->spi = fltr->ip_data.spi;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_l4_hdr - fill the L4 protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the L4 protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr;
+ __be32 *l4_4_data;
+
+ if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */
+ return 0;
+
+ hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ l4_4_data = (__be32 *)hdr->buffer;
+
+ /* L2TPv3 over IP with 'Session ID' */
+ if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) {
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
+
+ *l4_4_data = fltr->ip_data.l4_header;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header
+ * @fltr: Flow Director filter data structure
+ * @proto_hdrs: Flow Director protocol headers data structure
+ *
+ * Returns 0 if the Ethernet protocol header is set successfully
+ */
+static int
+iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr,
+ struct virtchnl_proto_hdrs *proto_hdrs)
+{
+ struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
+ struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer;
+
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+ if (fltr->eth_mask.etype == htons(U16_MAX)) {
+ if (fltr->eth_data.etype == htons(ETH_P_IP) ||
+ fltr->eth_data.etype == htons(ETH_P_IPV6))
+ return -EOPNOTSUPP;
+
+ ehdr->h_proto = fltr->eth_data.etype;
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: Flow Director filter data structure
+ *
+ * Returns 0 if the add Flow Director virtchnl message is filled successfully
+ */
+int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg;
+ struct virtchnl_proto_hdrs *proto_hdrs;
+ int err;
+
+ proto_hdrs = &vc_msg->rule_cfg.proto_hdrs;
+
+ err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */
+ if (err)
+ return err;
+
+ switch (fltr->flow_type) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
+ iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
+ break;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ vc_msg->vsi_id = adapter->vsi.id;
+ vc_msg->rule_cfg.action_set.count = 1;
+ vc_msg->rule_cfg.action_set.actions[0].type = fltr->action;
+ vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index;
+
+ return 0;
+}
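
iavf_fill_fdir_add_msg() assembles the rule as a small stack of protocol headers: Ethernet first, then the IPv4/IPv6 header, then at most one L4/tunnel header, with each helper appending one entry to proto_hdrs and bumping its count. A toy model of that composition, using invented types in place of the virtchnl structures, is sketched below.

#include <stdio.h>

enum hdr_type { HDR_ETH, HDR_IPV4, HDR_TCP };

/* Toy stand-in for the protocol header list: an ordered array of
 * header descriptors plus a running count.
 */
struct toy_proto_hdrs {
	enum hdr_type type[8];
	int count;
};

/* Each fill step appends exactly one header, the way the iavf helpers
 * each consume one proto_hdr slot.
 */
static int fill(struct toy_proto_hdrs *hdrs, enum hdr_type type)
{
	if (hdrs->count >= 8)
		return -1;
	hdrs->type[hdrs->count++] = type;
	return 0;
}

int main(void)
{
	struct toy_proto_hdrs hdrs = { .count = 0 };

	/* a tcp4 rule becomes ETH + IPV4 + TCP; an IPv4 "other" rule
	 * with no protocol given would stop after ETH + IPV4
	 */
	if (fill(&hdrs, HDR_ETH) || fill(&hdrs, HDR_IPV4) ||
	    fill(&hdrs, HDR_TCP))
		return 1;

	printf("headers in rule: %d\n", hdrs.count); /* 3 */
	return 0;
}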
+
+/**
+ * iavf_fdir_flow_proto_name - get the flow protocol name
+ * @flow_type: Flow Director filter flow type
+ **/
+static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type)
+{
+ switch (flow_type) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ return "TCP";
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ return "UDP";
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ return "SCTP";
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ return "AH";
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ return "ESP";
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ return "Other";
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ return "Ethernet";
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * iavf_print_fdir_fltr - print the Flow Director filter
+ * @adapter: adapter structure
+ * @fltr: Flow Director filter to print
+ *
+ * Print the Flow Director filter
+ **/
+void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type);
+
+ if (!proto)
+ return;
+
+ switch (fltr->flow_type) {
+ case IAVF_FDIR_FLOW_IPV4_TCP:
+ case IAVF_FDIR_FLOW_IPV4_UDP:
+ case IAVF_FDIR_FLOW_IPV4_SCTP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n",
+ fltr->loc,
+ &fltr->ip_data.v4_addrs.dst_ip,
+ &fltr->ip_data.v4_addrs.src_ip,
+ proto,
+ ntohs(fltr->ip_data.dst_port),
+ ntohs(fltr->ip_data.src_port));
+ break;
+ case IAVF_FDIR_FLOW_IPV4_AH:
+ case IAVF_FDIR_FLOW_IPV4_ESP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n",
+ fltr->loc,
+ &fltr->ip_data.v4_addrs.dst_ip,
+ &fltr->ip_data.v4_addrs.src_ip,
+ proto,
+ ntohl(fltr->ip_data.spi));
+ break;
+ case IAVF_FDIR_FLOW_IPV4_OTHER:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n",
+ fltr->loc,
+ &fltr->ip_data.v4_addrs.dst_ip,
+ &fltr->ip_data.v4_addrs.src_ip,
+ fltr->ip_data.proto,
+ ntohl(fltr->ip_data.l4_header));
+ break;
+ case IAVF_FDIR_FLOW_IPV6_TCP:
+ case IAVF_FDIR_FLOW_IPV6_UDP:
+ case IAVF_FDIR_FLOW_IPV6_SCTP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n",
+ fltr->loc,
+ &fltr->ip_data.v6_addrs.dst_ip,
+ &fltr->ip_data.v6_addrs.src_ip,
+ proto,
+ ntohs(fltr->ip_data.dst_port),
+ ntohs(fltr->ip_data.src_port));
+ break;
+ case IAVF_FDIR_FLOW_IPV6_AH:
+ case IAVF_FDIR_FLOW_IPV6_ESP:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n",
+ fltr->loc,
+ &fltr->ip_data.v6_addrs.dst_ip,
+ &fltr->ip_data.v6_addrs.src_ip,
+ proto,
+ ntohl(fltr->ip_data.spi));
+ break;
+ case IAVF_FDIR_FLOW_IPV6_OTHER:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n",
+ fltr->loc,
+ &fltr->ip_data.v6_addrs.dst_ip,
+ &fltr->ip_data.v6_addrs.src_ip,
+ fltr->ip_data.proto,
+ ntohl(fltr->ip_data.l4_header));
+ break;
+ case IAVF_FDIR_FLOW_NON_IP_L2:
+ dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n",
+ fltr->loc,
+ ntohs(fltr->eth_data.etype));
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * iavf_fdir_is_dup_fltr - test if filter is already in list
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: Flow Director filter data structure
+ *
+ * Returns true if the filter is found in the list
+ */
+bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_fdir_fltr *tmp;
+
+ list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
+ if (tmp->flow_type != fltr->flow_type)
+ continue;
+
+ if (!memcmp(&tmp->eth_data, &fltr->eth_data,
+ sizeof(fltr->eth_data)) &&
+ !memcmp(&tmp->ip_data, &fltr->ip_data,
+ sizeof(fltr->ip_data)) &&
+ !memcmp(&tmp->ext_data, &fltr->ext_data,
+ sizeof(fltr->ext_data)))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * iavf_find_fdir_fltr_by_loc - find filter with location
+ * @adapter: pointer to the VF adapter structure
+ * @loc: location to find.
+ *
+ * Returns pointer to Flow Director filter if found or null
+ */
+struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
+{
+ struct iavf_fdir_fltr *rule;
+
+ list_for_each_entry(rule, &adapter->fdir_list_head, list)
+ if (rule->loc == loc)
+ return rule;
+
+ return NULL;
+}
+
+/**
+ * iavf_fdir_list_add_fltr - add a new node to the Flow Director filter list
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: filter node to add to structure
+ */
+void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
+{
+ struct iavf_fdir_fltr *rule, *parent = NULL;
+
+ list_for_each_entry(rule, &adapter->fdir_list_head, list) {
+ if (rule->loc >= fltr->loc)
+ break;
+ parent = rule;
+ }
+
+ if (parent)
+ list_add(&fltr->list, &parent->list);
+ else
+ list_add(&fltr->list, &adapter->fdir_list_head);
+}
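
iavf_fdir_list_add_fltr() keeps the filter list sorted by rule location: it walks the list until it meets the first entry whose loc is greater than or equal to the new filter's and inserts just in front of it, or at the head when every existing entry is larger. The same idea in a self-contained sketch, using a plain singly linked list instead of the kernel's list_head:

#include <stdio.h>
#include <stdlib.h>

struct rule {
	unsigned int loc;
	struct rule *next;
};

/* Insert so the list stays ordered by ascending loc. */
static void insert_sorted(struct rule **head, struct rule *new_rule)
{
	struct rule **pos = head;

	while (*pos && (*pos)->loc < new_rule->loc)
		pos = &(*pos)->next;

	new_rule->next = *pos;
	*pos = new_rule;
}

int main(void)
{
	static const unsigned int locs[] = { 20, 5, 13 };
	struct rule *head = NULL, *r;
	size_t i;

	for (i = 0; i < sizeof(locs) / sizeof(locs[0]); i++) {
		r = calloc(1, sizeof(*r));
		if (!r)
			return 1;
		r->loc = locs[i];
		insert_sorted(&head, r);
	}

	for (r = head; r; r = r->next)
		printf("loc %u\n", r->loc);	/* 5, 13, 20 */

	while (head) {
		r = head;
		head = head->next;
		free(r);
	}
	return 0;
}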
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
new file mode 100644
index 000000000000..33c55c366315
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _IAVF_FDIR_H_
+#define _IAVF_FDIR_H_
+
+struct iavf_adapter;
+
+/* State of Flow Director filter */
+enum iavf_fdir_fltr_state_t {
+ IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
+ IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
+ IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
+ IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
+ IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
+};
+
+enum iavf_fdir_flow_type {
+ /* NONE - used for undef/error */
+ IAVF_FDIR_FLOW_NONE = 0,
+ IAVF_FDIR_FLOW_IPV4_TCP,
+ IAVF_FDIR_FLOW_IPV4_UDP,
+ IAVF_FDIR_FLOW_IPV4_SCTP,
+ IAVF_FDIR_FLOW_IPV4_AH,
+ IAVF_FDIR_FLOW_IPV4_ESP,
+ IAVF_FDIR_FLOW_IPV4_OTHER,
+ IAVF_FDIR_FLOW_IPV6_TCP,
+ IAVF_FDIR_FLOW_IPV6_UDP,
+ IAVF_FDIR_FLOW_IPV6_SCTP,
+ IAVF_FDIR_FLOW_IPV6_AH,
+ IAVF_FDIR_FLOW_IPV6_ESP,
+ IAVF_FDIR_FLOW_IPV6_OTHER,
+ IAVF_FDIR_FLOW_NON_IP_L2,
+ /* MAX - this must be last and add anything new just above it */
+ IAVF_FDIR_FLOW_PTYPE_MAX,
+};
+
+/* IAVF_FLEX_WORD_NUM must not exceed the number of elements in the
+ * '__be32 data[2]' array of ethtool's 'struct ethtool_rx_flow_spec.m_ext',
+ * which carries the user-defined flex bytes (words).
+ */
+#define IAVF_FLEX_WORD_NUM 2
+
+struct iavf_flex_word {
+ u16 offset;
+ u16 word;
+};
+
+struct iavf_ipv4_addrs {
+ __be32 src_ip;
+ __be32 dst_ip;
+};
+
+struct iavf_ipv6_addrs {
+ struct in6_addr src_ip;
+ struct in6_addr dst_ip;
+};
+
+struct iavf_fdir_eth {
+ __be16 etype;
+};
+
+struct iavf_fdir_ip {
+ union {
+ struct iavf_ipv4_addrs v4_addrs;
+ struct iavf_ipv6_addrs v6_addrs;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 l4_header; /* first 4 bytes of the layer 4 header */
+ __be32 spi; /* security parameter index for AH/ESP */
+ union {
+ u8 tos;
+ u8 tclass;
+ };
+ u8 proto;
+};
+
+struct iavf_fdir_extra {
+ u32 usr_def[IAVF_FLEX_WORD_NUM];
+};
+
+/* bookkeeping of Flow Director filters */
+struct iavf_fdir_fltr {
+ enum iavf_fdir_fltr_state_t state;
+ struct list_head list;
+
+ enum iavf_fdir_flow_type flow_type;
+
+ struct iavf_fdir_eth eth_data;
+ struct iavf_fdir_eth eth_mask;
+
+ struct iavf_fdir_ip ip_data;
+ struct iavf_fdir_ip ip_mask;
+
+ struct iavf_fdir_extra ext_data;
+ struct iavf_fdir_extra ext_mask;
+
+ enum virtchnl_action action;
+
+ /* flex byte filter data */
+ u8 ip_ver; /* used to adjust the flex offset, 4 : IPv4, 6 : IPv6 */
+ u8 flex_cnt;
+ struct iavf_flex_word flex_words[IAVF_FLEX_WORD_NUM];
+
+ u32 flow_id;
+
+ u32 loc; /* Rule location inside the flow table */
+ u32 q_index;
+
+ struct virtchnl_fdir_add vc_add_msg;
+};
+
+int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc);
+#endif /* _IAVF_FDIR_H_ */
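
Every match key in iavf_fdir_fltr comes as a data/mask pair (eth_data/eth_mask, ip_data/ip_mask, ext_data/ext_mask): the mask records which parts of the data the rule cares about, and the fill helpers above only program fields whose mask is fully set. The stand-alone sketch below illustrates that convention with a cut-down, invented key structure rather than the driver's types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Cut-down key: just addresses and ports, enough to show the idea. */
struct mini_key {
	uint32_t src_ip;
	uint32_t dst_ip;
	uint16_t src_port;
	uint16_t dst_port;
};

/* A packet matches when every masked-in bit of the key agrees. */
static bool mini_match(const struct mini_key *data,
		       const struct mini_key *mask,
		       const struct mini_key *pkt)
{
	return (pkt->src_ip & mask->src_ip) == (data->src_ip & mask->src_ip) &&
	       (pkt->dst_ip & mask->dst_ip) == (data->dst_ip & mask->dst_ip) &&
	       (pkt->src_port & mask->src_port) ==
			(data->src_port & mask->src_port) &&
	       (pkt->dst_port & mask->dst_port) ==
			(data->dst_port & mask->dst_port);
}

int main(void)
{
	/* match any source, destination port 80 only */
	struct mini_key data = { .dst_port = 80 };
	struct mini_key mask = { .dst_port = 0xffff };
	struct mini_key pkt = { .src_ip = 0x0a000001, .dst_port = 80 };

	printf("match: %s\n", mini_match(&data, &mask, &pkt) ? "yes" : "no");
	return 0;
}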
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index dc5b3c06d1e0..e612c24fa384 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -959,8 +959,10 @@ void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct iavf_vlan_filter *vlf;
- struct iavf_mac_filter *f;
struct iavf_cloud_filter *cf;
+ struct iavf_fdir_fltr *fdir;
+ struct iavf_mac_filter *f;
+ struct iavf_adv_rss *rss;
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
@@ -996,6 +998,19 @@ void iavf_down(struct iavf_adapter *adapter)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+ /* remove all Flow Director filters */
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list)
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ /* remove all advanced RSS configurations */
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
+ rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __IAVF_RESETTING) {
/* cancel any current operation */
@@ -1007,6 +1022,8 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
@@ -1629,6 +1646,22 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_add_cloud_filter(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
+ iavf_add_fdir_filter(adapter);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
+ iavf_del_fdir_filter(adapter);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
+ iavf_add_adv_rss_cfg(adapter);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
+ iavf_del_adv_rss_cfg(adapter);
+ return 0;
+ }
return -EAGAIN;
}
@@ -2529,7 +2562,7 @@ validate_bw:
}
/**
- * iavf_validate_channel_config - validate queue mapping info
+ * iavf_validate_ch_config - validate queue mapping info
* @adapter: board private structure
* @mqprio_qopt: queue parameters
*
@@ -3525,6 +3558,8 @@ int iavf_process_config(struct iavf_adapter *adapter)
/* Enable cloud filter if ADQ is supported */
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
hw_features |= NETIF_F_HW_TC;
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
+ hw_features |= NETIF_F_GSO_UDP_L4;
netdev->hw_features |= hw_features;
@@ -3738,10 +3773,14 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->mac_vlan_list_lock);
spin_lock_init(&adapter->cloud_filter_list_lock);
+ spin_lock_init(&adapter->fdir_fltr_lock);
+ spin_lock_init(&adapter->adv_rss_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
INIT_LIST_HEAD(&adapter->cloud_filter_list);
+ INIT_LIST_HEAD(&adapter->fdir_list_head);
+ INIT_LIST_HEAD(&adapter->adv_rss_list_head);
INIT_WORK(&adapter->reset_task, iavf_reset_task);
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
@@ -3845,7 +3884,9 @@ static void iavf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf, *cftmp;
struct iavf_hw *hw = &adapter->hw;
@@ -3899,8 +3940,6 @@ static void iavf_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_regions(pdev);
- iavf_free_all_tx_resources(adapter);
- iavf_free_all_rx_resources(adapter);
iavf_free_queues(adapter);
kfree(adapter->vf_res);
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@ -3926,6 +3965,21 @@ static void iavf_remove(struct pci_dev *pdev)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
+ list_del(&fdir->list);
+ kfree(fdir);
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
+ list) {
+ list_del(&rss->list);
+ kfree(rss);
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index ffaf2742a2e0..3525eab8e9f9 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1905,13 +1905,20 @@ static int iavf_tso(struct iavf_tx_buffer *first, u8 *hdr_len,
/* determine offset of inner transport header */
l4_offset = l4.hdr - skb->data;
-
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
- /* compute length of segmentation header */
- *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of UDP segmentation header */
+ *hdr_len = (u8)(sizeof(*l4.udp) + l4_offset);
+ } else {
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of TCP segmentation header */
+ *hdr_len = (u8)((l4.tcp->doff * 4) + l4_offset);
+ }
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
@@ -2098,7 +2105,7 @@ static int iavf_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
}
/**
- * iavf_create_tx_ctx Build the Tx context descriptor
+ * iavf_create_tx_ctx - Build the Tx context descriptor
* @tx_ring: ring to create the descriptor on
* @cd_type_cmd_tso_mss: Quad Word 1
* @cd_tunneling: Quad Word 0 - bits 0-31
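
The iavf_txrx.c hunk above extends iavf_tso() for UDP segmentation offload: with SKB_GSO_UDP_L4 the segmentation header length is the fixed 8-byte UDP header plus the L4 offset, while the TCP path still uses doff * 4 plus the offset. A quick stand-alone check of that arithmetic, with plain integers standing in for the skb and header unions:

#include <stdbool.h>
#include <stdio.h>

/* Length of the headers that precede the segmented payload:
 * everything up to the L4 header, plus the L4 header itself.
 */
static unsigned int seg_hdr_len(bool udp, unsigned int l4_offset,
				unsigned int tcp_doff)
{
	return l4_offset + (udp ? 8 /* sizeof(struct udphdr) */
				: tcp_doff * 4);
}

int main(void)
{
	/* e.g. Ethernet(14) + IPv4(20) = 34 bytes before the L4 header */
	printf("UDP: %u bytes\n", seg_hdr_len(true, 34, 0));	/* 42 */
	printf("TCP: %u bytes\n", seg_hdr_len(false, 34, 5));	/* 54 */
	return 0;
}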
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 647e7fde11b4..0eab3c43bdc5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -140,6 +140,9 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
+ VIRTCHNL_VF_OFFLOAD_USO |
+ VIRTCHNL_VF_OFFLOAD_FDIR_PF |
+ VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF |
VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
@@ -1005,7 +1008,7 @@ iavf_set_adapter_link_speed_from_vpe(struct iavf_adapter *adapter,
}
/**
- * iavf_enable_channel
+ * iavf_enable_channels
* @adapter: adapter structure
*
* Request that the PF enable channels as specified by
@@ -1046,7 +1049,7 @@ void iavf_enable_channels(struct iavf_adapter *adapter)
}
/**
- * iavf_disable_channel
+ * iavf_disable_channels
* @adapter: adapter structure
*
* Request that the PF disable channels that are configured
@@ -1198,6 +1201,200 @@ void iavf_del_cloud_filter(struct iavf_adapter *adapter)
}
/**
+ * iavf_add_fdir_filter - request the PF to add a Flow Director filter
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF add Flow Director filters as specified
+ * by the user via ethtool.
+ **/
+void iavf_add_fdir_filter(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir;
+ struct virtchnl_fdir_add *f;
+ bool process_fltr = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_fdir_add);
+ f = kzalloc(len, GFP_KERNEL);
+ if (!f)
+ return;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+ process_fltr = true;
+ fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
+ memcpy(f, &fdir->vc_add_msg, len);
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (!process_fltr) {
+ /* prevent iavf_add_fdir_filter() from being called when there
+ * are no filters to add
+ */
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
+ kfree(f);
+ return;
+ }
+ adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
+ kfree(f);
+}
+
+/**
+ * iavf_del_fdir_filter - request the PF to delete a Flow Director filter
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF delete Flow Director filters as specified
+ * by the user via ethtool.
+ **/
+void iavf_del_fdir_filter(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir;
+ struct virtchnl_fdir_del f;
+ bool process_fltr = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_fdir_del);
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
+ process_fltr = true;
+ memset(&f, 0, len);
+ f.vsi_id = fdir->vc_add_msg.vsi_id;
+ f.flow_id = fdir->flow_id;
+ fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+
+ if (!process_fltr) {
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ return;
+ }
+
+ adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
+}
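
Both iavf_add_fdir_filter() and iavf_del_fdir_filter() use the same locking shape: scan the list under the spinlock, pick at most one request, copy whatever the virtchnl message needs and move the filter to its *_PENDING state, then drop the lock before messaging the PF. The sketch below approximates that pattern in user space with a pthread mutex; the request structure, states and names are invented for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum req_state { REQ_IDLE, REQ_ADD_REQUEST, REQ_ADD_PENDING };

struct request {
	enum req_state state;
	char msg[32];		/* message to send to the "PF" */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct request requests[4];

/* Service one pending request: snapshot it under the lock, send the
 * copy only after the lock has been released.
 */
static void service_one(void)
{
	char msg[32];
	bool found = false;
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < 4; i++) {
		if (requests[i].state == REQ_ADD_REQUEST) {
			requests[i].state = REQ_ADD_PENDING;
			memcpy(msg, requests[i].msg, sizeof(msg));
			found = true;
			break;
		}
	}
	pthread_mutex_unlock(&lock);

	if (!found)
		return;	/* nothing pending; the driver clears aq_required here */

	/* the potentially slow send happens without holding the lock */
	printf("sending: %s\n", msg);
}

int main(void)
{
	requests[1].state = REQ_ADD_REQUEST;
	snprintf(requests[1].msg, sizeof(requests[1].msg), "add rule 1");
	service_one();
	return 0;
}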
+
+/**
+ * iavf_add_adv_rss_cfg - request the PF to add an RSS configuration
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF add RSS configuration as specified
+ * by the user via ethtool.
+ **/
+void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_rss_cfg *rss_cfg;
+ struct iavf_adv_rss *rss;
+ bool process_rss = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot add RSS configuration, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_rss_cfg);
+ rss_cfg = kzalloc(len, GFP_KERNEL);
+ if (!rss_cfg)
+ return;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
+ process_rss = true;
+ rss->state = IAVF_ADV_RSS_ADD_PENDING;
+ memcpy(rss_cfg, &rss->cfg_msg, len);
+ iavf_print_adv_rss_cfg(adapter, rss,
+ "Input set change for",
+ "is pending");
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (process_rss) {
+ adapter->current_op = VIRTCHNL_OP_ADD_RSS_CFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_RSS_CFG,
+ (u8 *)rss_cfg, len);
+ } else {
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_ADV_RSS_CFG;
+ }
+
+ kfree(rss_cfg);
+}
+
+/**
+ * iavf_del_adv_rss_cfg - request the PF to delete an RSS configuration
+ * @adapter: the VF adapter structure
+ *
+ * Request that the PF delete RSS configuration as specified
+ * by the user via ethtool.
+ **/
+void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
+{
+ struct virtchnl_rss_cfg *rss_cfg;
+ struct iavf_adv_rss *rss;
+ bool process_rss = false;
+ int len;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot remove RSS configuration, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = sizeof(struct virtchnl_rss_cfg);
+ rss_cfg = kzalloc(len, GFP_KERNEL);
+ if (!rss_cfg)
+ return;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
+ if (rss->state == IAVF_ADV_RSS_DEL_REQUEST) {
+ process_rss = true;
+ rss->state = IAVF_ADV_RSS_DEL_PENDING;
+ memcpy(rss_cfg, &rss->cfg_msg, len);
+ break;
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+
+ if (process_rss) {
+ adapter->current_op = VIRTCHNL_OP_DEL_RSS_CFG;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_RSS_CFG,
+ (u8 *)rss_cfg, len);
+ } else {
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
+ }
+
+ kfree(rss_cfg);
+}
+
+/**
* iavf_request_reset
* @adapter: adapter structure
*
@@ -1357,6 +1554,84 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER: {
+ struct iavf_fdir_fltr *fdir, *fdir_tmp;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdir_tmp,
+ &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
+ dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ iavf_print_fdir_fltr(adapter, fdir);
+ if (msglen)
+ dev_err(&adapter->pdev->dev,
+ "%s\n", msg);
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER: {
+ struct iavf_fdir_fltr *fdir;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry(fdir, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG: {
+ struct iavf_adv_rss *rss, *rss_tmp;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry_safe(rss, rss_tmp,
+ &adapter->adv_rss_list_head,
+ list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
+ iavf_print_adv_rss_cfg(adapter, rss,
+ "Failed to change the input set for",
+ NULL);
+ list_del(&rss->list);
+ kfree(rss);
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_RSS_CFG: {
+ struct iavf_adv_rss *rss;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head,
+ list) {
+ if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
+ rss->state = IAVF_ADV_RSS_ACTIVE;
+ dev_err(&adapter->pdev->dev, "Failed to delete RSS configuration, error %s\n",
+ iavf_stat_str(&adapter->hw,
+ v_retval));
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
@@ -1490,6 +1765,87 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER: {
+ struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
+ struct iavf_fdir_fltr *fdir, *fdir_tmp;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdir_tmp,
+ &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
+ if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
+ fdir->loc);
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ fdir->flow_id = add_fltr->flow_id;
+ } else {
+ dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
+ add_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ }
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER: {
+ struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
+ struct iavf_fdir_fltr *fdir, *fdir_tmp;
+
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
+ if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
+ dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
+ fdir->loc);
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
+ del_fltr->status);
+ iavf_print_fdir_fltr(adapter, fdir);
+ }
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+ }
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG: {
+ struct iavf_adv_rss *rss;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry(rss, &adapter->adv_rss_list_head, list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_PENDING) {
+ iavf_print_adv_rss_cfg(adapter, rss,
+ "Input set change for",
+ "successful");
+ rss->state = IAVF_ADV_RSS_ACTIVE;
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+ }
+ break;
+ case VIRTCHNL_OP_DEL_RSS_CFG: {
+ struct iavf_adv_rss *rss, *rss_tmp;
+
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry_safe(rss, rss_tmp,
+ &adapter->adv_rss_list_head, list) {
+ if (rss->state == IAVF_ADV_RSS_DEL_PENDING) {
+ list_del(&rss->list);
+ kfree(rss);
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+ }
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",