Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3')
21 files changed, 2789 insertions, 1022 deletions
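One recurring change in this series is the lockless TX-ring space accounting: ring_space() in hns3_enet.h now does an acquire-load of next_to_clean that pairs with the release-store performed by the new hns3_nic_reclaim_desc() in hns3_enet.c. Below is a minimal userspace sketch of that producer/consumer pairing, using C11 atomics in place of the kernel's smp_load_acquire()/smp_store_release(); the struct layout, names and values are simplified stand-ins for illustration, not the driver's actual ones.

```c
/* Illustrative sketch only (not part of the commit): the TX-ring space
 * accounting pattern this series moves hns3 to, re-expressed with C11
 * atomics instead of the kernel's smp_load_acquire()/smp_store_release().
 * All names and sizes here are hypothetical stand-ins.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DESC_NUM 64 /* ring size; hypothetical value */

struct tx_ring {
	_Atomic int next_to_clean; /* consumer index, advanced by the cleanup path */
	int next_to_use;           /* producer index, advanced by the xmit path */
	int desc_num;
};

/* Producer side: how many descriptors are still free.
 * The acquire-load pairs with the release-store in reclaim_desc(), so the
 * frees done before that store are visible before the new index is used.
 */
static int ring_space(struct tx_ring *ring)
{
	int begin = atomic_load_explicit(&ring->next_to_clean,
					 memory_order_acquire);
	int end = ring->next_to_use;

	return ((end >= begin) ? (ring->desc_num - end + begin)
			       : (begin - end)) - 1;
}

/* Consumer side: after freeing buffers up to 'head', publish the new
 * next_to_clean with release semantics so the producer cannot observe the
 * advanced index before the frees that precede it.
 */
static void reclaim_desc(struct tx_ring *ring, int head)
{
	int ntc = atomic_load_explicit(&ring->next_to_clean,
				       memory_order_relaxed);

	while (head != ntc) {
		/* ... free the buffer attached to descriptor 'ntc' here ... */
		if (++ntc == ring->desc_num)
			ntc = 0;
	}

	atomic_store_explicit(&ring->next_to_clean, ntc,
			      memory_order_release);
}

int main(void)
{
	struct tx_ring ring = { .next_to_use = 10, .desc_num = DESC_NUM };

	reclaim_desc(&ring, 10);             /* pretend 10 BDs were sent */
	printf("free BDs: %d\n", ring_space(&ring));
	return 0;
}
```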
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 299b277bc7ae..83e19c6b974e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -43,6 +43,8 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ + HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ + HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ }; @@ -62,6 +64,8 @@ enum hclge_mbx_vlan_cfg_subcode { HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */ + HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */ + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */ }; #define HCLGE_MBX_MAX_MSG_SIZE 16 @@ -80,12 +84,15 @@ struct hclgevf_mbx_resp_status { struct hclge_mbx_vf_to_pf_cmd { u8 rsv; u8 mbx_src_vfid; /* Auto filled by IMP */ - u8 rsv1[2]; + u8 mbx_need_resp; + u8 rsv1[1]; u8 msg_len; u8 rsv2[3]; u8 msg[HCLGE_MBX_MAX_MSG_SIZE]; }; +#define HCLGE_MBX_NEED_RESP_BIT BIT(0) + struct hclge_mbx_pf_to_vf_cmd { u8 dest_vfid; u8 rsv[3]; @@ -107,7 +114,7 @@ struct hclgevf_mbx_arq_ring { struct hclgevf_dev *hdev; u32 head; u32 tail; - u32 count; + atomic_t count; u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 17ab4f4af6ad..fa8b8506b120 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -76,8 +76,8 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, return inited; } -static int hnae3_match_n_instantiate(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, bool is_reg) +static int hnae3_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { int ret; @@ -87,23 +87,27 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, return 0; } - /* now, (un-)instantiate client by calling lower layer */ - if (is_reg) { - ret = ae_dev->ops->init_client_instance(client, ae_dev); - if (ret) - dev_err(&ae_dev->pdev->dev, - "fail to instantiate client, ret = %d\n", ret); + ret = ae_dev->ops->init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "fail to instantiate client, ret = %d\n", ret); - return ret; - } + return ret; +} + +static void hnae3_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + /* check if this client matches the type of ae_dev */ + if (!(hnae3_client_match(client->type, ae_dev->dev_type) && + hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) + return; if (hnae3_get_client_init_flag(client, ae_dev)) { ae_dev->ops->uninit_client_instance(client, ae_dev); hnae3_set_client_init_flag(client, ae_dev, 0); } - - return 0; } int hnae3_register_client(struct hnae3_client *client) @@ -129,7 +133,7 @@ int hnae3_register_client(struct hnae3_client *client) /* if the client could not be initialized on current port, for * any error reasons, move on to next available port */ - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, 
"match and instantiation failed for port, ret = %d\n", @@ -153,7 +157,7 @@ void hnae3_unregister_client(struct hnae3_client *client) mutex_lock(&hnae3_common_lock); /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); } list_del(&client->node); @@ -205,7 +209,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed, ret = %d\n", @@ -243,7 +247,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) * un-initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); ae_algo->ops->uninit_ae_dev(ae_dev); hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); @@ -301,7 +305,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed, ret = %d\n", @@ -343,7 +347,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) continue; list_for_each_entry(client, &hnae3_client_list, node) - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); ae_algo->ops->uninit_ae_dev(ae_dev); hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 38b430f11fc1..ad21b0ef1946 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -120,6 +120,25 @@ enum hnae3_media_type { HNAE3_MEDIA_TYPE_NONE, }; +/* must be consistent with definition in firmware */ +enum hnae3_module_type { + HNAE3_MODULE_TYPE_UNKNOWN = 0x00, + HNAE3_MODULE_TYPE_FIBRE_LR = 0x01, + HNAE3_MODULE_TYPE_FIBRE_SR = 0x02, + HNAE3_MODULE_TYPE_AOC = 0x03, + HNAE3_MODULE_TYPE_CR = 0x04, + HNAE3_MODULE_TYPE_KR = 0x05, + HNAE3_MODULE_TYPE_TP = 0x06, + +}; + +enum hnae3_fec_mode { + HNAE3_FEC_AUTO = 0, + HNAE3_FEC_BASER, + HNAE3_FEC_RS, + HNAE3_FEC_USER_DEF, +}; + enum hnae3_reset_notify_type { HNAE3_UP_CLIENT, HNAE3_DOWN_CLIENT, @@ -147,6 +166,13 @@ enum hnae3_flr_state { HNAE3_FLR_DONE, }; +enum hnae3_port_base_vlan_state { + HNAE3_PORT_BASE_VLAN_DISABLE, + HNAE3_PORT_BASE_VLAN_ENABLE, + HNAE3_PORT_BASE_VLAN_MODIFY, + HNAE3_PORT_BASE_VLAN_NOCHANGE, +}; + struct hnae3_vector_info { u8 __iomem *io_addr; int vector; @@ -223,10 +249,10 @@ struct hnae3_ae_dev { * non-ok * get_ksettings_an_result() * Get negotiation status,speed and duplex - * update_speed_duplex_h() - * Update hardware speed and duplex * get_media_type() * Get media type of MAC + * check_port_speed() + * Check target speed whether is supported * adjust_link() * Adjust link status * set_loopback() @@ -243,6 +269,8 @@ struct hnae3_ae_dev { * set auto autonegotiation of pause frame use * get_autoneg() * get auto autonegotiation of pause frame use + * restart_autoneg() + * restart autonegotiation * get_coalesce_usecs() * get usecs to delay a TX 
interrupt after a packet is sent * get_rx_max_coalesced_frames() @@ -333,11 +361,15 @@ struct hnae3_ae_ops { void (*get_ksettings_an_result)(struct hnae3_handle *handle, u8 *auto_neg, u32 *speed, u8 *duplex); - int (*update_speed_duplex_h)(struct hnae3_handle *handle); int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed, u8 duplex); - void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type); + void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type); + int (*check_port_speed)(struct hnae3_handle *handle, u32 speed); + void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability, + u8 *fec_mode); + int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode); void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex); int (*set_loopback)(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en); @@ -353,6 +385,7 @@ struct hnae3_ae_ops { int (*set_autoneg)(struct hnae3_handle *handle, bool enable); int (*get_autoneg)(struct hnae3_handle *handle); + int (*restart_autoneg)(struct hnae3_handle *handle); void (*get_coalesce_usecs)(struct hnae3_handle *handle, u32 *tx_usecs, u32 *rx_usecs); @@ -385,7 +418,8 @@ struct hnae3_ae_ops { void (*update_stats)(struct hnae3_handle *handle, struct net_device_stats *net_stats); void (*get_stats)(struct hnae3_handle *handle, u64 *data); - + void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt, + u64 *rx_cnt); void (*get_strings)(struct hnae3_handle *handle, u32 stringset, u8 *data); int (*get_sset_count)(struct hnae3_handle *handle, int stringset); @@ -578,8 +612,13 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support */ + enum hnae3_port_base_vlan_state port_base_vlan_state; + u8 netdev_flags; struct dentry *hnae3_dbgfs; + + /* Network interface message level enabled bits */ + u32 msg_enable; }; #define hnae3_set_field(origin, mask, shift, val) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 0de543faa5b1..fc4917ac44be 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -239,6 +239,10 @@ static void hns3_dbg_help(struct hnae3_handle *h) dev_info(&h->pdev->dev, "queue info [number]\n"); dev_info(&h->pdev->dev, "queue map\n"); dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n"); + + if (!hns3_is_phys_func(h->pdev)) + return; + dev_info(&h->pdev->dev, "dump fd tcam\n"); dev_info(&h->pdev->dev, "dump tc\n"); dev_info(&h->pdev->dev, "dump tm map [q_num]\n"); @@ -247,6 +251,9 @@ static void hns3_dbg_help(struct hnae3_handle *h) dev_info(&h->pdev->dev, "dump qos pri map\n"); dev_info(&h->pdev->dev, "dump qos buf cfg\n"); dev_info(&h->pdev->dev, "dump mng tbl\n"); + dev_info(&h->pdev->dev, "dump reset info\n"); + dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n"); + dev_info(&h->pdev->dev, "dump mac tnl status\n"); memset(printf_buf, 0, HNS3_DBG_BUF_LEN); strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]", @@ -341,6 +348,8 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer, ret = hns3_dbg_bd_info(handle, cmd_buf); else if (handle->ae_algo->ops->dbg_run_cmd) ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf); + else + ret = -EOPNOTSUPP; if (ret) hns3_dbg_help(handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 162cb9afa0e7..18711e0f9bdf 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -35,6 +35,13 @@ static const char hns3_driver_string[] = static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; static struct hnae3_client client; +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, " Network interface message level setting"); + +#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -67,7 +74,7 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector) { struct hns3_enet_tqp_vector *tqp_vector = vector; - napi_schedule(&tqp_vector->napi); + napi_schedule_irqoff(&tqp_vector->napi); return IRQ_HANDLED; } @@ -730,95 +737,6 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, return 0; } -static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) -{ - union l3_hdr_info l3; - union l4_hdr_info l4; - unsigned char *l2_hdr; - u8 l4_proto = ol4_proto; - u32 ol2_len; - u32 ol3_len; - u32 ol4_len; - u32 l2_len; - u32 l3_len; - - l3.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - - /* compute L2 header size for normal packet, defined in 2 Bytes */ - l2_len = l3.hdr - skb->data; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); - - /* tunnel packet*/ - if (skb->encapsulation) { - /* compute OL2 header size, defined in 2 Bytes */ - ol2_len = l2_len; - hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_S, ol2_len >> 1); - - /* compute OL3 header size, defined in 4 Bytes */ - ol3_len = l4.hdr - l3.hdr; - hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, - ol3_len >> 2); - - /* MAC in UDP, MAC in GRE (0x6558)*/ - if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { - /* switch MAC header ptr from outer to inner header.*/ - l2_hdr = skb_inner_mac_header(skb); - - /* compute OL4 header size, defined in 4 Bytes. */ - ol4_len = l2_hdr - l4.hdr; - hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L4LEN_S, ol4_len >> 2); - - /* switch IP header ptr from outer to inner header */ - l3.hdr = skb_inner_network_header(skb); - - /* compute inner l2 header size, defined in 2 Bytes. */ - l2_len = l3.hdr - l2_hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, - l2_len >> 1); - } else { - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } - - /* switch L4 header pointer from outer to inner */ - l4.hdr = skb_inner_transport_header(skb); - - l4_proto = il4_proto; - } - - /* compute inner(/normal) L3 header size, defined in 4 Bytes */ - l3_len = l4.hdr - l3.hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); - - /* compute inner(/normal) L4 header size, defined in 4 Bytes */ - switch (l4_proto) { - case IPPROTO_TCP: - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, - l4.tcp->doff); - break; - case IPPROTO_SCTP: - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, - (sizeof(struct sctphdr) >> 2)); - break; - case IPPROTO_UDP: - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, - (sizeof(struct udphdr) >> 2)); - break; - default: - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } -} - /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL * and it is udp packet, which has a dest port as the IANA assigned. 
* the hardware is expected to do the checksum offload, but the @@ -827,12 +745,12 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, */ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) { -#define IANA_VXLAN_PORT 4789 union l4_hdr_info l4; l4.hdr = skb_transport_header(skb); - if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT))) + if (!(!skb->encapsulation && + l4.udp->dest == htons(IANA_VXLAN_UDP_PORT))) return false; skb_checksum_help(skb); @@ -840,46 +758,71 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) return true; } -static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) +static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u32 *ol_type_vlan_len_msec) { + u32 l2_len, l3_len, l4_len; + unsigned char *il2_hdr; union l3_hdr_info l3; - u32 l4_proto = ol4_proto; + union l4_hdr_info l4; l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); - /* define OL3 type and tunnel type(OL4).*/ - if (skb->encapsulation) { - /* define outer network header type.*/ - if (skb->protocol == htons(ETH_P_IP)) { - if (skb_is_gso(skb)) - hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); - else - hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); - - } else if (skb->protocol == htons(ETH_P_IPV6)) { - hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV6); - } + /* compute OL2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - skb->data; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); - /* define tunnel type(OL4).*/ - switch (l4_proto) { - case IPPROTO_UDP: + /* compute OL3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); + + il2_hdr = skb_inner_mac_header(skb); + /* compute OL4 header size, defined in 4 Bytes. */ + l4_len = il2_hdr - l4.hdr; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); + + /* define outer network header type */ + if (skb->protocol == htons(ETH_P_IP)) { + if (skb_is_gso(skb)) hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); - break; - case IPPROTO_GRE: + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); + else hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); - break; - default: + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); + } + + if (ol4_proto == IPPROTO_UDP) + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + else if (ol4_proto == IPPROTO_GRE) + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); +} + +static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + unsigned char *l2_hdr = l2_hdr = skb->data; + u32 l4_proto = ol4_proto; + union l4_hdr_info l4; + union l3_hdr_info l3; + u32 l2_len, l3_len; + + l4.hdr = skb_transport_header(skb); + l3.hdr = skb_network_header(skb); + + /* handle encapsulation skb */ + if (skb->encapsulation) { + /* If this is a not UDP/GRE encapsulation skb */ + if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { /* drop the skb tunnel packet if hardware don't support, * because hardware can't calculate csum when TSO. 
*/ @@ -893,7 +836,12 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, return 0; } + hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); + + /* switch to inner header */ + l2_hdr = skb_inner_mac_header(skb); l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); l4_proto = il4_proto; } @@ -911,11 +859,22 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, HNS3_L3T_IPV6); } + /* compute inner(/normal) L2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - l2_hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); + + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, HNS3_L4T_TCP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + l4.tcp->doff); break; case IPPROTO_UDP: if (hns3_tunnel_csum_bug(skb)) @@ -924,11 +883,15 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, HNS3_L4T_UDP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; case IPPROTO_SCTP: hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, HNS3_L4T_SCTP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -963,6 +926,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, { #define HNS3_TX_VLAN_PRIO_SHIFT 13 + struct hnae3_handle *handle = tx_ring->tqp->handle; + + /* Since HW limitation, if port based insert VLAN enabled, only one VLAN + * header is allowed in skb, otherwise it will cause RAS error. + */ + if (unlikely(skb_vlan_tagged_multi(skb) && + handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_ENABLE)) + return -EINVAL; + if (skb->protocol == htons(ETH_P_8021Q) && !(tx_ring->tqp->handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { @@ -984,8 +957,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use inner_vtag in one tag case. 
*/ if (skb->protocol == htons(ETH_P_8021Q)) { - hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); - *out_vtag = vlan_tag; + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE){ + hns3_set_field(*out_vlan_flag, + HNS3_TXD_OVLAN_B, 1); + *out_vtag = vlan_tag; + } else { + hns3_set_field(*inner_vlan_flag, + HNS3_TXD_VLAN_B, 1); + *inner_vtag = vlan_tag; + } } else { hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; @@ -1012,7 +993,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; struct device *dev = ring_to_dev(ring); - u16 bdtp_fe_sc_vld_ra_ri = 0; struct skb_frag_struct *frag; unsigned int frag_buf_num; int k, sizeoflast; @@ -1042,12 +1022,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); if (unlikely(ret)) return ret; - hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); + + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); if (unlikely(ret)) return ret; @@ -1073,19 +1051,37 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); } - if (unlikely(dma_mapping_error(ring->dev, dma))) { + if (unlikely(dma_mapping_error(dev, dma))) { ring->stats.sw_err_cnt++; return -ENOMEM; } desc_cb->length = size; + if (likely(size <= HNS3_MAX_BD_SIZE)) { + u16 bdtp_fe_sc_vld_ra_ri = 0; + + desc_cb->priv = priv; + desc_cb->dma = dma; + desc_cb->type = type; + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16(size); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); + desc->tx.bdtp_fe_sc_vld_ra_ri = + cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + ring_ptr_move_fw(ring, next_to_use); + return 0; + } + frag_buf_num = hns3_tx_bd_count(size); sizeoflast = size & HNS3_TX_LAST_SIZE_M; sizeoflast = sizeoflast ? 
sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { + u16 bdtp_fe_sc_vld_ra_ri = 0; + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ desc_cb->priv = priv; desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; @@ -1112,64 +1108,92 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return 0; } -static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) +static int hns3_nic_bd_num(struct sk_buff *skb) { - struct sk_buff *skb = *out_skb; - struct sk_buff *new_skb = NULL; - struct skb_frag_struct *frag; - int bdnum_for_frag; - int frag_num; - int buf_num; - int size; - int i; + int size = skb_headlen(skb); + int i, bd_num; - size = skb_headlen(skb); - buf_num = hns3_tx_bd_count(size); + /* if the total len is within the max bd limit */ + if (likely(skb->len <= HNS3_MAX_BD_SIZE)) + return skb_shinfo(skb)->nr_frags + 1; + + bd_num = hns3_tx_bd_count(size); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + int frag_bd_num; - frag_num = skb_shinfo(skb)->nr_frags; - for (i = 0; i < frag_num; i++) { - frag = &skb_shinfo(skb)->frags[i]; size = skb_frag_size(frag); - bdnum_for_frag = hns3_tx_bd_count(size); - if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) + frag_bd_num = hns3_tx_bd_count(size); + + if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG)) return -ENOMEM; - buf_num += bdnum_for_frag; + bd_num += frag_bd_num; } - if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { - buf_num = hns3_tx_bd_count(skb->len); - if (ring_space(ring) < buf_num) - return -EBUSY; - /* manual split the send packet */ - new_skb = skb_copy(skb, GFP_ATOMIC); - if (!new_skb) - return -ENOMEM; - dev_kfree_skb_any(skb); - *out_skb = new_skb; - } + return bd_num; +} - if (unlikely(ring_space(ring) < buf_num)) - return -EBUSY; +static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) +{ + if (!skb->encapsulation) + return skb_transport_offset(skb) + tcp_hdrlen(skb); - *bnum = buf_num; - return 0; + return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); } -static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) +/* HW need every continuous 8 buffer data to be larger than MSS, + * we simplify it by ensuring skb_headlen + the first continuous + * 7 frags to to be larger than gso header len + mss, and the remaining + * continuous 7 frags to be larger than MSS except the last 7 frags. + */ +static bool hns3_skb_need_linearized(struct sk_buff *skb) +{ + int bd_limit = HNS3_MAX_BD_PER_FRAG - 1; + unsigned int tot_len = 0; + int i; + + for (i = 0; i < bd_limit; i++) + tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]); + + /* ensure headlen + the first 7 frags is greater than mss + header + * and the first 7 frags is greater than mss. 
+ */ + if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size + + hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size)) + return true; + + /* ensure the remaining continuous 7 buffer is greater than mss */ + for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) { + tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]); + tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]); + + if (tot_len < skb_shinfo(skb)->gso_size) + return true; + } + + return false; +} + +static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, + struct sk_buff **out_skb) { struct sk_buff *skb = *out_skb; - struct sk_buff *new_skb = NULL; - int buf_num; + int bd_num; - /* No. of segments (plus a header) */ - buf_num = skb_shinfo(skb)->nr_frags + 1; + bd_num = hns3_nic_bd_num(skb); + if (bd_num < 0) + return bd_num; - if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { - buf_num = hns3_tx_bd_count(skb->len); - if (ring_space(ring) < buf_num) + if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) { + struct sk_buff *new_skb; + + if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb)) + goto out; + + bd_num = hns3_tx_bd_count(skb->len); + if (unlikely(ring_space(ring) < bd_num)) return -EBUSY; /* manual split the send packet */ new_skb = skb_copy(skb, GFP_ATOMIC); @@ -1177,14 +1201,17 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, return -ENOMEM; dev_kfree_skb_any(skb); *out_skb = new_skb; + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_copy++; + u64_stats_update_end(&ring->syncp); } - if (unlikely(ring_space(ring) < buf_num)) +out: + if (unlikely(ring_space(ring) < bd_num)) return -EBUSY; - *bnum = buf_num; - - return 0; + return bd_num; } static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) @@ -1197,6 +1224,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) if (ring->next_to_use == next_to_use_orig) break; + /* rollback one */ + ring_ptr_move_bw(ring, next_to_use); + /* unmap the descriptor dma address */ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) dma_unmap_single(dev, @@ -1210,9 +1240,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) DMA_TO_DEVICE); ring->desc_cb[ring->next_to_use].length = 0; - - /* rollback one */ - ring_ptr_move_bw(ring, next_to_use); + ring->desc_cb[ring->next_to_use].dma = 0; } } @@ -1225,7 +1253,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) struct netdev_queue *dev_queue; struct skb_frag_struct *frag; int next_to_use_head; - int next_to_use_frag; int buf_num; int seg_num; int size; @@ -1235,22 +1262,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) /* Prefetch the data used later */ prefetch(skb->data); - switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { - case -EBUSY: - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); + buf_num = hns3_nic_maybe_stop_tx(ring, &skb); + if (unlikely(buf_num <= 0)) { + if (buf_num == -EBUSY) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + goto out_net_tx_busy; + } else if (buf_num == -ENOMEM) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + } - goto out_net_tx_busy; - case -ENOMEM: - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - netdev_err(netdev, "no memory to xmit!\n"); + if 
(net_ratelimit()) + netdev_err(netdev, "xmit error: %d!\n", buf_num); goto out_err_tx_ok; - default: - break; } /* No. of segments (plus a header) */ @@ -1263,9 +1291,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, DESC_TYPE_SKB); if (unlikely(ret)) - goto head_fill_err; + goto fill_err; - next_to_use_frag = ring->next_to_use; /* Fill the fragments */ for (i = 1; i < seg_num; i++) { frag = &skb_shinfo(skb)->frags[i - 1]; @@ -1276,7 +1303,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) DESC_TYPE_PAGE); if (unlikely(ret)) - goto frag_fill_err; + goto fill_err; } /* Complete translate all packets */ @@ -1289,10 +1316,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; -frag_fill_err: - hns3_clear_desc(ring, next_to_use_frag); - -head_fill_err: +fill_err: hns3_clear_desc(ring, next_to_use_head); out_err_tx_ok: @@ -1355,13 +1379,6 @@ static int hns3_nic_set_features(struct net_device *netdev, bool enable; int ret; - if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - else - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } - if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { enable = !!(features & NETIF_F_GRO_HW); ret = h->ae_algo->ops->set_gro_en(h, enable); @@ -1574,6 +1591,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) struct hnae3_handle *h = hns3_get_handle(netdev); int ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (!h->ae_algo->ops->set_mtu) return -EOPNOTSUPP; @@ -1590,13 +1610,19 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) { struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = hns3_get_handle(ndev); struct hns3_enet_ring *tx_ring = NULL; + struct napi_struct *napi; int timeout_queue = 0; int hw_head, hw_tail; + int fbd_num, fbd_oft; + int ebd_num, ebd_oft; + int bd_num, bd_err; + int ring_en, tc; int i; /* Find the stopped queue the same way the stack does */ - for (i = 0; i < ndev->real_num_tx_queues; i++) { + for (i = 0; i < ndev->num_tx_queues; i++) { struct netdev_queue *q; unsigned long trans_start; @@ -1617,21 +1643,66 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) return false; } + priv->tx_timeout_count++; + tx_ring = priv->ring_data[timeout_queue].ring; + napi = &tx_ring->tqp_vector->napi; + + netdev_info(ndev, + "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", + priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, + tx_ring->next_to_clean, napi->state); + + netdev_info(ndev, + "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n", + tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, + tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt); + + netdev_info(ndev, + "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n", + tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt, + tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); + + /* When mac received many pause frames continuous, it's unable to send + * packets, which may cause tx timeout + */ + if (h->ae_algo->ops->update_stats && + h->ae_algo->ops->get_mac_pause_stats) { + u64 tx_pause_cnt, rx_pause_cnt; + + h->ae_algo->ops->update_stats(h, &ndev->stats); + 
h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt, + &rx_pause_cnt); + netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", + tx_pause_cnt, rx_pause_cnt); + } hw_head = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); hw_tail = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); + fbd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_FBDNUM_REG); + fbd_oft = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_OFFSET_REG); + ebd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_EBDNUM_REG); + ebd_oft = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_EBD_OFFSET_REG); + bd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_BD_NUM_REG); + bd_err = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_BD_ERR_REG); + ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); + tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); + netdev_info(ndev, - "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", - priv->tx_timeout_count, - timeout_queue, - tx_ring->next_to_use, - tx_ring->next_to_clean, - hw_head, - hw_tail, + "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", + bd_num, hw_head, hw_tail, bd_err, readl(tx_ring->tqp_vector->mask_addr)); + netdev_info(ndev, + "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", + ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); return true; } @@ -1644,8 +1715,6 @@ static void hns3_nic_net_timeout(struct net_device *ndev) if (!hns3_get_tx_timeo_queue_info(ndev)) return; - priv->tx_timeout_count++; - /* request the reset, and let the hclge to determine * which reset level should be done */ @@ -1670,7 +1739,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, }; -static bool hns3_is_phys_func(struct pci_dev *pdev) +bool hns3_is_phys_func(struct pci_dev *pdev) { u32 dev_id = pdev->device; @@ -2117,17 +2186,30 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) ring->desc[i].rx.bd_base_info = 0; } -static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, - int *pkts) +static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, + int *bytes, int *pkts) { - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + int ntc = ring->next_to_clean; + struct hns3_desc_cb *desc_cb; - (*pkts) += (desc_cb->type == DESC_TYPE_SKB); - (*bytes) += desc_cb->length; - /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ - hns3_free_buffer_detach(ring, ring->next_to_clean); + while (head != ntc) { + desc_cb = &ring->desc_cb[ntc]; + (*pkts) += (desc_cb->type == DESC_TYPE_SKB); + (*bytes) += desc_cb->length; + /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ + hns3_free_buffer_detach(ring, ntc); - ring_ptr_move_fw(ring, next_to_clean); + if (++ntc == ring->desc_num) + ntc = 0; + + /* Issue prefetch for next Tx descriptor */ + prefetch(&ring->desc_cb[ntc]); + } + + /* This smp_store_release() pairs with smp_load_acquire() in + * ring_space called by hns3_nic_net_xmit. 
+ */ + smp_store_release(&ring->next_to_clean, ntc); } static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) @@ -2167,11 +2249,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring) bytes = 0; pkts = 0; - while (head != ring->next_to_clean) { - hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); - /* Issue prefetch for next Tx descriptor */ - prefetch(&ring->desc_cb[ring->next_to_clean]); - } + hns3_nic_reclaim_desc(ring, head, &bytes, &pkts); ring->tqp_vector->tx_group.total_bytes += bytes; ring->tqp_vector->tx_group.total_packets += pkts; @@ -2233,6 +2311,10 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) break; } hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); + + u64_stats_update_begin(&ring->syncp); + ring->stats.non_reuse_pg++; + u64_stats_update_end(&ring->syncp); } ring_ptr_move_fw(ring, next_to_use); @@ -2246,64 +2328,78 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) { - struct hns3_desc *desc; - u32 truesize; - int size; - int last_offset; - bool twobufs; - - twobufs = ((PAGE_SIZE < 8192) && - hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048); - - desc = &ring->desc[ring->next_to_clean]; - size = le16_to_cpu(desc->rx.size); - - truesize = hnae3_buf_size(ring); - - if (!twobufs) - last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring); + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + int size = le16_to_cpu(desc->rx.size); + u32 truesize = hnae3_buf_size(ring); skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize); - /* Avoid re-using remote pages,flag default unreuse */ - if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) - return; - - if (twobufs) { - /* If we are only owner of page we can reuse it */ - if (likely(page_count(desc_cb->priv) == 1)) { - /* Flip page offset to other buffer */ - desc_cb->page_offset ^= truesize; - - desc_cb->reuse_flag = 1; - /* bump ref count on page before it is given*/ - get_page(desc_cb->priv); - } + /* Avoid re-using remote pages, or the stack is still using the page + * when page_offset rollback to zero, flag default unreuse + */ + if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) || + (!desc_cb->page_offset && page_count(desc_cb->priv) > 1)) return; - } /* Move offset up to the next cache line */ desc_cb->page_offset += truesize; - if (desc_cb->page_offset <= last_offset) { + if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) { desc_cb->reuse_flag = 1; /* Bump ref count on page before it is given*/ get_page(desc_cb->priv); + } else if (page_count(desc_cb->priv) == 1) { + desc_cb->reuse_flag = 1; + desc_cb->page_offset = 0; + get_page(desc_cb->priv); } } +static int hns3_gro_complete(struct sk_buff *skb) +{ + __be16 type = skb->protocol; + struct tcphdr *th; + int depth = 0; + + while (type == htons(ETH_P_8021Q)) { + struct vlan_hdr *vh; + + if ((depth + VLAN_HLEN) > skb_headlen(skb)) + return -EFAULT; + + vh = (struct vlan_hdr *)(skb->data + depth); + type = vh->h_vlan_encapsulated_proto; + depth += VLAN_HLEN; + } + + if (type == htons(ETH_P_IP)) { + depth += sizeof(struct iphdr); + } else if (type == htons(ETH_P_IPV6)) { + depth += sizeof(struct ipv6hdr); + } else { + netdev_err(skb->dev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", + be16_to_cpu(type), depth); + return -EFAULT; + } + + th = (struct tcphdr *)(skb->data + depth); + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; + if (th->cwr) + 
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + return 0; +} + static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, - struct hns3_desc *desc) + u32 l234info, u32 bd_base_info, u32 ol_info) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; int l3_type, l4_type; - u32 bd_base_info; int ol4_type; - u32 l234info; - - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); skb->ip_summed = CHECKSUM_NONE; @@ -2312,12 +2408,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (!(netdev->features & NETIF_F_RXCSUM)) return; - /* We MUST enable hardware checksum before enabling hardware GRO */ - if (skb_shinfo(skb)->gso_size) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - } - /* check if hardware has done checksum */ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) return; @@ -2332,7 +2422,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; } - ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, + ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); switch (ol4_type) { case HNS3_OL4_TYPE_MAC_IN_UDP: @@ -2370,6 +2460,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, struct hns3_desc *desc, u32 l234info, u16 *vlan_tag) { + struct hnae3_handle *handle = ring->tqp->handle; struct pci_dev *pdev = ring->tqp->handle->pdev; if (pdev->revision == 0x20) { @@ -2382,15 +2473,36 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 +#define HNS3_STRP_BOTH 0x3 + /* Hardware always insert VLAN tag into RX descriptor when + * remove the tag from packet, driver needs to determine + * reporting which tag to stack. 
+ */ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); return true; case HNS3_STRP_INNER_VLAN: + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); return true; + case HNS3_STRP_BOTH: + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + else + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + + return true; default: return false; } @@ -2437,7 +2549,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length, ring->stats.seg_pkt_cnt++; u64_stats_update_end(&ring->syncp); - ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); + ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); __skb_put(skb, ring->pull_len); hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, desc_cb); @@ -2512,8 +2624,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, return 0; } -static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, - u32 bd_base_info) +static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 l234info, + u32 bd_base_info, u32 ol_info) { u16 gro_count; u32 l3_type; @@ -2521,12 +2634,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M, HNS3_RXD_GRO_COUNT_S); /* if there is no HW GRO, do not set gro params */ - if (!gro_count) - return; + if (!gro_count) { + hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); + return 0; + } - /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count - * to skb_shinfo(skb)->gso_segs - */ NAPI_GRO_CB(skb)->count = gro_count; l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, @@ -2536,47 +2648,121 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, else if (l3_type == HNS3_L3_TYPE_IPV6) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; else - return; + return -EFAULT; skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M, HNS3_RXD_GRO_SIZE_S); - if (skb_shinfo(skb)->gso_size) - tcp_gro_complete(skb); + + return hns3_gro_complete(skb); } static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, - struct sk_buff *skb) + struct sk_buff *skb, u32 rss_hash) { struct hnae3_handle *handle = ring->tqp->handle; enum pkt_hash_types rss_type; - struct hns3_desc *desc; - int last_bd; - - /* When driver handle the rss type, ring->next_to_clean indicates the - * first descriptor of next packet, need -1 here. 
- */ - last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num; - desc = &ring->desc[last_bd]; - if (le32_to_cpu(desc->rx.rss_hash)) + if (rss_hash) rss_type = handle->kinfo.rss_type; else rss_type = PKT_HASH_TYPE_NONE; - skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type); + skb_set_hash(skb, rss_hash, rss_type); } -static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, - struct sk_buff **out_skb) +static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; enum hns3_pkt_l2t_type l2_frame_type; + u32 bd_base_info, l234info, ol_info; + struct hns3_desc *desc; + unsigned int len; + int pre_ntc, ret; + + /* bdinfo handled below is only valid on the last BD of the + * current packet, and ring->next_to_clean indicates the first + * descriptor of next packet, so need - 1 below. + */ + pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : + (ring->desc_num - 1); + desc = &ring->desc[pre_ntc]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + ol_info = le32_to_cpu(desc->rx.ol_info); + + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. + */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_tag); + } + + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) { + u64_stats_update_begin(&ring->syncp); + ring->stats.non_vld_descs++; + u64_stats_update_end(&ring->syncp); + + return -EINVAL; + } + + if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | + BIT(HNS3_RXD_L2E_B))))) { + u64_stats_update_begin(&ring->syncp); + if (l234info & BIT(HNS3_RXD_L2E_B)) + ring->stats.l2_err++; + else + ring->stats.err_pkt_len++; + u64_stats_update_end(&ring->syncp); + + return -EFAULT; + } + + len = skb->len; + + /* Do update ip stack process */ + skb->protocol = eth_type_trans(skb, netdev); + + /* This is needed in order to enable forwarding support */ + ret = hns3_set_gro_and_checksum(ring, skb, l234info, + bd_base_info, ol_info); + if (unlikely(ret)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_err_cnt++; + u64_stats_update_end(&ring->syncp); + return ret; + } + + l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, + HNS3_RXD_DMAC_S); + + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_pkts++; + ring->stats.rx_bytes += len; + + if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) + ring->stats.rx_multicast++; + + u64_stats_update_end(&ring->syncp); + + ring->tqp_vector->rx_group.total_bytes += len; + + hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); + return 0; +} + +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, + struct sk_buff **out_skb) +{ struct sk_buff *skb = ring->skb; struct hns3_desc_cb *desc_cb; struct hns3_desc *desc; u32 bd_base_info; - u32 l234info; int length; int ret; @@ -2636,64 +2822,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ALIGN(ring->pull_len, sizeof(long))); } - l234info = le32_to_cpu(desc->rx.l234_info); - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - - /* Based on hw strategy, the tag offloaded will be stored at - * ot_vlan_tag in two layer tag case, and stored at vlan_tag - * in one layer tag case. 
- */ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - u16 vlan_tag; - - if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); - } - - if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.non_vld_descs++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EINVAL; - } - - if (unlikely((!desc->rx.pkt_len) || - (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | - BIT(HNS3_RXD_L2E_B))))) { - u64_stats_update_begin(&ring->syncp); - if (l234info & BIT(HNS3_RXD_L2E_B)) - ring->stats.l2_err++; - else - ring->stats.err_pkt_len++; - u64_stats_update_end(&ring->syncp); - + ret = hns3_handle_bdinfo(ring, skb); + if (unlikely(ret)) { dev_kfree_skb_any(skb); - return -EFAULT; + return ret; } - - l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, - HNS3_RXD_DMAC_S); - u64_stats_update_begin(&ring->syncp); - if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) - ring->stats.rx_multicast++; - - ring->stats.rx_pkts++; - ring->stats.rx_bytes += skb->len; - u64_stats_update_end(&ring->syncp); - - ring->tqp_vector->rx_group.total_bytes += skb->len; - - /* This is needed in order to enable forwarding support */ - hns3_set_gro_param(skb, l234info, bd_base_info); - - hns3_rx_checksum(ring, skb, desc); *out_skb = skb; - hns3_set_rx_skb_rss_type(ring, skb); return 0; } @@ -2703,9 +2838,8 @@ int hns3_clean_rx_ring( void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) { #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; int recv_pkts, recv_bds, clean_count, err; - int unused_count = hns3_desc_unused(ring) - ring->pending_buf; + int unused_count = hns3_desc_unused(ring); struct sk_buff *skb = ring->skb; int num; @@ -2714,6 +2848,7 @@ int hns3_clean_rx_ring( recv_pkts = 0, recv_bds = 0, clean_count = 0; num -= unused_count; + unused_count -= ring->pending_buf; while (recv_pkts < budget && recv_bds < num) { /* Reuse or realloc buffers */ @@ -2740,8 +2875,6 @@ int hns3_clean_rx_ring( continue; } - /* Do update ip stack process */ - skb->protocol = eth_type_trans(skb, netdev); rx_fn(ring, skb); recv_bds += ring->pending_buf; clean_count += ring->pending_buf; @@ -2891,7 +3024,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) struct hns3_enet_tqp_vector *tqp_vector = container_of(napi, struct hns3_enet_tqp_vector, napi); bool clean_complete = true; - int rx_budget; + int rx_budget = budget; if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { napi_complete(napi); @@ -2905,7 +3038,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) hns3_clean_tx_ring(ring); /* make sure rx ring budget not smaller than 1 */ - rx_budget = max(budget / tqp_vector->num_tqps, 1); + if (tqp_vector->num_tqps > 1) + rx_budget = max(budget / tqp_vector->num_tqps, 1); hns3_for_each_ring(ring, tqp_vector->rx_group) { int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, @@ -3316,6 +3450,7 @@ err: } devm_kfree(&pdev->dev, priv->ring_data); + priv->ring_data = NULL; return ret; } @@ -3324,12 +3459,16 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv) struct hnae3_handle *h = priv->ae_handle; int i; + if (!priv->ring_data) + return; + for (i = 0; i < h->kinfo.num_tqps; i++) { devm_kfree(priv->dev, priv->ring_data[i].ring); devm_kfree(priv->dev, priv->ring_data[i + h->kinfo.num_tqps].ring); } devm_kfree(priv->dev, priv->ring_data); + priv->ring_data = NULL; } static int 
hns3_alloc_ring_memory(struct hns3_enet_ring *ring) @@ -3339,8 +3478,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) if (ring->desc_num <= 0 || ring->buf_size <= 0) return -EINVAL; - ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), - GFP_KERNEL); + ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, + sizeof(ring->desc_cb[0]), GFP_KERNEL); if (!ring->desc_cb) { ret = -ENOMEM; goto out; @@ -3361,7 +3500,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) out_with_desc: hns3_free_desc(ring); out_with_desc_cb: - kfree(ring->desc_cb); + devm_kfree(ring_to_dev(ring), ring->desc_cb); ring->desc_cb = NULL; out: return ret; @@ -3370,7 +3509,7 @@ out: static void hns3_fini_ring(struct hns3_enet_ring *ring) { hns3_free_desc(ring); - kfree(ring->desc_cb); + devm_kfree(ring_to_dev(ring), ring->desc_cb); ring->desc_cb = NULL; ring->next_to_clean = 0; ring->next_to_use = 0; @@ -3557,17 +3696,6 @@ static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) h->ae_algo->ops->del_all_fd_entries(h, clear_list); } -static void hns3_nic_set_priv_ops(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - - if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - else - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; -} - static int hns3_client_start(struct hnae3_handle *handle) { if (!handle->ae_algo->ops->client_start) @@ -3584,6 +3712,21 @@ static void hns3_client_stop(struct hnae3_handle *handle) handle->ae_algo->ops->client_stop(handle); } +static void hns3_info_show(struct hns3_nic_priv *priv) +{ + struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + + dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); + dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps); + dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size); + dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size); + dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len); + dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc); + dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc); + dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc); + dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -3605,6 +3748,8 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->tx_timeout_count = 0; set_bit(HNS3_NIC_STATE_DOWN, &priv->state); + handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); + handle->kinfo.netdev = netdev; handle->priv = (void *)priv; @@ -3617,7 +3762,6 @@ static int hns3_client_init(struct hnae3_handle *handle) netdev->netdev_ops = &hns3_nic_netdev_ops; SET_NETDEV_DEV(netdev, &pdev->dev); hns3_ethtool_set_ops(netdev); - hns3_nic_set_priv_ops(netdev); /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -3671,6 +3815,9 @@ static int hns3_client_init(struct hnae3_handle *handle) set_bit(HNS3_NIC_STATE_INITED, &priv->state); + if (netif_msg_drv(handle)) + hns3_info_show(priv); + return ret; out_client_start: @@ -3697,13 +3844,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_client_stop(handle); - hns3_remove_hw_addr(netdev); if (netdev->reg_state != 
NETREG_UNINITIALIZED) unregister_netdev(netdev); + hns3_client_stop(handle); + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { netdev_warn(netdev, "already uninitialized\n"); goto out_netdev_free; @@ -3729,8 +3876,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_dbg_uninit(handle); - priv->ring_data = NULL; - out_netdev_free: free_netdev(netdev); } @@ -3745,11 +3890,13 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) if (linkup) { netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); - netdev_info(netdev, "link up\n"); + if (netif_msg_link(handle)) + netdev_info(netdev, "link up\n"); } else { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); - netdev_info(netdev, "link down\n"); + if (netif_msg_link(handle)) + netdev_info(netdev, "link down\n"); } } @@ -3773,12 +3920,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev) struct netdev_hw_addr *ha, *tmp; int ret = 0; + netif_addr_lock_bh(ndev); /* go through and sync uc_addr entries to the device */ list = &ndev->uc; list_for_each_entry_safe(ha, tmp, &list->list, list) { ret = hns3_nic_uc_sync(ndev, ha->addr); if (ret) - return ret; + goto out; } /* go through and sync mc_addr entries to the device */ @@ -3786,9 +3934,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev) list_for_each_entry_safe(ha, tmp, &list->list, list) { ret = hns3_nic_mc_sync(ndev, ha->addr); if (ret) - return ret; + goto out; } +out: + netif_addr_unlock_bh(ndev); return ret; } @@ -3799,6 +3949,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev) hns3_nic_uc_unsync(netdev, netdev->dev_addr); + netif_addr_lock_bh(netdev); /* go through and unsync uc_addr entries to the device */ list = &netdev->uc; list_for_each_entry_safe(ha, tmp, &list->list, list) @@ -3809,6 +3960,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev) list_for_each_entry_safe(ha, tmp, &list->list, list) if (ha->refcount > 1) hns3_nic_mc_unsync(netdev, ha->addr); + + netif_addr_unlock_bh(netdev); } static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) @@ -3850,6 +4003,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) ring_ptr_move_fw(ring, next_to_use); } + /* Free the pending skb in rx ring */ + if (ring->skb) { + dev_kfree_skb_any(ring->skb); + ring->skb = NULL; + ring->pending_buf = 0; + } + return 0; } @@ -4048,18 +4208,24 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) if (ret) goto err_uninit_vector; + ret = hns3_client_start(handle); + if (ret) { + dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret); + goto err_uninit_ring; + } + set_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; +err_uninit_ring: + hns3_uninit_all_ring(priv); err_uninit_vector: hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; err_dealloc_vector: hns3_nic_dealloc_vector_data(priv); err_put_ring: hns3_put_ring_config(priv); - priv->ring_data = NULL; return ret; } @@ -4101,7 +4267,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { netdev_warn(netdev, "already uninitialized\n"); return 0; } @@ -4121,9 +4287,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) netdev_err(netdev, "uninit ring error\n"); hns3_put_ring_config(priv); - priv->ring_data = NULL; - - clear_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 75669cd0c311..c14480f9b625 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -42,8 +42,10 @@ enum hns3_nic_state { #define HNS3_RING_TX_RING_HEAD_REG 0x0005C #define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 #define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068 #define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C - +#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070 +#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074 #define HNS3_RING_PREFETCH_EN_REG 0x0007C #define HNS3_RING_CFG_VF_NUM_REG 0x00080 #define HNS3_RING_ASID_REG 0x0008C @@ -374,6 +376,7 @@ struct ring_stats { u64 tx_err_cnt; u64 restart_queue; u64 tx_busy; + u64 tx_copy; }; struct { u64 rx_pkts; @@ -386,6 +389,7 @@ struct ring_stats { u64 l2_err; u64 l3l4_csum_err; u64 rx_multicast; + u64 non_reuse_pg; }; }; }; @@ -397,7 +401,6 @@ struct hns3_enet_ring { struct hns3_enet_ring *next; struct hns3_enet_tqp_vector *tqp_vector; struct hnae3_queue *tqp; - char ring_name[HNS3_RING_NAME_LEN]; struct device *dev; /* will be used for DMA mapping of descriptors */ /* statistic */ @@ -407,9 +410,6 @@ struct hns3_enet_ring { dma_addr_t desc_dma_addr; u32 buf_size; /* size for hnae_desc->addr, preset by AE */ u16 desc_num; /* total number of desc */ - u16 max_desc_num_per_pkt; - u16 max_raw_data_sz_per_desc; - u16 max_pkt_size; int next_to_use; /* idx of next spare desc */ /* idx of lastest sent desc, the ring is empty when equal to @@ -423,9 +423,6 @@ struct hns3_enet_ring { u32 flag; /* ring attribute */ - int numa_node; - cpumask_t affinity_mask; - int pending_buf; struct sk_buff *skb; struct sk_buff *tail_skb; @@ -442,11 +439,6 @@ struct hns3_nic_ring_data { void (*fini_process)(struct hns3_nic_ring_data *); }; -struct hns3_nic_ops { - int (*maybe_stop_tx)(struct sk_buff **out_skb, - int *bnum, struct hns3_enet_ring *ring); -}; - enum hns3_flow_level_range { HNS3_FLOW_LOW = 0, HNS3_FLOW_MID = 1, @@ -536,7 +528,6 @@ struct hns3_nic_priv { u32 port_id; struct net_device *netdev; struct device *dev; - struct hns3_nic_ops ops; /** * the cb for nic to manage the ring buffer, the first half of the @@ -577,18 +568,16 @@ union l4_hdr_info { unsigned char *hdr; }; -/* the distance between [begin, end) in a ring buffer - * note: there is a unuse slot between the begin and the end - */ -static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) -{ - return (end - begin + 
ring->desc_num) % ring->desc_num; -} - static inline int ring_space(struct hns3_enet_ring *ring) { - return ring->desc_num - - ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring. + */ + int begin = smp_load_acquire(&ring->next_to_clean); + int end = READ_ONCE(ring->next_to_use); + + return ((end >= begin) ? (ring->desc_num - end + begin) : + (begin - end)) - 1; } static inline int is_ring_empty(struct hns3_enet_ring *ring) @@ -633,7 +622,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) -#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) +#define ring_to_dev(ring) ((ring)->dev) #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \ DMA_TO_DEVICE : DMA_FROM_DEVICE) @@ -666,6 +655,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +bool hns3_is_phys_func(struct pci_dev *pdev); int hns3_clean_rx_ring( struct hns3_enet_ring *ring, int budget, void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 359d4731fb2d..d1588ea6132c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -29,6 +29,7 @@ static const struct hns3_stats hns3_txq_stats[] = { HNS3_TQP_STAT("errors", tx_err_cnt), HNS3_TQP_STAT("wake", restart_queue), HNS3_TQP_STAT("busy", tx_busy), + HNS3_TQP_STAT("copy", tx_copy), }; #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) @@ -48,6 +49,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { HNS3_TQP_STAT("l2_err", l2_err), HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), HNS3_TQP_STAT("multicast", rx_multicast), + HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg), }; #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) @@ -483,6 +485,11 @@ static void hns3_get_stats(struct net_device *netdev, struct hnae3_handle *h = hns3_get_handle(netdev); u64 *p = data; + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting, could not get stats\n"); + return; + } + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { netdev_err(netdev, "could not get any statistics\n"); return; @@ -599,6 +606,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops; + u8 module_type; u8 media_type; u8 link_stat; @@ -607,7 +615,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev, ops = h->ae_algo->ops; if (ops->get_media_type) - ops->get_media_type(h, &media_type); + ops->get_media_type(h, &media_type, &module_type); else return -EOPNOTSUPP; @@ -617,7 +625,15 @@ static int hns3_get_link_ksettings(struct net_device *netdev, hns3_get_ksettings(h, cmd); break; case HNAE3_MEDIA_TYPE_FIBER: - cmd->base.port = PORT_FIBRE; + if (module_type == HNAE3_MODULE_TYPE_CR) + cmd->base.port = PORT_DA; + else + cmd->base.port = PORT_FIBRE; + + hns3_get_ksettings(h, cmd); + break; + case HNAE3_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; hns3_get_ksettings(h, cmd); break; case HNAE3_MEDIA_TYPE_COPPER: @@ -645,14 +661,79 @@ static int 
hns3_get_link_ksettings(struct net_device *netdev, return 0; } +static int hns3_check_ksettings_param(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN; + u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; + u8 autoneg; + u32 speed; + u8 duplex; + int ret; + + if (ops->get_ksettings_an_result) { + ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex); + if (cmd->base.autoneg == autoneg && cmd->base.speed == speed && + cmd->base.duplex == duplex) + return 0; + } + + if (ops->get_media_type) + ops->get_media_type(handle, &media_type, &module_type); + + if (cmd->base.duplex != DUPLEX_FULL && + media_type != HNAE3_MEDIA_TYPE_COPPER) { + netdev_err(netdev, + "only copper port supports half duplex!"); + return -EINVAL; + } + + if (ops->check_port_speed) { + ret = ops->check_port_speed(handle, cmd->base.speed); + if (ret) { + netdev_err(netdev, "unsupported speed\n"); + return ret; + } + } + + return 0; +} + static int hns3_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + int ret = 0; + + /* Chip doesn't support this mode. */ + if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF) + return -EINVAL; + /* Only support ksettings_set for netdev with phy attached for now */ if (netdev->phydev) return phy_ethtool_ksettings_set(netdev->phydev, cmd); - return -EOPNOTSUPP; + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + ret = hns3_check_ksettings_param(netdev, cmd); + if (ret) + return ret; + + if (ops->set_autoneg) { + ret = ops->set_autoneg(handle, cmd->base.autoneg); + if (ret) + return ret; + } + + if (ops->cfg_mac_speed_dup_h) + ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed, + cmd->base.duplex); + + return ret; } static u32 hns3_get_rss_key_size(struct net_device *netdev) @@ -857,19 +938,36 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) static int hns3_nway_reset(struct net_device *netdev) { + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; struct phy_device *phy = netdev->phydev; + int autoneg; if (!netif_running(netdev)) return 0; - /* Only support nway_reset for netdev with phy attached for now */ - if (!phy) + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting!"); + return -EBUSY; + } + + if (!ops->get_autoneg || !ops->restart_autoneg) return -EOPNOTSUPP; - if (phy->autoneg != AUTONEG_ENABLE) + autoneg = ops->get_autoneg(handle); + if (autoneg != AUTONEG_ENABLE) { + netdev_err(netdev, + "Autoneg is off, don't support to restart it\n"); return -EINVAL; + } + + if (phy) + return genphy_restart_aneg(phy); + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; - return genphy_restart_aneg(phy); + return ops->restart_autoneg(handle); } static void hns3_get_channels(struct net_device *netdev, @@ -1101,6 +1199,95 @@ static int hns3_set_phys_id(struct net_device *netdev, return h->ae_algo->ops->set_led_id(h, state); } +static u32 hns3_get_msglevel(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + return h->msg_enable; +} + +static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + h->msg_enable = 
msg_level; +} + +/* Translate local fec value into ethtool value. */ +static unsigned int loc_to_eth_fec(u8 loc_fec) +{ + u32 eth_fec = 0; + + if (loc_fec & BIT(HNAE3_FEC_AUTO)) + eth_fec |= ETHTOOL_FEC_AUTO; + if (loc_fec & BIT(HNAE3_FEC_RS)) + eth_fec |= ETHTOOL_FEC_RS; + if (loc_fec & BIT(HNAE3_FEC_BASER)) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate ethtool fec value into local value. */ +static unsigned int eth_to_loc_fec(unsigned int eth_fec) +{ + u32 loc_fec = 0; + + if (eth_fec & ETHTOOL_FEC_OFF) + return loc_fec; + + if (eth_fec & ETHTOOL_FEC_AUTO) + loc_fec |= BIT(HNAE3_FEC_AUTO); + if (eth_fec & ETHTOOL_FEC_RS) + loc_fec |= BIT(HNAE3_FEC_RS); + if (eth_fec & ETHTOOL_FEC_BASER) + loc_fec |= BIT(HNAE3_FEC_BASER); + + return loc_fec; +} + +static int hns3_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + u8 fec_ability; + u8 fec_mode; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + if (!ops->get_fec) + return -EOPNOTSUPP; + + ops->get_fec(handle, &fec_ability, &fec_mode); + + fec->fec = loc_to_eth_fec(fec_ability); + fec->active_fec = loc_to_eth_fec(fec_mode); + + return 0; +} + +static int hns3_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + u32 fec_mode; + + if (handle->pdev->revision == 0x20) + return -EOPNOTSUPP; + + if (!ops->set_fec) + return -EOPNOTSUPP; + fec_mode = eth_to_loc_fec(fec->fec); + return ops->set_fec(handle, fec_mode); +} + static const struct ethtool_ops hns3vf_ethtool_ops = { .get_drvinfo = hns3_get_drvinfo, .get_ringparam = hns3_get_ringparam, @@ -1121,6 +1308,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_regs_len = hns3_get_regs_len, .get_regs = hns3_get_regs, .get_link = hns3_get_link, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, }; static const struct ethtool_ops hns3_ethtool_ops = { @@ -1150,6 +1339,10 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_regs_len = hns3_get_regs_len, .get_regs = hns3_get_regs, .set_phys_id = hns3_set_phys_id, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, + .get_fecparam = hns3_get_fecparam, + .set_fecparam = hns3_set_fecparam, }; void hns3_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 3a093a92eac5..fbd904e3077c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -355,7 +355,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) int ret; spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock_bh(&hdev->hw.cmq.crq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); hdev->hw.cmq.csq.next_to_clean = 0; hdev->hw.cmq.csq.next_to_use = 0; @@ -364,7 +364,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) hclge_cmd_init_regs(&hdev->hw); - spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); @@ -373,21 +373,26 @@ int hclge_cmd_init(struct hclge_dev *hdev) * reset may happen when lower level reset is being processed. 
*/ if ((hclge_is_reset_pending(hdev))) { - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - return -EBUSY; + ret = -EBUSY; + goto err_cmd_init; } ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, "firmware version query failed %d\n", ret); - return ret; + goto err_cmd_init; } hdev->fw_version = version; dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; + +err_cmd_init: + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + + return ret; } static void hclge_cmd_uninit_regs(struct hclge_hw *hw) @@ -411,7 +416,7 @@ static void hclge_destroy_queue(struct hclge_cmq_ring *ring) spin_unlock(&ring->lock); } -void hclge_destroy_cmd_queue(struct hclge_hw *hw) +static void hclge_destroy_cmd_queue(struct hclge_hw *hw) { hclge_destroy_queue(&hw->cmq.csq); hclge_destroy_queue(&hw->cmq.crq); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 3714733c96d9..d79a209b80f6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -109,7 +109,11 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310, + HCLGE_OPC_MAC_TNL_INT_EN = 0x0311, + HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312, HCLGE_OPC_SERDES_LOOPBACK = 0x0315, + HCLGE_OPC_CONFIG_FEC_MODE = 0x031A, /* PFC/Pause commands */ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, @@ -237,8 +241,11 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + /* NCL config command */ + HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, + /* SFP command */ - HCLGE_OPC_SFP_GET_SPEED = 0x7104, + HCLGE_OPC_GET_SFP_INFO = 0x7104, /* Error INT commands */ HCLGE_MAC_COMMON_INT_EN = 0x030E, @@ -593,9 +600,30 @@ struct hclge_config_auto_neg_cmd { u8 rsv[20]; }; -struct hclge_sfp_speed_cmd { - __le32 sfp_speed; - u32 rsv[5]; +struct hclge_sfp_info_cmd { + __le32 speed; + u8 query_type; /* 0: sfp speed, 1: active speed */ + u8 active_fec; + u8 autoneg; /* autoneg state */ + u8 autoneg_ability; /* whether support autoneg */ + __le32 speed_ability; /* speed ability for current media */ + __le32 module_type; + u8 rsv[8]; +}; + +#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0 +#define HCLGE_MAC_CFG_FEC_MODE_S 1 +#define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1) +#define HCLGE_MAC_CFG_FEC_SET_DEF_B 0 +#define HCLGE_MAC_CFG_FEC_CLR_DEF_B 1 + +#define HCLGE_MAC_FEC_OFF 0 +#define HCLGE_MAC_FEC_BASER 1 +#define HCLGE_MAC_FEC_RS 2 +struct hclge_config_fec_cmd { + u8 fec_mode; + u8 default_config; + u8 rsv[22]; }; #define HCLGE_MAC_UPLINK_PORT 0x100 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 1192cf6f2321..a9ffb57c4607 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -901,6 +901,109 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) } } +static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) +{ + dev_info(&hdev->pdev->dev, "PF reset count: %d\n", + hdev->rst_stats.pf_rst_cnt); + dev_info(&hdev->pdev->dev, "FLR reset count: %d\n", + hdev->rst_stats.flr_rst_cnt); + dev_info(&hdev->pdev->dev, "CORE reset count: %d\n", + hdev->rst_stats.core_rst_cnt); + dev_info(&hdev->pdev->dev, "GLOBAL reset count: %d\n", + hdev->rst_stats.global_rst_cnt); + 
dev_info(&hdev->pdev->dev, "IMP reset count: %d\n", + hdev->rst_stats.imp_rst_cnt); + dev_info(&hdev->pdev->dev, "reset done count: %d\n", + hdev->rst_stats.reset_done_cnt); + dev_info(&hdev->pdev->dev, "HW reset done count: %d\n", + hdev->rst_stats.hw_reset_done_cnt); + dev_info(&hdev->pdev->dev, "reset count: %d\n", + hdev->rst_stats.reset_cnt); +} + +/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file + * @hdev: pointer to struct hclge_dev + * @cmd_buf: string that contains offset and length + */ +static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf) +{ +#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096 +#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4) +#define HCLGE_CMD_DATA_NUM 6 + + struct hclge_desc desc[5]; + u32 byte_offset; + int bd_num = 5; + int offset; + int length; + int data0; + int ret; + int i; + int j; + + ret = sscanf(cmd_buf, "%x %x", &offset, &length); + if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET || + length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) { + dev_err(&hdev->pdev->dev, "Invalid offset or length.\n"); + return; + } + if (offset < 0 || length <= 0) { + dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n"); + return; + } + + dev_info(&hdev->pdev->dev, "offset | data\n"); + + while (length > 0) { + data0 = offset; + if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH) + data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16; + else + data0 |= length << 16; + ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, + HCLGE_OPC_QUERY_NCL_CONFIG); + if (ret) + return; + + byte_offset = offset; + for (i = 0; i < bd_num; i++) { + for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { + if (i == 0 && j == 0) + continue; + + dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n", + byte_offset, + le32_to_cpu(desc[i].data[j])); + byte_offset += sizeof(u32); + length -= sizeof(u32); + if (length <= 0) + return; + } + } + offset += HCLGE_MAX_NCL_CONFIG_LENGTH; + } +} + +/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt + * @hdev: pointer to struct hclge_dev + */ +static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev) +{ +#define HCLGE_BILLION_NANO_SECONDS 1000000000 + + struct hclge_mac_tnl_stats stats; + unsigned long rem_nsec; + + dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n"); + + while (kfifo_get(&hdev->mac_tnl_log, &stats)) { + rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); + dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n", + (unsigned long)stats.time, rem_nsec / 1000, + stats.status); + } +} + int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -924,6 +1027,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) hclge_dbg_dump_mng_table(hdev); } else if (strncmp(cmd_buf, "dump reg", 8) == 0) { hclge_dbg_dump_reg_cmd(hdev, cmd_buf); + } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) { + hclge_dbg_dump_rst_info(hdev); + } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) { + hclge_dbg_dump_ncl_config(hdev, + &cmd_buf[sizeof("dump ncl_config")]); + } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) { + hclge_dbg_dump_mac_tnl_status(hdev); } else { dev_info(&hdev->pdev->dev, "unknown command\n"); return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 1f52d11f77b5..4ac80634c984 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -4,287 +4,468 @@ #include "hclge_err.h" static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { - { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { - { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = 
BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { - { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { - { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_igu_int[] = { - { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" }, + { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { - { .int_msk = BIT(0), .msg = "rx_buf_overflow" }, - { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" }, - { .int_msk = BIT(3), .msg = "tx_buf_overflow" }, - { .int_msk = BIT(4), .msg = "tx_buf_underrun" }, - { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" }, + { .int_msk = BIT(0), .msg = "rx_buf_overflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(3), .msg = "tx_buf_overflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(4), .msg = "tx_buf_underrun", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(5), .msg = 
"rx_stp_buf_overflow", + .reset_level = HNAE3_CORE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ncsi_err_int[] = { - { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { - { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" }, - { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" }, - { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" }, - { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" }, - { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" }, - { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" }, - { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" }, - { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" }, - { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" }, - { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" }, - { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" }, - { .int_msk = BIT(27), - .msg = "flow_director_ad_mem0_ecc_mbit_err" }, - { .int_msk = BIT(28), - .msg = "flow_director_ad_mem1_ecc_mbit_err" }, - { .int_msk = BIT(29), - .msg = "rx_vlan_tag_memory_ecc_mbit_err" }, - { .int_msk = BIT(30), - .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" }, + { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", + .reset_level = 
HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" }, - { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" }, + { .int_msk = BIT(0), .msg = "tx_vlan_tag_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" }, - { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" }, + { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_tm_sch_rint[] = { - { .int_msk = BIT(1), .msg = 
"tm_sch_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" }, - { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" }, - { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" }, - { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" }, - { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" }, - { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" }, - { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" }, - { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" }, - { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" }, - { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" }, - { .int_msk = BIT(12), - .msg = "tm_sch_port_shap_offset_fifo_wr_err" }, - { .int_msk = BIT(13), - .msg = "tm_sch_port_shap_offset_fifo_rd_err" }, - { .int_msk = BIT(14), - .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" }, - { .int_msk = BIT(15), - .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" }, - { .int_msk = BIT(16), - .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" }, - { .int_msk = BIT(17), - .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" }, - { .int_msk = BIT(18), - .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" }, - { .int_msk = BIT(19), - .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" }, - { .int_msk = BIT(20), - .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" }, - { .int_msk = BIT(21), - .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" }, - { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" }, - { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" }, - { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" }, - { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" }, - { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" }, - { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" }, - { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" }, - { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" }, - { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" }, - { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" }, + { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = 
"tm_sch_pg_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { - { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" }, - { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" }, - { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" }, - { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" }, - { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" }, - { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" }, - { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" }, - { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" }, - { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" }, - { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" }, - { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" }, - { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" }, - { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" }, - { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" }, - { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" }, - { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" }, - { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" }, - { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" }, + { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", + 
.reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { - { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { - { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = 
"cge_igu_afifo_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" }, - { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" }, - { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" }, - { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" }, - { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" }, - { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" }, - { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" }, + { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { - { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" }, - { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" }, - { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" }, - { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" }, - { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" }, - { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" }, - { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" }, - { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" }, - { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" }, - { .int_msk = BIT(26), .msg = "rd_bus_err" }, - { .int_msk = BIT(27), .msg = "wr_bus_err" }, - { .int_msk = BIT(28), .msg = "reg_search_miss" }, - { .int_msk = BIT(29), .msg = "rx_q_search_miss" }, - { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" }, - { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" }, + { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = 
"rpu_rx_pkt_bit35_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "rd_bus_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "wr_bus_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "reg_search_miss", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "rx_q_search_miss", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "over_8bd_no_fe" }, - { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" }, - { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" }, - { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" }, - { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" }, - { .int_msk = BIT(5), .msg = "buf_wait_timeout" }, + { .int_msk = BIT(0), .msg = "over_8bd_no_fe", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", + .reset_level = HNAE3_FUNC_RESET }, + { .int_msk = BIT(5), .msg = "buf_wait_timeout", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ssu_com_err_int[] = { - { .int_msk = BIT(0), .msg = "buf_sum_err" }, - { .int_msk = BIT(1), .msg = "ppp_mb_num_err" }, - { .int_msk = BIT(2), .msg = "ppp_mbid_err" }, - { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" }, - { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" }, - { .int_msk = BIT(5), .msg = "cks_edit_position_err" }, - { .int_msk = BIT(6), .msg = 
"cks_edit_condition_err" }, - { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" }, - { .int_msk = BIT(8), .msg = "vlan_num_ot_err" }, - { .int_msk = BIT(9), .msg = "vlan_num_in_err" }, + { .int_msk = BIT(0), .msg = "buf_sum_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "ppp_mb_num_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(2), .msg = "ppp_mbid_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "ppp_rlt_host_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "cks_edit_position_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "cks_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "vlan_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "vlan_num_ot_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "vlan_num_in_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; #define HCLGE_SSU_MEM_ECC_ERR(x) \ - { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" } + { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \ + .reset_level = HNAE3_GLOBAL_RESET } static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { HCLGE_SSU_MEM_ECC_ERR(0), @@ -323,62 +504,106 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { }; static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, - { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" }, - { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" }, - { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" }, - { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" }, - { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" }, - { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" }, - { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" }, - { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" }, - { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" }, - { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" }, - { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" }, - { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" }, + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; 
static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { - { .int_msk = BIT(0), .msg = "ig_mac_inf_int" }, - { .int_msk = BIT(1), .msg = "ig_host_inf_int" }, - { .int_msk = BIT(2), .msg = "ig_roc_buf_int" }, - { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" }, - { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" }, - { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" }, - { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" }, - { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" }, - { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" }, - { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" }, - { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" }, - { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" }, - { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" }, - { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" }, - { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" }, - { .int_msk = BIT(15), .msg = "host_cmd_fifo_int" }, - { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" }, - { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" }, - { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" }, - { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" }, - { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" }, - { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" }, - { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" }, - { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" }, + { .int_msk = BIT(0), .msg = "ig_mac_inf_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "ig_host_inf_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "ig_roc_buf_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qm_eof_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "host_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = 
"bank0_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { - { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" }, - { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" }, - { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" }, - { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" }, + { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, - { .int_msk = BIT(9), .msg = "low_water_line_err_port" }, - { .int_msk = BIT(10), .msg = "hi_water_line_err_port" }, + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "low_water_line_err_port", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(10), .msg = "hi_water_line_err_port", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; @@ -406,16 +631,29 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { { /* sentinel */ } }; -static void hclge_log_error(struct device *dev, char *reg, - const struct hclge_hw_error *err, - u32 err_sts) +static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, + u32 err_sts) { + enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET; + bool need_reset = false; + while (err->msg) { - if (err->int_msk & err_sts) + if (err->int_msk & err_sts) { dev_warn(dev, "%s %s found [error status=0x%x]\n", reg, err->msg, err_sts); + if (err->reset_level != HNAE3_NONE_RESET && + err->reset_level >= reset_level) { + reset_level = err->reset_level; + need_reset = true; + } + } err++; } + if (need_reset) + return reset_level; + else + return HNAE3_NONE_RESET; } /* hclge_cmd_query_error: read the error information @@ -454,6 +692,16 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, return ret; } +static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false); + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; @@ -673,6 +921,21 @@ static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en) return ret; } +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN); + else + desc.data[0] = 0; + + desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, bool en) { @@ -826,6 +1089,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, int num) { struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + enum hnae3_reset_type reset_level; struct device *dev = &hdev->pdev->dev; __le32 *desc_data; u32 status; @@ -845,78 +1109,94 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, /* log 
HNS common errors */ status = le32_to_cpu(desc[0].data[0]); if (status) { - hclge_log_error(dev, "IMP_TCM_ECC_INT_STS", - &hclge_imp_tcm_ecc_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS", + &hclge_imp_tcm_ecc_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(desc[0].data[1]); if (status) { - hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS", - &hclge_cmdq_nic_mem_ecc_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS", + &hclge_cmdq_nic_mem_ecc_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) { dev_warn(dev, "imp_rd_data_poison_err found\n"); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET); } status = le32_to_cpu(desc[0].data[3]); if (status) { - hclge_log_error(dev, "TQP_INT_ECC_INT_STS", - &hclge_tqp_int_ecc_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS", + &hclge_tqp_int_ecc_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(desc[0].data[4]); if (status) { - hclge_log_error(dev, "MSIX_ECC_INT_STS", - &hclge_msix_sram_ecc_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS", + &hclge_msix_sram_ecc_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log SSU(Storage Switch Unit) errors */ desc_data = (__le32 *)&desc[2]; status = le32_to_cpu(*(desc_data + 2)); if (status) { - hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", - &hclge_ssu_mem_ecc_err_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0", + &hclge_ssu_mem_ecc_err_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(*(desc_data + 3)) & BIT(0); if (status) { dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n", status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); } status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK; if (status) { - hclge_log_error(dev, "SSU_COMMON_ERR_INT", - &hclge_ssu_com_err_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT", + &hclge_ssu_com_err_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log IGU(Ingress Unit) errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK; - if (status) - hclge_log_error(dev, "IGU_INT_STS", - &hclge_igu_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "IGU_INT_STS", + &hclge_igu_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } /* log PPP(Programmable Packet Process) errors */ desc_data = (__le32 *)&desc[4]; status = le32_to_cpu(*(desc_data + 1)); - if (status) - hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1", - &hclge_ppp_mpf_abnormal_int_st1[0], status); + if (status) { + reset_level = + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1", + &hclge_ppp_mpf_abnormal_int_st1[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK; - if (status) - 
hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3", - &hclge_ppp_mpf_abnormal_int_st3[0], status); + if (status) { + reset_level = + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3", + &hclge_ppp_mpf_abnormal_int_st3[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } /* log PPU(RCB) errors */ desc_data = (__le32 *)&desc[5]; @@ -924,55 +1204,60 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, if (status) { dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n", "rpu_rx_pkt_ecc_mbit_err"); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); } status = le32_to_cpu(*(desc_data + 2)); if (status) { - hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", - &hclge_ppu_mpf_abnormal_int_st2[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK; if (status) { - hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3", - &hclge_ppu_mpf_abnormal_int_st3[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3", + &hclge_ppu_mpf_abnormal_int_st3[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log TM(Traffic Manager) errors */ desc_data = (__le32 *)&desc[6]; status = le32_to_cpu(*desc_data); if (status) { - hclge_log_error(dev, "TM_SCH_RINT", - &hclge_tm_sch_rint[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "TM_SCH_RINT", + &hclge_tm_sch_rint[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log QCN(Quantized Congestion Control) errors */ desc_data = (__le32 *)&desc[7]; status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK; if (status) { - hclge_log_error(dev, "QCN_FIFO_RINT", - &hclge_qcn_fifo_rint[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "QCN_FIFO_RINT", + &hclge_qcn_fifo_rint[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK; if (status) { - hclge_log_error(dev, "QCN_ECC_RINT", - &hclge_qcn_ecc_rint[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "QCN_ECC_RINT", + &hclge_qcn_ecc_rint[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log NCSI errors */ desc_data = (__le32 *)&desc[9]; status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK; if (status) { - hclge_log_error(dev, "NCSI_ECC_INT_RPT", - &hclge_ncsi_err_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT", + &hclge_ncsi_err_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* clear all main PF RAS errors */ @@ -1000,6 +1285,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, { struct hnae3_ae_dev *ae_dev = hdev->ae_dev; struct device *dev = &hdev->pdev->dev; + enum hnae3_reset_type reset_level; __le32 *desc_data; u32 status; int ret; @@ -1018,38 +1304,47 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, /* log SSU(Storage Switch Unit) errors */ status = le32_to_cpu(desc[0].data[0]); if (status) { - hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", - &hclge_ssu_port_based_err_int[0], status); - 
HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_err_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(desc[0].data[1]); if (status) { - hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT", - &hclge_ssu_fifo_overflow_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT", + &hclge_ssu_fifo_overflow_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } status = le32_to_cpu(desc[0].data[2]); if (status) { - hclge_log_error(dev, "SSU_ETS_TCG_INT", - &hclge_ssu_ets_tcg_int[0], status); - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT", + &hclge_ssu_ets_tcg_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); } /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */ desc_data = (__le32 *)&desc[1]; status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK; - if (status) - hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", - &hclge_igu_egu_tnl_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", + &hclge_igu_egu_tnl_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } /* log PPU(RCB) errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK; - if (status) - hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", - &hclge_ppu_pf_abnormal_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0", + &hclge_ppu_pf_abnormal_int[0], + status); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level); + } /* clear all PF RAS errors */ hclge_cmd_reuse_desc(&desc[0], false); @@ -1341,16 +1636,15 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) int hclge_handle_hw_msix_error(struct hclge_dev *hdev, unsigned long *reset_requests) { + struct hclge_mac_tnl_stats mac_tnl_stats; struct device *dev = &hdev->pdev->dev; u32 mpf_bd_num, pf_bd_num, bd_num; + enum hnae3_reset_type reset_level; struct hclge_desc desc_bd; struct hclge_desc *desc; __le32 *desc_data; - int ret = 0; u32 status; - - /* set default handling */ - set_bit(HNAE3_FUNC_RESET, reset_requests); + int ret; /* query the number of bds for the MSIx int status */ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM, @@ -1359,8 +1653,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "fail(%d) to query msix int status bd num\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); return ret; } @@ -1381,8 +1673,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); goto msi_error; } @@ -1390,9 +1680,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, desc_data = (__le32 *)&desc[1]; status = le32_to_cpu(*desc_data); if (status) { - hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R", - &hclge_mac_afifo_tnl_int[0], status); - set_bit(HNAE3_GLOBAL_RESET, reset_requests); + reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R", + &hclge_mac_afifo_tnl_int[0], + status); + set_bit(reset_level, reset_requests); } /* log PPU(RCB) MPF errors */ @@ -1400,9 +1691,11 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, status = le32_to_cpu(*(desc_data + 2)) & HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; 
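For orientation, the RAS and MSI-X hunks above and below all follow one pattern: hclge_log_error() now walks a per-block hclge_hw_error table, logs every status bit that is set, and returns the strongest reset_level attached to those bits, which the caller then records instead of a hard-coded reset type. A minimal standalone C sketch of that pattern, with simplified names standing in for the driver's real hnae3/hclge types (not the driver's exact code):

	/* simplified stand-ins for enum hnae3_reset_type and struct hclge_hw_error */
	enum rst_level { RST_NONE, RST_FUNC, RST_GLOBAL };

	struct hw_err_entry {
		unsigned int int_msk;		/* status bit this entry describes */
		const char *msg;		/* NULL msg terminates the table   */
		enum rst_level reset_level;	/* reset needed when the bit is set */
	};

	static enum rst_level log_and_rate_errors(const struct hw_err_entry *e,
						  unsigned int status)
	{
		enum rst_level worst = RST_NONE;

		for (; e->msg; e++) {
			if (!(e->int_msk & status))
				continue;
			/* the real driver logs e->msg with dev_warn() here */
			if (e->reset_level > worst)
				worst = e->reset_level;
		}
		return worst;	/* caller feeds this into its reset_requests bitmap */
	}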
if (status) { - hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", - &hclge_ppu_mpf_abnormal_int_st2[0], status); - set_bit(HNAE3_CORE_RESET, reset_requests); + reset_level = + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], + status); + set_bit(reset_level, reset_requests); } /* clear all main PF MSIx errors */ @@ -1413,8 +1706,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); goto msi_error; } @@ -1428,32 +1719,37 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); goto msi_error; } /* log SSU PF errors */ status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK; if (status) { - hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", - &hclge_ssu_port_based_pf_int[0], status); - set_bit(HNAE3_GLOBAL_RESET, reset_requests); + reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_pf_int[0], + status); + set_bit(reset_level, reset_requests); } /* read and log PPP PF errors */ desc_data = (__le32 *)&desc[2]; status = le32_to_cpu(*desc_data); - if (status) - hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", - &hclge_ppp_pf_abnormal_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", + &hclge_ppp_pf_abnormal_int[0], + status); + set_bit(reset_level, reset_requests); + } /* log PPU(RCB) PF errors */ desc_data = (__le32 *)&desc[3]; status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; - if (status) - hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST", - &hclge_ppu_pf_abnormal_int[0], status); + if (status) { + reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST", + &hclge_ppu_pf_abnormal_int[0], + status); + set_bit(reset_level, reset_requests); + } /* clear all PF MSIx errors */ hclge_cmd_reuse_desc(&desc[0], false); @@ -1463,8 +1759,31 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev, if (ret) { dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret); - /* reset everything for now */ - set_bit(HNAE3_GLOBAL_RESET, reset_requests); + } + + /* query and clear mac tnl interruptions */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT, + true); + ret = hclge_cmd_send(&hdev->hw, &desc[0], 1); + if (ret) { + dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret); + goto msi_error; + } + + status = le32_to_cpu(desc->data[0]); + if (status) { + /* When mac tnl interrupt occurs, we record current time and + * register status here in a fifo, then clear the status. So + * that if link status changes suddenly at some time, we can + * query them by debugfs. 
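These added lines timestamp each MAC TNL event with local_clock(), push it into the mac_tnl_log kfifo (declared later in hclge_main.h via DECLARE_KFIFO with HCLGE_MAC_TNL_LOG_SIZE entries), and only then clear the hardware status, so the event history survives for later inspection. A rough sketch of how such a ring could be drained by a debugfs-style dump helper; the helper name is illustrative, only the fifo and struct hclge_mac_tnl_stats come from this patch:

	/* illustrative dump routine, assuming hdev->mac_tnl_log as added here */
	static void dump_mac_tnl_log(struct hclge_dev *hdev, struct device *dev)
	{
		struct hclge_mac_tnl_stats ent;

		while (kfifo_get(&hdev->mac_tnl_log, &ent))
			dev_info(dev, "mac tnl: time %llu status 0x%x\n",
				 ent.time, ent.status);
	}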
+ */ + mac_tnl_stats.time = local_clock(); + mac_tnl_stats.status = status; + kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats); + ret = hclge_clear_mac_tnl_int(hdev); + if (ret) + dev_err(dev, "clear mac tnl int failed (%d)\n", ret); + set_bit(HNAE3_NONE_RESET, reset_requests); } msi_error: diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index fc068280d391..9645590c9294 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -47,6 +47,9 @@ #define HCLGE_NCSI_ERR_INT_TYPE 0x9 #define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF #define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF +#define HCLGE_MAC_TNL_INT_EN GENMASK(7, 0) +#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(7, 0) +#define HCLGE_MAC_TNL_INT_CLR GENMASK(7, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) #define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) @@ -112,8 +115,10 @@ struct hclge_hw_blk { struct hclge_hw_error { u32 int_msk; const char *msg; + enum hnae3_reset_type reset_level; }; +int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en); int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state); pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev); int hclge_handle_hw_msix_error(struct hclge_dev *hdev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index deda606c51e7..d3b1f8cb1155 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -12,6 +12,7 @@ #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/if_vlan.h> +#include <linux/crash_dump.h> #include <net/rtnetlink.h> #include "hclge_cmd.h" #include "hclge_dcb.h" @@ -31,6 +32,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle); static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, u16 *allocated_size, bool is_alloc); @@ -697,6 +699,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) p = hclge_tqps_get_stats(handle, p); } +static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt, + u64 *rx_cnt) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num; + *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num; +} + static int hclge_parse_func_status(struct hclge_dev *hdev, struct hclge_func_status_cmd *status) { @@ -833,33 +845,189 @@ static int hclge_parse_speed(int speed_cmd, int *speed) return 0; } -static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, - u8 speed_ability) +static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) { - unsigned long *supported = hdev->hw.mac.supported; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 speed_ability = hdev->hw.mac.speed_ability; + u32 speed_bit = 0; - if (speed_ability & HCLGE_SUPPORT_1G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, - supported); + switch (speed) { + case HCLGE_MAC_SPEED_10M: + speed_bit = HCLGE_SUPPORT_10M_BIT; + break; + case HCLGE_MAC_SPEED_100M: + 
speed_bit = HCLGE_SUPPORT_100M_BIT; + break; + case HCLGE_MAC_SPEED_1G: + speed_bit = HCLGE_SUPPORT_1G_BIT; + break; + case HCLGE_MAC_SPEED_10G: + speed_bit = HCLGE_SUPPORT_10G_BIT; + break; + case HCLGE_MAC_SPEED_25G: + speed_bit = HCLGE_SUPPORT_25G_BIT; + break; + case HCLGE_MAC_SPEED_40G: + speed_bit = HCLGE_SUPPORT_40G_BIT; + break; + case HCLGE_MAC_SPEED_50G: + speed_bit = HCLGE_SUPPORT_50G_BIT; + break; + case HCLGE_MAC_SPEED_100G: + speed_bit = HCLGE_SUPPORT_100G_BIT; + break; + default: + return -EINVAL; + } + + if (speed_bit & speed_ability) + return 0; + + return -EINVAL; +} +static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability) +{ if (speed_ability & HCLGE_SUPPORT_10G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - supported); + mac->supported); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + mac->supported); +} +static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + mac->supported); if (speed_ability & HCLGE_SUPPORT_25G_BIT) linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - supported); + mac->supported); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + mac->supported); +} +static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + mac->supported); if (speed_ability & HCLGE_SUPPORT_50G_BIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + mac->supported); +} +static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + mac->supported); + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + mac->supported); if (speed_ability & HCLGE_SUPPORT_100G_BIT) - 
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + mac->supported); +} - linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported); - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported); +static void hclge_convert_setting_fec(struct hclge_mac *mac) +{ + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); + + switch (mac->speed) { + case HCLGE_MAC_SPEED_10G: + case HCLGE_MAC_SPEED_40G: + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + mac->supported); + mac->fec_ability = + BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO); + break; + case HCLGE_MAC_SPEED_25G: + case HCLGE_MAC_SPEED_50G: + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + mac->supported); + mac->fec_ability = + BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) | + BIT(HNAE3_FEC_AUTO); + break; + case HCLGE_MAC_SPEED_100G: + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported); + mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO); + break; + default: + mac->fec_ability = 0; + break; + } +} + +static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, + u8 speed_ability) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + mac->supported); + + hclge_convert_setting_sr(mac, speed_ability); + hclge_convert_setting_lr(mac, speed_ability); + hclge_convert_setting_cr(mac, speed_ability); + if (hdev->pdev->revision >= 0x21) + hclge_convert_setting_fec(mac); + + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); +} + +static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, + u8 speed_ability) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + hclge_convert_setting_kr(mac, speed_ability); + if (hdev->pdev->revision >= 0x21) + hclge_convert_setting_fec(mac); + linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported); } static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, @@ -900,8 +1068,9 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) hclge_parse_fiber_link_mode(hdev, speed_ability); else if (media_type == HNAE3_MEDIA_TYPE_COPPER) hclge_parse_copper_link_mode(hdev, speed_ability); + else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE) + hclge_parse_backplane_link_mode(hdev, speed_ability); } - static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) { struct hclge_cfg_param_cmd *req; @@ -1015,6 +1184,23 @@ static int hclge_get_cap(struct hclge_dev *hdev) return ret; } +static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) +{ +#define HCLGE_MIN_TX_DESC 64 +#define HCLGE_MIN_RX_DESC 64 + + if (!is_kdump_kernel()) + return; + + dev_info(&hdev->pdev->dev, + "Running kdump kernel. 
Using minimal resources\n"); + + /* minimal queue pairs equals to the number of vports */ + hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + hdev->num_tx_desc = HCLGE_MIN_TX_DESC; + hdev->num_rx_desc = HCLGE_MIN_RX_DESC; +} + static int hclge_configure(struct hclge_dev *hdev) { struct hclge_cfg cfg; @@ -1074,6 +1260,8 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; + hclge_init_kdump_kernel_config(hdev); + return ret; } @@ -1337,6 +1525,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) vport->back = hdev; vport->vport_id = i; vport->mps = HCLGE_MAC_DEFAULT_FRAME; + vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; + vport->rxvlan_cfg.rx_vlan_offload_en = true; INIT_LIST_HEAD(&vport->vlan_list); INIT_LIST_HEAD(&vport->uc_mac_list); INIT_LIST_HEAD(&vport->mc_mac_list); @@ -1399,7 +1589,7 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, return ret; } -static int hclge_get_tc_num(struct hclge_dev *hdev) +static u32 hclge_get_tc_num(struct hclge_dev *hdev) { int i, cnt = 0; @@ -1409,17 +1599,6 @@ static int hclge_get_tc_num(struct hclge_dev *hdev) return cnt; } -static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) -{ - int i, cnt = 0; - - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) - if (hdev->hw_tc_map & BIT(i) && - hdev->tm_info.hw_pfc_map & BIT(i)) - cnt++; - return cnt; -} - /* Get the number of pfc enabled TCs, which have private buffer */ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) @@ -1483,14 +1662,12 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc, u32 rx_all) { - u32 shared_buf_min, shared_buf_tc, shared_std; - int tc_num, pfc_enable_num; + u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd; + u32 tc_num = hclge_get_tc_num(hdev); u32 shared_buf, aligned_mps; u32 rx_priv; int i; - tc_num = hclge_get_tc_num(hdev); - pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); if (hnae3_dev_dcb_supported(hdev)) @@ -1499,9 +1676,7 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF + hdev->dv_buf_size; - shared_buf_tc = pfc_enable_num * aligned_mps + - (tc_num - pfc_enable_num) * aligned_mps / 2 + - aligned_mps; + shared_buf_tc = tc_num * aligned_mps + aligned_mps; shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), HCLGE_BUF_SIZE_UNIT); @@ -1518,19 +1693,26 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, } else { buf_alloc->s_buf.self.high = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; - buf_alloc->s_buf.self.low = - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); + buf_alloc->s_buf.self.low = aligned_mps; + } + + if (hnae3_dev_dcb_supported(hdev)) { + if (tc_num) + hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num; + else + hi_thrd = shared_buf - hdev->dv_buf_size; + + hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps); + hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT); + lo_thrd = hi_thrd - aligned_mps / 2; + } else { + hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF; + lo_thrd = aligned_mps; } for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - if ((hdev->hw_tc_map & BIT(i)) && - (hdev->tm_info.hw_pfc_map & BIT(i))) { - buf_alloc->s_buf.tc_thrd[i].low = aligned_mps; - buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps; - } else { - buf_alloc->s_buf.tc_thrd[i].low = 0; - buf_alloc->s_buf.tc_thrd[i].high = aligned_mps; - } + 
buf_alloc->s_buf.tc_thrd[i].low = lo_thrd; + buf_alloc->s_buf.tc_thrd[i].high = hi_thrd; } return true; @@ -2095,6 +2277,16 @@ static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + if (!hdev->hw.mac.support_autoneg) { + if (enable) { + dev_err(&hdev->pdev->dev, + "autoneg is not supported by current port\n"); + return -EOPNOTSUPP; + } else { + return 0; + } + } + return hclge_set_autoneg_en(hdev, enable); } @@ -2110,6 +2302,78 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) return hdev->hw.mac.autoneg; } +static int hclge_restart_autoneg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); + + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + return hclge_notify_client(hdev, HNAE3_UP_CLIENT); +} + +static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) +{ + struct hclge_config_fec_cmd *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false); + + req = (struct hclge_config_fec_cmd *)desc.data; + if (fec_mode & BIT(HNAE3_FEC_AUTO)) + hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1); + if (fec_mode & BIT(HNAE3_FEC_RS)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS); + if (fec_mode & BIT(HNAE3_FEC_BASER)) + hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M, + HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); + + return ret; +} + +static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + if (fec_mode && !(mac->fec_ability & fec_mode)) { + dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); + return -EINVAL; + } + + ret = hclge_set_fec_hw(hdev, fec_mode); + if (ret) + return ret; + + mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF); + return 0; +} + +static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability, + u8 *fec_mode) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_mac *mac = &hdev->hw.mac; + + if (fec_ability) + *fec_ability = mac->fec_ability; + if (fec_mode) + *fec_mode = mac->fec_mode; +} + static int hclge_mac_init(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; @@ -2127,6 +2391,15 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; + if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) { + ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); + if (ret) { + dev_err(&hdev->pdev->dev, + "Fec mode init fail, ret = %d\n", ret); + return ret; + } + } + ret = hclge_set_mac_mtu(hdev, hdev->mps); if (ret) { dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); @@ -2143,7 +2416,8 @@ static int hclge_mac_init(struct hclge_dev *hdev) static void hclge_mbx_task_schedule(struct hclge_dev *hdev) { - if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) + if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) schedule_work(&hdev->mbx_service_task); } @@ -2222,6 +2496,7 @@ static void 
hclge_update_link_status(struct hclge_dev *hdev) for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { handle = &hdev->vport[i].nic; client->ops->link_status_change(handle, state); + hclge_config_mac_tnl_int(hdev, state); rhandle = &hdev->vport[i].roce; if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, @@ -2231,14 +2506,35 @@ static void hclge_update_link_status(struct hclge_dev *hdev) } } +static void hclge_update_port_capability(struct hclge_mac *mac) +{ + /* firmware can not identify back plane type, the media type + * read from configuration can help deal it + */ + if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE && + mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN) + mac->module_type = HNAE3_MODULE_TYPE_KR; + else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) + mac->module_type = HNAE3_MODULE_TYPE_TP; + + if (mac->support_autoneg == true) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); + linkmode_copy(mac->advertising, mac->supported); + } else { + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + mac->supported); + linkmode_zero(mac->advertising); + } +} + static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) { - struct hclge_sfp_speed_cmd *resp = NULL; + struct hclge_sfp_info_cmd *resp = NULL; struct hclge_desc desc; int ret; - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true); - resp = (struct hclge_sfp_speed_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); + resp = (struct hclge_sfp_info_cmd *)desc.data; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret == -EOPNOTSUPP) { dev_warn(&hdev->pdev->dev, @@ -2249,28 +2545,67 @@ static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) return ret; } - *speed = resp->sfp_speed; + *speed = le32_to_cpu(resp->speed); return 0; } -static int hclge_update_speed_duplex(struct hclge_dev *hdev) +static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) { - struct hclge_mac mac = hdev->hw.mac; - int speed; + struct hclge_sfp_info_cmd *resp; + struct hclge_desc desc; int ret; - /* get the speed from SFP cmd when phy - * doesn't exit. 
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true); + resp = (struct hclge_sfp_info_cmd *)desc.data; + + resp->query_type = QUERY_ACTIVE_SPEED; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "IMP does not support get SFP info %d\n", ret); + return ret; + } else if (ret) { + dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); + return ret; + } + + mac->speed = le32_to_cpu(resp->speed); + /* if resp->speed_ability is 0, it means it's an old version + * firmware, do not update these params */ - if (mac.phydev) + if (resp->speed_ability) { + mac->module_type = le32_to_cpu(resp->module_type); + mac->speed_ability = le32_to_cpu(resp->speed_ability); + mac->autoneg = resp->autoneg; + mac->support_autoneg = resp->autoneg_ability; + } else { + mac->speed_type = QUERY_SFP_SPEED; + } + + return 0; +} + +static int hclge_update_port_info(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int speed = HCLGE_MAC_SPEED_UNKNOWN; + int ret; + + /* get the port info from SFP cmd if not copper port */ + if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) return 0; - /* if IMP does not support get SFP/qSFP speed, return directly */ + /* if IMP does not support get SFP/qSFP info, return directly */ if (!hdev->support_sfp_query) return 0; - ret = hclge_get_sfp_speed(hdev, &speed); + if (hdev->pdev->revision >= 0x21) + ret = hclge_get_sfp_info(hdev, mac); + else + ret = hclge_get_sfp_speed(hdev, &speed); + if (ret == -EOPNOTSUPP) { hdev->support_sfp_query = false; return ret; @@ -2278,19 +2613,20 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev) return ret; } - if (speed == HCLGE_MAC_SPEED_UNKNOWN) - return 0; /* do nothing if no SFP */ - - /* must config full duplex for SFP */ - return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); -} - -static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; + if (hdev->pdev->revision >= 0x21) { + if (mac->speed_type == QUERY_ACTIVE_SPEED) { + hclge_update_port_capability(mac); + return 0; + } + return hclge_cfg_mac_speed_dup(hdev, mac->speed, + HCLGE_MAC_FULL); + } else { + if (speed == HCLGE_MAC_SPEED_UNKNOWN) + return 0; /* do nothing if no SFP */ - return hclge_update_speed_duplex(hdev); + /* must config full duplex for SFP */ + return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); + } } static int hclge_get_status(struct hnae3_handle *handle) @@ -2344,6 +2680,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + hdev->rst_stats.imp_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; } @@ -2352,6 +2689,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); + hdev->rst_stats.global_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; } @@ -2360,12 +2698,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); + hdev->rst_stats.core_rst_cnt++; return HCLGE_VECTOR0_EVENT_RST; } /* check for vector0 msix event source */ - if (msix_src_reg & 
HCLGE_VECTOR0_REG_MSIX_MASK) + if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) { + dev_dbg(&hdev->pdev->dev, "received event 0x%x\n", + msix_src_reg); return HCLGE_VECTOR0_EVENT_ERR; + } /* check for vector0 mailbox(=CMDQ RX) event source */ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { @@ -2374,6 +2716,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) return HCLGE_VECTOR0_EVENT_MBX; } + /* print other vector0 event source */ + dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n", + cmdq_src_reg, msix_src_reg); return HCLGE_VECTOR0_EVENT_OTHER; } @@ -2657,7 +3002,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) return ret; } - if (!reset) + if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) continue; /* Inform VF to process the reset. @@ -2694,9 +3039,18 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) static void hclge_do_reset(struct hclge_dev *hdev) { + struct hnae3_handle *handle = &hdev->vport[0].nic; struct pci_dev *pdev = hdev->pdev; u32 val; + if (hclge_get_hw_reset_stat(handle)) { + dev_info(&pdev->dev, "Hardware reset not finish\n"); + dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n", + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), + hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); + return; + } + switch (hdev->reset_type) { case HNAE3_GLOBAL_RESET: val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); @@ -2775,6 +3129,10 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, clear_bit(HNAE3_FLR_RESET, addr); } + if (hdev->reset_type != HNAE3_NONE_RESET && + rst_level < hdev->reset_type) + return HNAE3_NONE_RESET; + return rst_level; } @@ -2844,6 +3202,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev) * after hclge_cmd_init is called. 
*/ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + hdev->rst_stats.pf_rst_cnt++; break; case HNAE3_FLR_RESET: /* There is no mechanism for PF to know if VF has stopped IO @@ -2852,6 +3211,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev) msleep(100); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + hdev->rst_stats.flr_rst_cnt++; break; case HNAE3_IMP_RESET: reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); @@ -2932,7 +3292,7 @@ static void hclge_reset(struct hclge_dev *hdev) * know if device is undergoing reset */ ae_dev->reset_type = hdev->reset_type; - hdev->reset_count++; + hdev->rst_stats.reset_cnt++; /* perform reset of the stack & ae device for a client */ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); if (ret) @@ -2958,6 +3318,8 @@ static void hclge_reset(struct hclge_dev *hdev) goto err_reset; } + hdev->rst_stats.hw_reset_done_cnt++; + ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); if (ret) goto err_reset; @@ -3001,7 +3363,9 @@ static void hclge_reset(struct hclge_dev *hdev) hdev->last_reset_time = jiffies; hdev->reset_fail_cnt = 0; + hdev->rst_stats.reset_done_cnt++; ae_dev->reset_type = HNAE3_NONE_RESET; + del_timer(&hdev->reset_timer); return; @@ -3154,7 +3518,7 @@ static void hclge_service_task(struct work_struct *work) hdev->hw_stats.stats_timer = 0; } - hclge_update_speed_duplex(hdev); + hclge_update_port_info(hdev); hclge_update_link_status(hdev); hclge_update_vport_alive(hdev); hclge_service_complete(hdev); @@ -5194,7 +5558,7 @@ static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - return hdev->reset_count; + return hdev->rst_stats.hw_reset_done_cnt; } static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) @@ -5282,8 +5646,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en, #define HCLGE_SERDES_RETRY_MS 10 #define HCLGE_SERDES_RETRY_NUM 100 -#define HCLGE_MAC_LINK_STATUS_MS 20 -#define HCLGE_MAC_LINK_STATUS_NUM 10 +#define HCLGE_MAC_LINK_STATUS_MS 10 +#define HCLGE_MAC_LINK_STATUS_NUM 100 #define HCLGE_MAC_LINK_STATUS_DOWN 0 #define HCLGE_MAC_LINK_STATUS_UP 1 @@ -5942,8 +6306,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, } /* check if we just hit the duplicate */ - if (!ret) - ret = -EINVAL; + if (!ret) { + dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n", + vport->vport_id, addr); + return 0; + } dev_err(&hdev->pdev->dev, "PF failed to add unicast entry(%pM) in the MAC table\n", @@ -6293,7 +6660,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p, return -EINVAL; } - if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) + if ((!is_first || is_kdump_kernel()) && + hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr)) dev_warn(&hdev->pdev->dev, "remove old uc mac address fail.\n"); @@ -6543,30 +6911,6 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, return ret; } -int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, - u16 vlan_id, bool is_kill) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - - return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id, - 0, is_kill); -} - -static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, - u16 vlan, u8 qos, __be16 proto) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - - if 
((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7)) - return -EINVAL; - if (proto != htons(ETH_P_8021Q)) - return -EPROTONOSUPPORT; - - return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false); -} - static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) { struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; @@ -6640,6 +6984,52 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) return status; } +static int hclge_vlan_offload_cfg(struct hclge_vport *vport, + u16 port_base_vlan_state, + u16 vlan_tag) +{ + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->txvlan_cfg.accept_tag1 = true; + vport->txvlan_cfg.insert_tag1_en = false; + vport->txvlan_cfg.default_tag1 = 0; + } else { + vport->txvlan_cfg.accept_tag1 = false; + vport->txvlan_cfg.insert_tag1_en = true; + vport->txvlan_cfg.default_tag1 = vlan_tag; + } + + vport->txvlan_cfg.accept_untag1 = true; + + /* accept_tag2 and accept_untag2 are not supported on + * pdev revision(0x20), new revision support them, + * this two fields can not be configured by user. + */ + vport->txvlan_cfg.accept_tag2 = true; + vport->txvlan_cfg.accept_untag2 = true; + vport->txvlan_cfg.insert_tag2_en = false; + vport->txvlan_cfg.default_tag2 = 0; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + } else { + vport->rxvlan_cfg.strip_tag1_en = + vport->rxvlan_cfg.rx_vlan_offload_en; + vport->rxvlan_cfg.strip_tag2_en = true; + } + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + + ret = hclge_set_vlan_tx_offload_cfg(vport); + if (ret) + return ret; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) { struct hclge_rx_vlan_type_cfg_cmd *rx_req; @@ -6730,34 +7120,14 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return ret; for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - vport->txvlan_cfg.accept_tag1 = true; - vport->txvlan_cfg.accept_untag1 = true; - - /* accept_tag2 and accept_untag2 are not supported on - * pdev revision(0x20), new revision support them. The - * value of this two fields will not return error when driver - * send command to fireware in revision(0x20). - * This two fields can not configured by user. 
- */ - vport->txvlan_cfg.accept_tag2 = true; - vport->txvlan_cfg.accept_untag2 = true; + u16 vlan_tag; - vport->txvlan_cfg.insert_tag1_en = false; - vport->txvlan_cfg.insert_tag2_en = false; - vport->txvlan_cfg.default_tag1 = 0; - vport->txvlan_cfg.default_tag2 = 0; - - ret = hclge_set_vlan_tx_offload_cfg(vport); - if (ret) - return ret; - - vport->rxvlan_cfg.strip_tag1_en = false; - vport->rxvlan_cfg.strip_tag2_en = true; - vport->rxvlan_cfg.vlan1_vlan_prionly = false; - vport->rxvlan_cfg.vlan2_vlan_prionly = false; + vport = &hdev->vport[i]; + vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - ret = hclge_set_vlan_rx_offload_cfg(vport); + ret = hclge_vlan_offload_cfg(vport, + vport->port_base_vlan_cfg.state, + vlan_tag); if (ret) return ret; } @@ -6765,7 +7135,8 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } -void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id) +static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool writen_to_tbl) { struct hclge_vport_vlan_cfg *vlan; @@ -6777,14 +7148,38 @@ void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id) if (!vlan) return; - vlan->hd_tbl_status = true; + vlan->hd_tbl_status = writen_to_tbl; vlan->vlan_id = vlan_id; list_add_tail(&vlan->node, &vport->vlan_list); } -void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, - bool is_write_tbl) +static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport) +{ + struct hclge_vport_vlan_cfg *vlan, *tmp; + struct hclge_dev *hdev = vport->back; + int ret; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { + if (!vlan->hd_tbl_status) { + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, 0, false); + if (ret) { + dev_err(&hdev->pdev->dev, + "restore vport vlan list failed, ret=%d\n", + ret); + return ret; + } + } + vlan->hd_tbl_status = true; + } + + return 0; +} + +static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, + bool is_write_tbl) { struct hclge_vport_vlan_cfg *vlan, *tmp; struct hclge_dev *hdev = vport->back; @@ -6847,14 +7242,203 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); - vport->rxvlan_cfg.strip_tag1_en = false; - vport->rxvlan_cfg.strip_tag2_en = enable; + if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) { + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = enable; + } else { + vport->rxvlan_cfg.strip_tag1_en = enable; + vport->rxvlan_cfg.strip_tag2_en = true; + } vport->rxvlan_cfg.vlan1_vlan_prionly = false; vport->rxvlan_cfg.vlan2_vlan_prionly = false; + vport->rxvlan_cfg.rx_vlan_offload_en = enable; return hclge_set_vlan_rx_offload_cfg(vport); } +static int hclge_update_vlan_filter_entries(struct hclge_vport *vport, + u16 port_base_vlan_state, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) { + hclge_rm_vport_all_vlan_table(vport, false); + return hclge_set_vlan_filter_hw(hdev, + htons(new_info->vlan_proto), + vport->vport_id, + new_info->vlan_tag, + new_info->qos, false); + } + + ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), + vport->vport_id, old_info->vlan_tag, + old_info->qos, true); + if (ret) + return ret; + + return 
hclge_add_vport_all_vlan_table(vport); +} + +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_vlan_info *old_vlan_info; + struct hclge_dev *hdev = vport->back; + int ret; + + old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; + + ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag); + if (ret) + return ret; + + if (state == HNAE3_PORT_BASE_VLAN_MODIFY) { + /* add new VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, + htons(vlan_info->vlan_proto), + vport->vport_id, + vlan_info->vlan_tag, + vlan_info->qos, false); + if (ret) + return ret; + + /* remove old VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, + htons(old_vlan_info->vlan_proto), + vport->vport_id, + old_vlan_info->vlan_tag, + old_vlan_info->qos, true); + if (ret) + return ret; + + goto update; + } + + ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, + old_vlan_info); + if (ret) + return ret; + + /* update state only when disable/enable port based VLAN */ + vport->port_base_vlan_cfg.state = state; + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + +update: + vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag; + vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos; + vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto; + + return 0; +} + +static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport, + enum hnae3_port_base_vlan_state state, + u16 vlan) +{ + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) { + if (!vlan) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + else + return HNAE3_PORT_BASE_VLAN_ENABLE; + } else { + if (!vlan) + return HNAE3_PORT_BASE_VLAN_DISABLE; + else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan) + return HNAE3_PORT_BASE_VLAN_NOCHANGE; + else + return HNAE3_PORT_BASE_VLAN_MODIFY; + } +} + +static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_info vlan_info; + u16 state; + int ret; + + if (hdev->pdev->revision == 0x20) + return -EOPNOTSUPP; + + /* qos is a 3 bits value, so can not be bigger than 7 */ + if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7) + return -EINVAL; + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + vport = &hdev->vport[vfid]; + state = hclge_get_port_base_vlan_state(vport, + vport->port_base_vlan_cfg.state, + vlan); + if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE) + return 0; + + vlan_info.vlan_tag = vlan; + vlan_info.qos = qos; + vlan_info.vlan_proto = ntohs(proto); + + /* update port based VLAN for PF */ + if (!vfid) { + hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info); + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + + return ret; + } + + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { + return hclge_update_port_base_vlan_cfg(vport, state, + &vlan_info); + } else { + ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0], + (u8)vfid, state, + vlan, qos, + ntohs(proto)); + return ret; + } +} + +int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + bool 
writen_to_tbl = false; + int ret = 0; + + /* when port based VLAN enabled, we use port based VLAN as the VLAN + * filter entry. In this case, we don't update VLAN filter table + * when user add new VLAN or remove exist VLAN, just update the vport + * VLAN list. The VLAN id in VLAN list won't be writen in VLAN filter + * table until port based VLAN disabled + */ + if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) { + ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, + vlan_id, 0, is_kill); + writen_to_tbl = true; + } + + if (ret) + return ret; + + if (is_kill) + hclge_rm_vport_vlan_table(vport, vlan_id, false); + else + hclge_add_vport_vlan_table(vport, vlan_id, + writen_to_tbl); + + return 0; +} + static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) { struct hclge_config_max_frm_size_cmd *req; @@ -7199,13 +7783,13 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, if (!fc_autoneg) return hclge_cfg_pauseparam(hdev, rx_en, tx_en); - /* Only support flow control negotiation for netdev with - * phy attached for now. - */ - if (!phydev) + if (phydev) + return phy_start_aneg(phydev); + + if (hdev->pdev->revision == 0x20) return -EOPNOTSUPP; - return phy_start_aneg(phydev); + return hclge_restart_autoneg(handle); } static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, @@ -7222,13 +7806,17 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, *auto_neg = hdev->hw.mac.autoneg; } -static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) +static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; if (media_type) *media_type = hdev->hw.mac.media_type; + + if (module_type) + *module_type = hdev->hw.mac.module_type; } static void hclge_get_mdix_mode(struct hnae3_handle *handle, @@ -7280,6 +7868,32 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle, *tp_mdix = ETH_TP_MDI; } +static void hclge_info_show(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "PF info begin:\n"); + + dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport); + dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport); + dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs); + dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); + dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size); + dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size); + dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size); + dev_info(dev, "This is %s PF\n", + hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); + dev_info(dev, "DCB %s\n", + hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable"); + dev_info(dev, "MQPRIO %s\n", + hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? 
"enable" : "disable"); + + dev_info(dev, "PF info end.\n"); +} + static int hclge_init_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -7301,6 +7915,9 @@ static int hclge_init_client_instance(struct hnae3_client *client, hnae3_set_client_init_flag(client, ae_dev, 1); + if (netif_msg_drv(&hdev->vport->nic)) + hclge_info_show(hdev); + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; @@ -7660,6 +8277,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + INIT_KFIFO(hdev->mac_tnl_log); + hclge_dcb_ops_set(hdev); timer_setup(&hdev->service_timer, hclge_service_timer, 0); @@ -7708,7 +8327,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev) int i; for (i = 0; i < hdev->num_alloc_vport; i++) { - hclge_vport_start(vport); + hclge_vport_stop(vport); vport++; } } @@ -7813,6 +8432,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_enable_vector(&hdev->misc_vector, false); synchronize_irq(hdev->misc_vector.vector_irq); + hclge_config_mac_tnl_int(hdev, false); hclge_hw_error_set_state(hdev, false); hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); @@ -8234,9 +8854,11 @@ static const struct hnae3_ae_ops hclge_ops = { .client_stop = hclge_client_stop, .get_status = hclge_get_status, .get_ksettings_an_result = hclge_get_ksettings_an_result, - .update_speed_duplex_h = hclge_update_speed_duplex_h, .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, .get_media_type = hclge_get_media_type, + .check_port_speed = hclge_check_port_speed, + .get_fec = hclge_get_fec, + .set_fec = hclge_set_fec, .get_rss_key_size = hclge_get_rss_key_size, .get_rss_indir_size = hclge_get_rss_indir_size, .get_rss = hclge_get_rss, @@ -8253,11 +8875,13 @@ static const struct hnae3_ae_ops hclge_ops = { .rm_mc_addr = hclge_rm_mc_addr, .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, + .restart_autoneg = hclge_restart_autoneg, .get_pauseparam = hclge_get_pauseparam, .set_pauseparam = hclge_set_pauseparam, .set_mtu = hclge_set_mtu, .reset_queue = hclge_reset_tqp, .get_stats = hclge_get_stats, + .get_mac_pause_stats = hclge_get_mac_pause_stat, .update_stats = hclge_update_stats, .get_strings = hclge_get_strings, .get_sset_count = hclge_get_sset_count, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index b57ac4beb313..dd06b11187b0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -7,6 +7,7 @@ #include <linux/types.h> #include <linux/phy.h> #include <linux/if_vlan.h> +#include <linux/kfifo.h> #include "hclge_cmd.h" #include "hnae3.h" @@ -188,6 +189,8 @@ enum HLCGE_PORT_TYPE { #define HCLGE_SUPPORT_25G_BIT BIT(2) #define HCLGE_SUPPORT_50G_BIT BIT(3) #define HCLGE_SUPPORT_100G_BIT BIT(4) +/* to be compatible with exsit board */ +#define HCLGE_SUPPORT_40G_BIT BIT(5) #define HCLGE_SUPPORT_100M_BIT BIT(6) #define HCLGE_SUPPORT_10M_BIT BIT(7) #define HCLGE_SUPPORT_GE \ @@ -235,15 +238,25 @@ enum HCLGE_MAC_DUPLEX { HCLGE_MAC_FULL }; +#define QUERY_SFP_SPEED 0 +#define QUERY_ACTIVE_SPEED 1 + struct hclge_mac { u8 phy_addr; u8 flag; - u8 media_type; + u8 media_type; /* port media type, e.g. 
fibre/copper/backplane */ u8 mac_addr[ETH_ALEN]; u8 autoneg; u8 duplex; + u8 support_autoneg; + u8 speed_type; /* 0: sfp speed, 1: active speed */ u32 speed; - int link; /* store the link status of mac & phy (if phy exit)*/ + u32 speed_ability; /* speed ability supported by current media */ + u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */ + u32 fec_mode; /* active fec mode */ + u32 user_fec_mode; + u32 fec_ability; + int link; /* store the link status of mac & phy (if phy exists) */ struct phy_device *phydev; struct mii_bus *mdio_bus; phy_interface_t phy_if; @@ -649,6 +662,23 @@ struct hclge_vport_vlan_cfg { u16 vlan_id; }; +struct hclge_rst_stats { + u32 reset_done_cnt; /* the number of resets completed */ + u32 hw_reset_done_cnt; /* the number of HW resets completed */ + u32 pf_rst_cnt; /* the number of PF reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 core_rst_cnt; /* the number of CORE reset */ + u32 global_rst_cnt; /* the number of GLOBAL reset */ + u32 imp_rst_cnt; /* the number of IMP reset */ + u32 reset_cnt; /* the number of resets */ +}; + +/* time and register status when a mac tunnel interrupt occurs */ +struct hclge_mac_tnl_stats { + u64 time; + u32 status; +}; + /* For each bit of TCAM entry, it uses a pair of 'x' and * 'y' to indicate which value to match, like below: * ---------------------------------- @@ -675,6 +705,7 @@ struct hclge_vport_vlan_cfg { (y) = (_k_ ^ ~_v_) & (_k_); \ } while (0) +#define HCLGE_MAC_TNL_LOG_SIZE 8 #define HCLGE_VPORT_NUM 256 struct hclge_dev { struct pci_dev *pdev; @@ -691,7 +722,7 @@ struct hclge_dev { unsigned long default_reset_request; unsigned long reset_request; /* reset has been requested */ unsigned long reset_pending; /* client rst is pending to be served */ - unsigned long reset_count; /* the number of reset has been done */ + struct hclge_rst_stats rst_stats; u32 reset_fail_cnt; u32 fw_version; u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ @@ -791,6 +822,9 @@ struct hclge_dev { struct mutex umv_mutex; /* protect share_umv_size */ struct mutex vport_cfg_mutex; /* Protect stored vf table */ + + DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats, + HCLGE_MAC_TNL_LOG_SIZE); }; /* VPort level vlan tag configuration for TX direction */ @@ -807,10 +841,11 @@ struct hclge_tx_vtag_cfg { /* VPort level vlan tag configuration for RX direction */ struct hclge_rx_vtag_cfg { - bool strip_tag1_en; /* Whether strip inner vlan tag */ - bool strip_tag2_en; /* Whether strip outer vlan tag */ - bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */ - bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */ + u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */ + u8 strip_tag1_en; /* Whether strip inner vlan tag */ + u8 strip_tag2_en; /* Whether strip outer vlan tag */ + u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */ + u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */ }; struct hclge_rss_tuple_cfg { @@ -829,6 +864,17 @@ enum HCLGE_VPORT_STATE { HCLGE_VPORT_STATE_MAX }; +struct hclge_vlan_info { + u16 vlan_proto; /* so far support 802.1Q only */ + u16 qos; + u16 vlan_tag; +}; + +struct hclge_port_base_vlan_config { + u16 state; + struct hclge_vlan_info vlan_info; +}; + struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ u16 alloc_rss_size; u16 qs_offset; - u16 bw_limit; /* VSI BW Limit (0 = disabled) */ + u32 bw_limit; /* VSI BW Limit (0 = disabled) */ u8 dwrr; + struct
hclge_port_base_vlan_config port_base_vlan_cfg; struct hclge_tx_vtag_cfg txvlan_cfg; struct hclge_rx_vtag_cfg rxvlan_cfg; @@ -924,9 +971,11 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list, enum HCLGE_MAC_ADDR_TYPE mac_type); void hclge_uninit_vport_mac_table(struct hclge_dev *hdev); -void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id); -void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, - bool is_write_tbl); void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list); void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev); +int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, + struct hclge_vlan_info *vlan_info); +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, u16 vlan_tag, u16 qos, + u16 vlan_proto); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 306a23e486de..0e04e63f2a94 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -212,8 +212,7 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, } static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) + struct hclge_mbx_vf_to_pf_cmd *mbx_req) { const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]); struct hclge_dev *hdev = vport->back; @@ -249,7 +248,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, return -EIO; } - if (gen_resp) + if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT) hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); return 0; @@ -289,9 +288,25 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, return 0; } +int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid, + u16 state, u16 vlan_tag, u16 qos, + u16 vlan_proto) +{ +#define MSG_DATA_SIZE 8 + + u8 msg_data[MSG_DATA_SIZE]; + + memcpy(&msg_data[0], &state, sizeof(u16)); + memcpy(&msg_data[2], &vlan_proto, sizeof(u16)); + memcpy(&msg_data[4], &qos, sizeof(u16)); + memcpy(&msg_data[6], &vlan_tag, sizeof(u16)); + + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + HLCGE_MBX_PUSH_VLAN_INFO, vfid); +} + static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, - struct hclge_mbx_vf_to_pf_cmd *mbx_req, - bool gen_resp) + struct hclge_mbx_vf_to_pf_cmd *mbx_req) { int status = 0; @@ -305,19 +320,27 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, memcpy(&proto, &mbx_req->msg[5], sizeof(proto)); status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), vlan, is_kill); - if (!status) - is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false) - : hclge_add_vport_vlan_table(vport, vlan); } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) { struct hnae3_handle *handle = &vport->nic; bool en = mbx_req->msg[2] ? 
true : false; status = hclge_en_hw_strip_rxvtag(handle, en); + } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) { + struct hclge_vlan_info *vlan_info; + u16 *state; + + state = (u16 *)&mbx_req->msg[2]; + vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4]; + status = hclge_update_port_base_vlan_cfg(vport, *state, + vlan_info); + } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) { + u8 state; + + state = vport->port_base_vlan_cfg.state; + status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state, + sizeof(u8)); } - if (gen_resp) - status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); - return status; } @@ -385,24 +408,33 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport, HCLGE_TQPS_DEPTH_INFO_LEN); } +static int hclge_get_vf_media_type(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + struct hclge_dev *hdev = vport->back; + u8 resp_data[2]; + + resp_data[0] = hdev->hw.mac.media_type; + resp_data[1] = hdev->hw.mac.module_type; + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, + sizeof(resp_data)); +} + static int hclge_get_link_info(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { struct hclge_dev *hdev = vport->back; u16 link_status; - u8 msg_data[10]; - u16 media_type; + u8 msg_data[8]; u8 dest_vfid; u16 duplex; /* mac.link can only be 0 or 1 */ link_status = (u16)hdev->hw.mac.link; duplex = hdev->hw.mac.duplex; - media_type = hdev->hw.mac.media_type; memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); memcpy(&msg_data[6], &duplex, sizeof(u16)); - memcpy(&msg_data[8], &media_type, sizeof(u16)); dest_vfid = mbx_req->mbx_src_vfid; /* send this requested info to VF */ @@ -565,7 +597,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_SET_UNICAST: - ret = hclge_set_vf_uc_mac_addr(vport, req, true); + ret = hclge_set_vf_uc_mac_addr(vport, req); if (ret) dev_err(&hdev->pdev->dev, "PF fail(%d) to set VF UC MAC Addr\n", @@ -579,7 +611,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) ret); break; case HCLGE_MBX_SET_VLAN: - ret = hclge_set_vf_vlan_cfg(vport, req, false); + ret = hclge_set_vf_vlan_cfg(vport, req); if (ret) dev_err(&hdev->pdev->dev, "PF failed(%d) to config VF's VLAN\n", @@ -662,6 +694,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) hclge_rm_vport_all_vlan_table(vport, true); mutex_unlock(&hdev->vport_cfg_mutex); break; + case HCLGE_MBX_GET_MEDIA_TYPE: + ret = hclge_get_vf_media_type(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF fail(%d) to media type for VF\n", + ret); + break; default: dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 48eda2c6fdae..1e8134892d77 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -3,6 +3,7 @@ #include <linux/etherdevice.h> #include <linux/kernel.h> +#include <linux/marvell_phy.h> #include "hclge_cmd.h" #include "hclge_main.h" @@ -121,12 +122,18 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) int hclge_mac_mdio_config(struct hclge_dev *hdev) { +#define PHY_INEXISTENT 255 + struct hclge_mac *mac = &hdev->hw.mac; struct phy_device *phydev; struct mii_bus *mdio_bus; int ret; - if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { + if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) { + 
dev_info(&hdev->pdev->dev, + "no phy device is connected to mdio bus\n"); + return 0; + } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n", hdev->hw.mac.phy_addr); return -EINVAL; @@ -203,6 +210,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); + phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE; + ret = phy_connect_direct(netdev, phydev, hclge_mac_adjust_link, PHY_INTERFACE_MODE_SGMII); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index aafc69f4bfdd..a7bbb6d3091a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1331,8 +1331,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) ret = hclge_pfc_setup_hw(hdev); if (init && ret == -EOPNOTSUPP) dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); - else + else if (ret) { + dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n", + ret); return ret; + } return hclge_tm_bp_setup(hdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 9441b453d38d..71f356fc2446 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring) return ring->desc_num - used - 1; } +static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring, + int head) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + + if (ntu > ntc) + return head >= ntc && head <= ntu; + + return head >= ntc || head <= ntu; +} + static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) { + struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw); struct hclgevf_cmq_ring *csq = &hw->cmq.csq; - u16 ntc = csq->next_to_clean; - struct hclgevf_desc *desc; int clean = 0; u32 head; - desc = &csq->desc[ntc]; head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); - while (head != ntc) { - memset(desc, 0, sizeof(*desc)); - ntc++; - if (ntc == csq->desc_num) - ntc = 0; - desc = &csq->desc[ntc]; - clean++; + rmb(); /* Make sure head is ready before touch any data */ + + if (!hclgevf_is_valid_csq_clean_head(csq, head)) { + dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); + dev_warn(&hdev->pdev->dev, + "Disabling any further commands to IMP firmware\n"); + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + return -EIO; } - csq->next_to_clean = ntc; + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; return clean; } @@ -321,13 +334,13 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) int ret; spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock_bh(&hdev->hw.cmq.crq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); /* initialize the pointers of async rx queue of mailbox */ hdev->arq.hdev = hdev; hdev->arq.head = 0; hdev->arq.tail = 0; - hdev->arq.count = 0; + atomic_set(&hdev->arq.count, 0); hdev->hw.cmq.csq.next_to_clean = 0; hdev->hw.cmq.csq.next_to_use = 0; hdev->hw.cmq.crq.next_to_clean = 0; @@ -335,7 +348,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) hclgevf_cmd_init_regs(&hdev->hw); - spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); 
clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); @@ -344,8 +357,8 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) * reset may happen when lower level reset is being processed. */ if (hclgevf_is_reset_pending(hdev)) { - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - return -EBUSY; + ret = -EBUSY; + goto err_cmd_init; } /* get firmware version */ @@ -353,13 +366,18 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to query firmware version\n", ret); - return ret; + goto err_cmd_init; } hdev->fw_version = version; dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; + +err_cmd_init: + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + return ret; } static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 8bc28e6f465f..5d53467ee2d2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -245,6 +245,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) return 0; } +static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + u8 resp_msg; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, + NULL, 0, true, &resp_msg, sizeof(u8)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get port based vlan state failed %d", + ret); + return ret; + } + + nic->port_base_vlan_state = resp_msg; + + return 0; +} + static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_RSS_INFO_LEN 6 @@ -307,6 +328,26 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) return qid_in_pf; } +static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) +{ + u8 resp_msg[2]; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0, + true, resp_msg, sizeof(resp_msg)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get the pf port media type failed %d", + ret); + return ret; + } + + hdev->hw.mac.media_type = resp_msg[0]; + hdev->hw.mac.module_type = resp_msg[1]; + + return 0; +} + static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) { struct hclgevf_tqp *tqp; @@ -404,7 +445,7 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) } } -void hclgevf_update_link_mode(struct hclgevf_dev *hdev) +static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) { #define HCLGEVF_ADVERTISING 0 #define HCLGEVF_SUPPORTED 1 @@ -1375,9 +1416,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) case HNAE3_VF_FUNC_RESET: ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 0, true, NULL, sizeof(u8)); + hdev->rst_stats.vf_func_rst_cnt++; break; case HNAE3_FLR_RESET: set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + hdev->rst_stats.flr_rst_cnt++; break; default: break; @@ -1400,7 +1443,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) * know if device is undergoing reset */ ae_dev->reset_type = hdev->reset_type; - hdev->reset_count++; + hdev->rst_stats.rst_cnt++; rtnl_lock(); /* bring down the nic to stop any ongoing TX/RX */ @@ -1426,6 +1469,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) goto err_reset; } + hdev->rst_stats.hw_rst_done_cnt++; + rtnl_lock(); /* now, re-initialize the nic client and ae device*/ @@ -1444,6 +1489,7 @@ static int hclgevf_reset(struct 
hclgevf_dev *hdev) hdev->last_reset_time = jiffies; ae_dev->reset_type = HNAE3_NONE_RESET; + hdev->rst_stats.rst_done_cnt++; return ret; err_reset_lock: @@ -1455,6 +1501,8 @@ err_reset: */ hclgevf_cmd_init(hdev); dev_err(&hdev->pdev->dev, "failed to reset VF\n"); + if (hclgevf_is_reset_pending(hdev)) + hclgevf_reset_task_schedule(hdev); return ret; } @@ -1564,8 +1612,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) { - if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) && - !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { + if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) { set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state); schedule_work(&hdev->rst_service_task); } @@ -1603,6 +1650,7 @@ static void hclgevf_service_timer(struct timer_list *t) mod_timer(&hdev->service_timer, jiffies + 5 * HZ); + hdev->stats_timer++; hclgevf_task_schedule(hdev); } @@ -1711,7 +1759,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work) hdev = container_of(work, struct hclgevf_dev, keep_alive_task); - if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) return; ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, @@ -1723,9 +1771,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work) static void hclgevf_service_task(struct work_struct *work) { + struct hnae3_handle *handle; struct hclgevf_dev *hdev; hdev = container_of(work, struct hclgevf_dev, service_task); + handle = &hdev->nic; + + if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) { + hclgevf_tqps_update_stats(handle); + hdev->stats_timer = 0; + } /* request the link status from the PF. PF would be able to tell VF * about such updates in future so we might remove this later @@ -1762,6 +1817,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); *clearval = cmdq_src_reg; + hdev->rst_stats.vf_rst_cnt++; return HCLGEVF_VECTOR0_EVENT_RST; } @@ -1814,6 +1870,11 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + /* get queue configuration from PF */ ret = hclgevf_get_queue_info(hdev); if (ret) @@ -1824,6 +1885,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) if (ret) return ret; + ret = hclgevf_get_pf_media_type(hdev); + if (ret) + return ret; + /* get tc configuration from PF */ return hclgevf_get_tc_info(hdev); } @@ -1986,8 +2051,10 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) set_bit(HCLGEVF_STATE_DOWN, &hdev->state); - for (i = 0; i < handle->kinfo.num_tqps; i++) - hclgevf_reset_tqp(handle, i); + if (hdev->reset_type != HNAE3_VF_RESET) + for (i = 0; i < handle->kinfo.num_tqps; i++) + if (hclgevf_reset_tqp(handle, i)) + break; /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); @@ -2007,9 +2074,15 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) static int hclgevf_client_start(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + ret = hclgevf_set_alive(handle, true); + if (ret) + return ret; mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); - return hclgevf_set_alive(handle, true); + + return 0; } static void hclgevf_client_stop(struct hnae3_handle *handle) @@ -2051,6 
+2124,10 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev) { set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + if (hdev->keep_alive_timer.function) + del_timer_sync(&hdev->keep_alive_timer); + if (hdev->keep_alive_task.func) + cancel_work_sync(&hdev->keep_alive_task); if (hdev->service_timer.function) del_timer_sync(&hdev->service_timer); if (hdev->service_task.func) @@ -2155,6 +2232,23 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) hclgevf_free_vector(hdev, 0); } +static void hclgevf_info_show(struct hclgevf_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "VF info begin:\n"); + + dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport); + dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); + dev_info(dev, "PF media type of this VF: %d\n", + hdev->hw.mac.media_type); + + dev_info(dev, "VF info end.\n"); +} + static int hclgevf_init_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -2172,6 +2266,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, hnae3_set_client_init_flag(client, ae_dev, 1); + if (netif_msg_drv(&hdev->nic)) + hclgevf_info_show(hdev); + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; @@ -2651,12 +2748,16 @@ static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) return hclgevf_config_gro(hdev, enable); } -static void hclgevf_get_media_type(struct hnae3_handle *handle, - u8 *media_type) +static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, + u8 *module_type) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + if (media_type) *media_type = hdev->hw.mac.media_type; + + if (module_type) + *module_type = hdev->hw.mac.module_type; } static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) @@ -2677,7 +2778,7 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - return hdev->reset_count; + return hdev->rst_stats.hw_rst_done_cnt; } static void hclgevf_get_link_mode(struct hnae3_handle *handle, @@ -2756,6 +2857,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, } } +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + u8 *port_base_vlan_info, u8 data_size) +{ + struct hnae3_handle *nic = &hdev->nic; + + rtnl_lock(); + hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + + /* send msg to PF and wait update port based vlan info */ + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_PORT_BASE_VLAN_CFG, + port_base_vlan_info, data_size, + false, NULL, 0); + + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + + rtnl_lock(); + hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + rtnl_unlock(); +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index c128863ee7d0..cc52f54f8c08 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -116,6 +116,8 @@ #define HCLGEVF_S_IP_BIT BIT(3) #define HCLGEVF_V_TAG_BIT BIT(4) +#define HCLGEVF_STATS_TIMER_INTERVAL (36) + enum hclgevf_evt_cause { HCLGEVF_VECTOR0_EVENT_RST, HCLGEVF_VECTOR0_EVENT_MBX, @@ -141,6 +143,7 @@ enum hclgevf_states { struct hclgevf_mac { u8 media_type; + u8 module_type; u8 mac_addr[ETH_ALEN]; int link; u8 duplex; @@ -210,6 +213,15 @@ struct hclgevf_misc_vector { int vector_irq; }; +struct hclgevf_rst_stats { + u32 rst_cnt; /* the number of reset */ + u32 vf_func_rst_cnt; /* the number of VF function reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 vf_rst_cnt; /* the number of VF reset */ + u32 rst_done_cnt; /* the number of reset completed */ + u32 hw_rst_done_cnt; /* the number of HW reset completed */ +}; + struct hclgevf_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; @@ -227,7 +239,7 @@ struct hclgevf_dev { #define HCLGEVF_RESET_REQUESTED 0 #define HCLGEVF_RESET_PENDING 1 unsigned long reset_state; /* requested, pending */ - unsigned long reset_count; /* the number of reset has been done */ + struct hclgevf_rst_stats rst_stats; u32 reset_attempts; u32 fw_version; @@ -272,6 +284,7 @@ struct hclgevf_dev { struct hnae3_client *nic_client; struct hnae3_client *roce_client; u32 flag; + u32 stats_timer; }; static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) @@ -290,4 +303,6 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, u8 duplex); void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + u8 *port_base_vlan_info, u8 data_size); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 7dc3c9f79169..30f2e9352cf3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -49,8 +49,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (i >= HCLGEVF_MAX_TRY_TIMES) { dev_err(&hdev->pdev->dev, - "VF could not get mbx resp(=%d) from PF in %d tries\n", - hdev->mbx_resp.received_resp, i); + "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n", + code0, code1, hdev->mbx_resp.received_resp, i); return -EIO; } @@ -68,8 +68,11 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) { dev_err(&hdev->pdev->dev, - "VF could not match resp code(code0=%d,code1=%d), %d", + "VF could not match resp code(code0=%d,code1=%d), %d\n", code0, code1, mbx_resp->resp_status); + dev_err(&hdev->pdev->dev, + "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n", + r_code0, r_code1); return -EIO; } @@ -95,6 +98,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, } hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); + req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT : + ~HCLGE_MBX_NEED_RESP_BIT; req->msg[0] = code; req->msg[1] = subcode; memcpy(&req->msg[2], msg_data, msg_len); @@ -198,6 +203,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: case HCLGE_MBX_LINK_STAT_MODE: + case HLCGE_MBX_PUSH_VLAN_INFO: /* set this mbx event as pending. 
This is required as we * might lose interrupt event when mbx task is busy * handling. This shall be cleared when mbx task just @@ -208,7 +214,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) /* we will drop the async msg if we find ARQ as full * and continue with next message */ - if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) { + if (atomic_read(&hdev->arq.count) >= + HCLGE_MBX_MAX_ARQ_MSG_NUM) { dev_warn(&hdev->pdev->dev, "Async Q full, dropping msg(%d)\n", req->msg[1]); @@ -220,7 +227,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); hclge_mbx_tail_ptr_move_arq(hdev->arq); - hdev->arq.count++; + atomic_inc(&hdev->arq.count); hclgevf_mbx_task_schedule(hdev); @@ -243,8 +250,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { enum hnae3_reset_type reset_type; - u16 link_status; - u16 *msg_q; + u16 link_status, state; + u16 *msg_q, *vlan_info; u8 duplex; u32 speed; u32 tail; @@ -272,7 +279,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) link_status = le16_to_cpu(msg_q[1]); memcpy(&speed, &msg_q[2], sizeof(speed)); duplex = (u8)le16_to_cpu(msg_q[4]); - hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]); /* update upper layer with new link status */ hclgevf_update_link_status(hdev, link_status); @@ -300,6 +306,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) hclgevf_reset_task_schedule(hdev); break; + case HLCGE_MBX_PUSH_VLAN_INFO: + state = le16_to_cpu(msg_q[1]); + vlan_info = &msg_q[1]; + hclgevf_update_port_base_vlan_info(hdev, state, + (u8 *)vlan_info, 8); + break; default: dev_err(&hdev->pdev->dev, "fetched unsupported(%d) message from arq\n", @@ -308,7 +320,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) hclge_mbx_head_ptr_move_arq(hdev->arq); - hdev->arq.count--; + atomic_dec(&hdev->arq.count); msg_q = hdev->arq.msg_q[hdev->arq.head]; } }
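The hclge_set_vlan_filter() hunk near the top of this series only writes the hardware VLAN filter table while port based VLAN is disabled; while it is enabled, VLANs added or removed by the stack are tracked in the vport VLAN list and flushed to hardware once port based VLAN is disabled again. Below is a minimal, standalone C sketch of that decision; the *_sketch names are hypothetical stand-ins, not driver functions.

#include <stdbool.h>
#include <stdio.h>

enum { PORT_BASE_VLAN_DISABLE, PORT_BASE_VLAN_ENABLE };

/* stand-in for hclge_set_vlan_filter_hw(); the real code issues firmware commands */
static int hw_vlan_table_write_sketch(unsigned int vlan_id, bool is_kill)
{
	printf("hw table: %s vlan %u\n", is_kill ? "del" : "add", vlan_id);
	return 0;
}

static int set_vlan_filter_sketch(int port_base_vlan_state,
				  unsigned int vlan_id, bool is_kill)
{
	bool written_to_tbl = false;
	int ret = 0;

	/* the hardware table is touched only while port based VLAN is disabled */
	if (port_base_vlan_state == PORT_BASE_VLAN_DISABLE) {
		ret = hw_vlan_table_write_sketch(vlan_id, is_kill);
		written_to_tbl = true;
	}
	if (ret)
		return ret;

	/* the software list is always updated, remembering whether the entry
	 * already reached the hardware table
	 */
	printf("soft list: %s vlan %u (in hw: %d)\n",
	       is_kill ? "remove" : "add", vlan_id, written_to_tbl);
	return 0;
}

int main(void)
{
	set_vlan_filter_sketch(PORT_BASE_VLAN_ENABLE, 100, false);
	set_vlan_filter_sketch(PORT_BASE_VLAN_DISABLE, 100, false);
	return 0;
}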
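hclge_push_vf_port_base_vlan_info() above packs the HLCGE_MBX_PUSH_VLAN_INFO payload as four u16 fields at fixed byte offsets: state at 0, vlan_proto at 2, qos at 4 and vlan_tag at 6; the VF picks the same eight bytes back out of msg_q[] in hclgevf_mbx_async_handler(). The following standalone sketch shows only that layout; endianness handling done by the real driver is omitted and all names are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* fields carried by the 8-byte port based VLAN push message */
struct port_vlan_msg_sketch {
	uint16_t state;
	uint16_t vlan_proto;
	uint16_t qos;
	uint16_t vlan_tag;
};

static void pack_sketch(uint8_t msg[8], const struct port_vlan_msg_sketch *m)
{
	memcpy(&msg[0], &m->state, sizeof(uint16_t));
	memcpy(&msg[2], &m->vlan_proto, sizeof(uint16_t));
	memcpy(&msg[4], &m->qos, sizeof(uint16_t));
	memcpy(&msg[6], &m->vlan_tag, sizeof(uint16_t));
}

static void unpack_sketch(const uint8_t msg[8], struct port_vlan_msg_sketch *m)
{
	memcpy(&m->state, &msg[0], sizeof(uint16_t));
	memcpy(&m->vlan_proto, &msg[2], sizeof(uint16_t));
	memcpy(&m->qos, &msg[4], sizeof(uint16_t));
	memcpy(&m->vlan_tag, &msg[6], sizeof(uint16_t));
}

int main(void)
{
	struct port_vlan_msg_sketch out = { 1, 0x8100, 0, 10 }, in;
	uint8_t msg[8];

	pack_sketch(msg, &out);
	unpack_sketch(msg, &in);
	printf("state %u proto 0x%x qos %u tag %u\n",
	       (unsigned int)in.state, (unsigned int)in.vlan_proto,
	       (unsigned int)in.qos, (unsigned int)in.vlan_tag);
	return 0;
}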
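hclgevf_cmd_csq_clean() above now checks that the head pointer reported by hardware lies inside the in-flight window of the circular CSQ, bounded by next_to_clean and next_to_use with possible wrap-around, and then derives the number of cleaned descriptors modulo the ring size. A standalone sketch of those two calculations, with an assumed ring size:

#include <stdbool.h>
#include <stdio.h>

#define DESC_NUM_SKETCH 1024	/* assumed ring size for this example */

/* head is acceptable only if it falls between next_to_clean and next_to_use,
 * taking wrap-around of the circular queue into account
 */
static bool csq_head_valid_sketch(int ntc, int ntu, int head)
{
	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

/* number of descriptors consumed since the last clean, modulo ring size */
static int csq_clean_count_sketch(int ntc, int head)
{
	return (head - ntc + DESC_NUM_SKETCH) % DESC_NUM_SKETCH;
}

int main(void)
{
	/* wrapped case: next_to_use has wrapped past the end of the ring */
	printf("valid=%d clean=%d\n",
	       csq_head_valid_sketch(1020, 4, 2),
	       csq_clean_count_sketch(1020, 2));
	return 0;
}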
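The ARQ bookkeeping changes hclgevf_mbx_arq_ring.count to atomic_t because the interrupt path in hclgevf_mbx_handler() increments it while the task context in hclgevf_mbx_async_handler() decrements it, and the full-queue check must see a coherent value without taking a lock. A simplified C11 sketch of that producer/consumer counter, with an assumed queue depth:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ARQ_MSG_NUM_SKETCH 1024	/* assumed queue depth */

static atomic_int arq_count_sketch;

/* producer (interrupt context in the driver): drop the message when full */
static bool arq_enqueue_sketch(void)
{
	if (atomic_load(&arq_count_sketch) >= MAX_ARQ_MSG_NUM_SKETCH) {
		puts("Async Q full, dropping msg");
		return false;
	}
	/* ...copy the message into the ring and move the tail pointer... */
	atomic_fetch_add(&arq_count_sketch, 1);
	return true;
}

/* consumer (task context in the driver) */
static void arq_dequeue_sketch(void)
{
	/* ...handle the message and move the head pointer... */
	atomic_fetch_sub(&arq_count_sketch, 1);
}

int main(void)
{
	arq_enqueue_sketch();
	arq_dequeue_sketch();
	printf("count=%d\n", atomic_load(&arq_count_sketch));
	return 0;
}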