Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3')
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h            |   3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h                |  14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c            | 904
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h            |  14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c         | 116
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c |  93
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h |  13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c    | 497
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h    |   7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c     |   8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c      | 108
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c  |  39
12 files changed, 1047 insertions(+), 769 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index c2bd2584201f..b668df6193be 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -80,6 +80,9 @@ enum hclge_mbx_tbl_cfg_subcode { #define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U #define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4 +#define HCLGE_RESET_SCHED_TIMEOUT (3 * HZ) +#define HCLGE_MBX_SCHED_TIMEOUT (HZ / 2) + struct hclge_ring_chain_param { u8 ring_type; u8 tqp_index; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 3f7a9a4c59d5..54caf0dd1204 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -859,6 +859,20 @@ struct hnae3_handle { #define hnae3_get_bit(origin, shift) \ hnae3_get_field(origin, 0x1 << (shift), shift) +#define HNAE3_FORMAT_MAC_ADDR_LEN 18 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4 +#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5 + +static inline void hnae3_format_mac_addr(char *format_mac_addr, + const u8 *mac_addr) +{ + snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x", + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4], + mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]); +} + int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 9ccebbaa0d69..babc5d7a3b52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -17,6 +17,7 @@ #include <linux/skbuff.h> #include <linux/sctp.h> #include <net/gre.h> +#include <net/gro.h> #include <net/ip6_checksum.h> #include <net/pkt_cls.h> #include <net/tcp.h> @@ -53,10 +54,6 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, " Network interface message level setting"); -static unsigned int tx_spare_buf_size; -module_param(tx_spare_buf_size, uint, 0400); -MODULE_PARM_DESC(tx_spare_buf_size, "Size used to allocate tx spare buffer"); - static unsigned int tx_sgl = 1; module_param(tx_sgl, uint, 0600); MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping"); @@ -1005,9 +1002,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring, return false; if (ALIGN(len, dma_get_cache_alignment()) > space) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_spare_full++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_spare_full); return false; } @@ -1024,9 +1019,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, return false; if (space < HNS3_MAX_SGL_SIZE) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_spare_full++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_spare_full); return false; } @@ -1041,8 +1034,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) dma_addr_t dma; int order; - alloc_size = tx_spare_buf_size ? 
tx_spare_buf_size : - ring->tqp->handle->kinfo.tx_spare_buf_size; + alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; if (!alloc_size) return; @@ -1306,7 +1298,7 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) if (!(!skb->encapsulation && (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || l4.udp->dest == htons(GENEVE_UDP_PORT) || - l4.udp->dest == htons(4790)))) + l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) return false; return true; @@ -1359,44 +1351,9 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, HNS3_TUN_NVGRE); } -static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) +static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3, + u32 *type_cs_vlan_tso) { - unsigned char *l2_hdr = skb->data; - u32 l4_proto = ol4_proto; - union l4_hdr_info l4; - union l3_hdr_info l3; - u32 l2_len, l3_len; - - l4.hdr = skb_transport_header(skb); - l3.hdr = skb_network_header(skb); - - /* handle encapsulation skb */ - if (skb->encapsulation) { - /* If this is a not UDP/GRE encapsulation skb */ - if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { - /* drop the skb tunnel packet if hardware don't support, - * because hardware can't calculate csum when TSO. - */ - if (skb_is_gso(skb)) - return -EDOM; - - /* the stack computes the IP header already, - * driver calculate l4 checksum when not TSO. - */ - return skb_checksum_help(skb); - } - - hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); - - /* switch to inner header */ - l2_hdr = skb_inner_mac_header(skb); - l3.hdr = skb_inner_network_header(skb); - l4.hdr = skb_inner_transport_header(skb); - l4_proto = il4_proto; - } - if (l3.v4->version == 4) { hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, HNS3_L3T_IPV4); @@ -1410,15 +1367,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S, HNS3_L3T_IPV6); } +} - /* compute inner(/normal) L2 header size, defined in 2 Bytes */ - l2_len = l3.hdr - l2_hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); - - /* compute inner(/normal) L3 header size, defined in 4 Bytes */ - l3_len = l4.hdr - l3.hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); - +static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4, + u32 l4_proto, u32 *type_cs_vlan_tso) +{ /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: @@ -1464,6 +1417,57 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, return 0; } +static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + unsigned char *l2_hdr = skb->data; + u32 l4_proto = ol4_proto; + union l4_hdr_info l4; + union l3_hdr_info l3; + u32 l2_len, l3_len; + + l4.hdr = skb_transport_header(skb); + l3.hdr = skb_network_header(skb); + + /* handle encapsulation skb */ + if (skb->encapsulation) { + /* If this is a not UDP/GRE encapsulation skb */ + if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. 
+ */ + return skb_checksum_help(skb); + } + + hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); + + /* switch to inner header */ + l2_hdr = skb_inner_mac_header(skb); + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + l4_proto = il4_proto; + } + + hns3_set_l3_type(skb, l3, type_cs_vlan_tso); + + /* compute inner(/normal) L2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - l2_hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); + + return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso); +} + static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, struct sk_buff *skb) { @@ -1540,92 +1544,122 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb) return true; } -static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, - struct sk_buff *skb, struct hns3_desc *desc, - struct hns3_desc_cb *desc_cb) +struct hns3_desc_param { + u32 paylen_ol4cs; + u32 ol_type_vlan_len_msec; + u32 type_cs_vlan_tso; + u16 mss_hw_csum; + u16 inner_vtag; + u16 out_vtag; +}; + +static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa) +{ + pa->paylen_ol4cs = skb->len; + pa->ol_type_vlan_len_msec = 0; + pa->type_cs_vlan_tso = 0; + pa->mss_hw_csum = 0; + pa->inner_vtag = 0; + pa->out_vtag = 0; +} + +static int hns3_handle_vlan_info(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_param *param) { - u32 ol_type_vlan_len_msec = 0; - u32 paylen_ol4cs = skb->len; - u32 type_cs_vlan_tso = 0; - u16 mss_hw_csum = 0; - u16 inner_vtag = 0; - u16 out_vtag = 0; int ret; ret = hns3_handle_vtags(ring, skb); if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_vlan_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_vlan_err); return ret; } else if (ret == HNS3_INNER_VLAN_TAG) { - inner_vtag = skb_vlan_tag_get(skb); - inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + param->inner_vtag = skb_vlan_tag_get(skb); + param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; - hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); + hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); } else if (ret == HNS3_OUTER_VLAN_TAG) { - out_vtag = skb_vlan_tag_get(skb); - out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & + param->out_vtag = skb_vlan_tag_get(skb); + param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; - hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, + hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, 1); } + return 0; +} - desc_cb->send_bytes = skb->len; +static int hns3_handle_csum_partial(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + struct hns3_desc_param *param) +{ + u8 ol4_proto, il4_proto; + int ret; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 ol4_proto, il4_proto; - - if (hns3_check_hw_tx_csum(skb)) { - /* set checksum start and offset, defined in 2 Bytes */ - hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, - skb_checksum_start_offset(skb) >> 1); - hns3_set_field(ol_type_vlan_len_msec, - HNS3_TXD_CSUM_OFFSET_S, - skb->csum_offset >> 1); - mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); - goto out_hw_tx_csum; - } + if (hns3_check_hw_tx_csum(skb)) { + /* set checksum start and offset, defined in 2 Bytes */ + hns3_set_field(param->type_cs_vlan_tso, 
HNS3_TXD_CSUM_START_S, + skb_checksum_start_offset(skb) >> 1); + hns3_set_field(param->ol_type_vlan_len_msec, + HNS3_TXD_CSUM_OFFSET_S, + skb->csum_offset >> 1); + param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); + return 0; + } - skb_reset_mac_len(skb); + skb_reset_mac_len(skb); - ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l4_proto_err++; - u64_stats_update_end(&ring->syncp); - return ret; - } + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l4_proto_err); + return ret; + } - ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_l2l3l4_err++; - u64_stats_update_end(&ring->syncp); - return ret; - } + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, + ¶m->type_cs_vlan_tso, + ¶m->ol_type_vlan_len_msec); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_l2l3l4_err); + return ret; + } + + ret = hns3_set_tso(skb, ¶m->paylen_ol4cs, ¶m->mss_hw_csum, + ¶m->type_cs_vlan_tso, &desc_cb->send_bytes); + if (unlikely(ret < 0)) { + hns3_ring_stats_update(ring, tx_tso_err); + return ret; + } + return 0; +} + +static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, struct hns3_desc *desc, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc_param param; + int ret; + + hns3_init_desc_data(skb, ¶m); + ret = hns3_handle_vlan_info(ring, skb, ¶m); + if (unlikely(ret < 0)) + return ret; + + desc_cb->send_bytes = skb->len; - ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum, - &type_cs_vlan_tso, &desc_cb->send_bytes); - if (unlikely(ret < 0)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_tso_err++; - u64_stats_update_end(&ring->syncp); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ret = hns3_handle_csum_partial(ring, skb, desc_cb, ¶m); + if (ret) return ret; - } } -out_hw_tx_csum: /* Set txbd */ desc->tx.ol_type_vlan_len_msec = - cpu_to_le32(ol_type_vlan_len_msec); - desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs); - desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum); - desc->tx.vlan_tag = cpu_to_le16(inner_vtag); - desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); + cpu_to_le32(param.ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); + desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); + desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); + desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); + desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); return 0; } @@ -1705,9 +1739,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, } if (unlikely(dma_mapping_error(dev, dma))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -1853,9 +1885,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring, * recursion level of over HNS3_MAX_RECURSION_LEVEL. 
*/ if (bd_num == UINT_MAX) { - u64_stats_update_begin(&ring->syncp); - ring->stats.over_max_recursion++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, over_max_recursion); return -ENOMEM; } @@ -1864,16 +1894,12 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring, */ if (skb->len > HNS3_MAX_TSO_SIZE || (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.hw_limitation++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, hw_limitation); return -ENOMEM; } if (__skb_linearize(skb)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -1903,9 +1929,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, bd_num = hns3_tx_bd_count(skb->len); - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_copy++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_copy); } out: @@ -1925,9 +1949,7 @@ out: return bd_num; } - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_busy); return -EBUSY; } @@ -2012,9 +2034,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, ring->pending_buf += num; if (!doorbell) { - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_more++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_more); return; } @@ -2064,9 +2084,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, ret = skb_copy_bits(skb, 0, buf, size); if (unlikely(ret < 0)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.copy_bits_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, copy_bits_err); return ret; } @@ -2089,9 +2107,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring, dma_sync_single_for_device(ring_to_dev(ring), dma, size, DMA_TO_DEVICE); - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_bounce++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_bounce); + return bd_num; } @@ -2121,9 +2138,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); if (unlikely(nents < 0)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.skb2sgl_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, skb2sgl_err); return -ENOMEM; } @@ -2132,9 +2147,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, DMA_TO_DEVICE); if (unlikely(!sgt->nents)) { hns3_tx_spare_rollback(ring, cb_len); - u64_stats_update_begin(&ring->syncp); - ring->stats.map_sg_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, map_sg_err); return -ENOMEM; } @@ -2146,10 +2159,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring, for (i = 0; i < sgt->nents; i++) bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), sg_dma_len(sgt->sgl + i)); - - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_sgl++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, tx_sgl); return bd_num; } @@ -2174,23 +2184,45 @@ out: return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); } +static int hns3_handle_skb_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, + struct hns3_desc_cb *desc_cb, + int next_to_use_head) +{ + int ret; + + ret = hns3_fill_skb_desc(ring, skb, 
&ring->desc[ring->next_to_use], + desc_cb); + if (unlikely(ret < 0)) + goto fill_err; + + /* 'ret < 0' means filling error, 'ret == 0' means skb->len is + * zero, which is unlikely, and 'ret > 0' means how many tx desc + * need to be notified to the hw. + */ + ret = hns3_handle_desc_filling(ring, skb); + if (likely(ret > 0)) + return ret; + +fill_err: + hns3_clear_desc(ring, next_to_use_head); + return ret; +} + netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct netdev_queue *dev_queue; - int pre_ntu, next_to_use_head; + int pre_ntu, ret; bool doorbell; - int ret; /* Hardware can only handle short frames above 32 bytes */ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) { hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return NETDEV_TX_OK; } @@ -2209,20 +2241,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) goto out_err_tx_ok; } - next_to_use_head = ring->next_to_use; - - ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], - desc_cb); - if (unlikely(ret < 0)) - goto fill_err; - - /* 'ret < 0' means filling error, 'ret == 0' means skb->len is - * zero, which is unlikely, and 'ret > 0' means how many tx desc - * need to be notified to the hw. - */ - ret = hns3_handle_desc_filling(ring, skb); + ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); if (unlikely(ret <= 0)) - goto fill_err; + goto out_err_tx_ok; pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : (ring->desc_num - 1); @@ -2244,9 +2265,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; -fill_err: - hns3_clear_desc(ring, next_to_use_head); - out_err_tx_ok: dev_kfree_skb_any(skb); hns3_tx_doorbell(ring, 0, !netdev_xmit_more()); @@ -2255,6 +2273,8 @@ out_err_tx_ok: static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) { + char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN]; + char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = hns3_get_handle(netdev); struct sockaddr *mac_addr = p; int ret; @@ -2263,8 +2283,9 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return -EADDRNOTAVAIL; if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { - netdev_info(netdev, "already using mac address %pM\n", - mac_addr->sa_data); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_info(netdev, "already using mac address %s\n", + format_mac_addr_sa); return 0; } @@ -2273,8 +2294,10 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) */ if (!hns3_is_phys_func(h->pdev) && !is_zero_ether_addr(netdev->perm_addr)) { - netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n", - netdev->perm_addr, mac_addr->sa_data); + hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); + hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); + netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n", + format_mac_addr_perm, format_mac_addr_sa); return -EPERM; } @@ -2382,90 +2405,89 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb, return features; } +static void hns3_fetch_stats(struct rtnl_link_stats64 *stats, + 
struct hns3_enet_ring *ring, bool is_tx) +{ + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + if (is_tx) { + stats->tx_bytes += ring->stats.tx_bytes; + stats->tx_packets += ring->stats.tx_pkts; + stats->tx_dropped += ring->stats.sw_err_cnt; + stats->tx_dropped += ring->stats.tx_vlan_err; + stats->tx_dropped += ring->stats.tx_l4_proto_err; + stats->tx_dropped += ring->stats.tx_l2l3l4_err; + stats->tx_dropped += ring->stats.tx_tso_err; + stats->tx_dropped += ring->stats.over_max_recursion; + stats->tx_dropped += ring->stats.hw_limitation; + stats->tx_dropped += ring->stats.copy_bits_err; + stats->tx_dropped += ring->stats.skb2sgl_err; + stats->tx_dropped += ring->stats.map_sg_err; + stats->tx_errors += ring->stats.sw_err_cnt; + stats->tx_errors += ring->stats.tx_vlan_err; + stats->tx_errors += ring->stats.tx_l4_proto_err; + stats->tx_errors += ring->stats.tx_l2l3l4_err; + stats->tx_errors += ring->stats.tx_tso_err; + stats->tx_errors += ring->stats.over_max_recursion; + stats->tx_errors += ring->stats.hw_limitation; + stats->tx_errors += ring->stats.copy_bits_err; + stats->tx_errors += ring->stats.skb2sgl_err; + stats->tx_errors += ring->stats.map_sg_err; + } else { + stats->rx_bytes += ring->stats.rx_bytes; + stats->rx_packets += ring->stats.rx_pkts; + stats->rx_dropped += ring->stats.l2_err; + stats->rx_errors += ring->stats.l2_err; + stats->rx_errors += ring->stats.l3l4_csum_err; + stats->rx_crc_errors += ring->stats.l2_err; + stats->multicast += ring->stats.rx_multicast; + stats->rx_length_errors += ring->stats.err_pkt_len; + } + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +} + static void hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct hns3_nic_priv *priv = netdev_priv(netdev); int queue_num = priv->ae_handle->kinfo.num_tqps; struct hnae3_handle *handle = priv->ae_handle; + struct rtnl_link_stats64 ring_total_stats; struct hns3_enet_ring *ring; - u64 rx_length_errors = 0; - u64 rx_crc_errors = 0; - u64 rx_multicast = 0; - unsigned int start; - u64 tx_errors = 0; - u64 rx_errors = 0; unsigned int idx; - u64 tx_bytes = 0; - u64 rx_bytes = 0; - u64 tx_pkts = 0; - u64 rx_pkts = 0; - u64 tx_drop = 0; - u64 rx_drop = 0; if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) return; handle->ae_algo->ops->update_stats(handle, &netdev->stats); + memset(&ring_total_stats, 0, sizeof(ring_total_stats)); for (idx = 0; idx < queue_num; idx++) { /* fetch the tx stats */ ring = &priv->ring[idx]; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - tx_bytes += ring->stats.tx_bytes; - tx_pkts += ring->stats.tx_pkts; - tx_drop += ring->stats.sw_err_cnt; - tx_drop += ring->stats.tx_vlan_err; - tx_drop += ring->stats.tx_l4_proto_err; - tx_drop += ring->stats.tx_l2l3l4_err; - tx_drop += ring->stats.tx_tso_err; - tx_drop += ring->stats.over_max_recursion; - tx_drop += ring->stats.hw_limitation; - tx_drop += ring->stats.copy_bits_err; - tx_drop += ring->stats.skb2sgl_err; - tx_drop += ring->stats.map_sg_err; - tx_errors += ring->stats.sw_err_cnt; - tx_errors += ring->stats.tx_vlan_err; - tx_errors += ring->stats.tx_l4_proto_err; - tx_errors += ring->stats.tx_l2l3l4_err; - tx_errors += ring->stats.tx_tso_err; - tx_errors += ring->stats.over_max_recursion; - tx_errors += ring->stats.hw_limitation; - tx_errors += ring->stats.copy_bits_err; - tx_errors += ring->stats.skb2sgl_err; - tx_errors += ring->stats.map_sg_err; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + hns3_fetch_stats(&ring_total_stats, ring, 
true); /* fetch the rx stats */ ring = &priv->ring[idx + queue_num]; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - rx_bytes += ring->stats.rx_bytes; - rx_pkts += ring->stats.rx_pkts; - rx_drop += ring->stats.l2_err; - rx_errors += ring->stats.l2_err; - rx_errors += ring->stats.l3l4_csum_err; - rx_crc_errors += ring->stats.l2_err; - rx_multicast += ring->stats.rx_multicast; - rx_length_errors += ring->stats.err_pkt_len; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - } - - stats->tx_bytes = tx_bytes; - stats->tx_packets = tx_pkts; - stats->rx_bytes = rx_bytes; - stats->rx_packets = rx_pkts; - - stats->rx_errors = rx_errors; - stats->multicast = rx_multicast; - stats->rx_length_errors = rx_length_errors; - stats->rx_crc_errors = rx_crc_errors; + hns3_fetch_stats(&ring_total_stats, ring, false); + } + + stats->tx_bytes = ring_total_stats.tx_bytes; + stats->tx_packets = ring_total_stats.tx_packets; + stats->rx_bytes = ring_total_stats.rx_bytes; + stats->rx_packets = ring_total_stats.rx_packets; + + stats->rx_errors = ring_total_stats.rx_errors; + stats->multicast = ring_total_stats.multicast; + stats->rx_length_errors = ring_total_stats.rx_length_errors; + stats->rx_crc_errors = ring_total_stats.rx_crc_errors; stats->rx_missed_errors = netdev->stats.rx_missed_errors; - stats->tx_errors = tx_errors; - stats->rx_dropped = rx_drop; - stats->tx_dropped = tx_drop; + stats->tx_errors = ring_total_stats.tx_errors; + stats->rx_dropped = ring_total_stats.rx_dropped; + stats->tx_dropped = ring_total_stats.tx_dropped; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -2658,18 +2680,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) return ret; } -static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +static int hns3_get_timeout_queue(struct net_device *ndev) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = hns3_get_handle(ndev); - struct hns3_enet_ring *tx_ring; - struct napi_struct *napi; - int timeout_queue = 0; - int hw_head, hw_tail; - int fbd_num, fbd_oft; - int ebd_num, ebd_oft; - int bd_num, bd_err; - int ring_en, tc; int i; /* Find the stopped queue the same way the stack does */ @@ -2678,11 +2690,17 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) unsigned long trans_start; q = netdev_get_tx_queue(ndev, i); - trans_start = q->trans_start; + trans_start = READ_ONCE(q->trans_start); if (netif_xmit_stopped(q) && time_after(jiffies, (trans_start + ndev->watchdog_timeo))) { - timeout_queue = i; +#ifdef CONFIG_BQL + struct dql *dql = &q->dql; + + netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n", + dql->last_obj_cnt, dql->num_queued, + dql->adj_limit, dql->num_completed); +#endif netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", q->state, jiffies_to_msecs(jiffies - trans_start)); @@ -2690,17 +2708,15 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) } } - if (i == ndev->num_tx_queues) { - netdev_info(ndev, - "no netdev TX timeout queue found, timeout count: %llu\n", - priv->tx_timeout_count); - return false; - } - - priv->tx_timeout_count++; + return i; +} - tx_ring = &priv->ring[timeout_queue]; - napi = &tx_ring->tqp_vector->napi; +static void hns3_dump_queue_stats(struct net_device *ndev, + struct hns3_enet_ring *tx_ring, + int timeout_queue) +{ + struct napi_struct *napi = &tx_ring->tqp_vector->napi; + struct 
hns3_nic_priv *priv = netdev_priv(ndev); netdev_info(ndev, "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", @@ -2716,6 +2732,48 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n", tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); +} + +static void hns3_dump_queue_reg(struct net_device *ndev, + struct hns3_enet_ring *tx_ring) +{ + netdev_info(ndev, + "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG), + readl(tx_ring->tqp_vector->mask_addr)); + netdev_info(ndev, + "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", + hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG), + hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG), + hns3_tqp_read_reg(tx_ring, + HNS3_RING_TX_RING_EBD_OFFSET_REG)); +} + +static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = hns3_get_handle(ndev); + struct hns3_enet_ring *tx_ring; + int timeout_queue; + + timeout_queue = hns3_get_timeout_queue(ndev); + if (timeout_queue >= ndev->num_tx_queues) { + netdev_info(ndev, + "no netdev TX timeout queue found, timeout count: %llu\n", + priv->tx_timeout_count); + return false; + } + + priv->tx_timeout_count++; + + tx_ring = &priv->ring[timeout_queue]; + hns3_dump_queue_stats(ndev, tx_ring, timeout_queue); /* When mac received many pause frames continuous, it's unable to send * packets, which may cause tx timeout @@ -2728,32 +2786,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt); } - hw_head = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_HEAD_REG); - hw_tail = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_TAIL_REG); - fbd_num = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_FBDNUM_REG); - fbd_oft = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_OFFSET_REG); - ebd_num = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_EBDNUM_REG); - ebd_oft = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_EBD_OFFSET_REG); - bd_num = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_BD_NUM_REG); - bd_err = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_BD_ERR_REG); - ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); - tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); - - netdev_info(ndev, - "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", - bd_num, hw_head, hw_tail, bd_err, - readl(tx_ring->tqp_vector->mask_addr)); - netdev_info(ndev, - "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", - ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); + hns3_dump_queue_reg(ndev, tx_ring); return true; } @@ -2836,14 +2869,16 @@ static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) { struct 
hnae3_handle *h = hns3_get_handle(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; if (!h->ae_algo->ops->set_vf_mac) return -EOPNOTSUPP; if (is_multicast_ether_addr(mac)) { + hnae3_format_mac_addr(format_mac_addr, mac); netdev_err(netdev, - "Invalid MAC:%pM specified. Could not set MAC\n", - mac); + "Invalid MAC:%s specified. Could not set MAC\n", + format_mac_addr); return -EINVAL; } @@ -3497,17 +3532,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, for (i = 0; i < cleand_count; i++) { desc_cb = &ring->desc_cb[ring->next_to_use]; if (desc_cb->reuse_flag) { - u64_stats_update_begin(&ring->syncp); - ring->stats.reuse_pg_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, reuse_pg_cnt); hns3_reuse_buffer(ring, ring->next_to_use); } else { ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); hns3_rl_err(ring_to_netdev(ring), "alloc rx buffer failed: %d\n", @@ -3519,9 +3550,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, } hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); - u64_stats_update_begin(&ring->syncp); - ring->stats.non_reuse_pg++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, non_reuse_pg); } ring_ptr_move_fw(ring, next_to_use); @@ -3536,6 +3565,34 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb) return page_count(cb->priv) == cb->pagecnt_bias; } +static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i, + struct hns3_enet_ring *ring, + int pull_len, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + u32 frag_offset = desc_cb->page_offset + pull_len; + int size = le16_to_cpu(desc->rx.size); + u32 frag_size = size - pull_len; + void *frag = napi_alloc_frag(frag_size); + + if (unlikely(!frag)) { + hns3_ring_stats_update(ring, frag_alloc_err); + + hns3_rl_err(ring_to_netdev(ring), + "failed to allocate rx frag\n"); + return -ENOMEM; + } + + desc_cb->reuse_flag = 1; + memcpy(frag, desc_cb->buf + frag_offset, frag_size); + skb_add_rx_frag(skb, i, virt_to_page(frag), + offset_in_page(frag), frag_size, frag_size); + + hns3_ring_stats_update(ring, frag_alloc); + return 0; +} + static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) @@ -3545,6 +3602,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, int size = le16_to_cpu(desc->rx.size); u32 truesize = hns3_buf_size(ring); u32 frag_size = size - pull_len; + int ret = 0; bool reused; if (ring->page_pool) { @@ -3579,27 +3637,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, desc_cb->page_offset = 0; desc_cb->reuse_flag = 1; } else if (frag_size <= ring->rx_copybreak) { - void *frag = napi_alloc_frag(frag_size); - - if (unlikely(!frag)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.frag_alloc_err++; - u64_stats_update_end(&ring->syncp); - - hns3_rl_err(ring_to_netdev(ring), - "failed to allocate rx frag\n"); + ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb); + if (ret) goto out; - } - - desc_cb->reuse_flag = 1; - memcpy(frag, desc_cb->buf + frag_offset, frag_size); - skb_add_rx_frag(skb, i, virt_to_page(frag), - offset_in_page(frag), frag_size, frag_size); - - u64_stats_update_begin(&ring->syncp); - ring->stats.frag_alloc++; - u64_stats_update_end(&ring->syncp); - return; } out: @@ -3682,9 
+3722,7 @@ static bool hns3_checksum_complete(struct hns3_enet_ring *ring, hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE) return false; - u64_stats_update_begin(&ring->syncp); - ring->stats.csum_complete++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, csum_complete); skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)csum); @@ -3758,9 +3796,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) | BIT(HNS3_RXD_OL3E_B) | BIT(HNS3_RXD_OL4E_B)))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.l3l4_csum_err++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, l3l4_csum_err); return; } @@ -3851,10 +3887,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, skb = ring->skb; if (unlikely(!skb)) { hns3_rl_err(netdev, "alloc rx skb fail\n"); - - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); return -ENOMEM; } @@ -3885,9 +3918,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, if (ring->page_pool) skb_mark_for_recycle(skb); - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, seg_pkt_cnt); ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); __skb_put(skb, ring->pull_len); @@ -4015,6 +4046,39 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, skb_set_hash(skb, rss_hash, rss_type); } +static void hns3_handle_rx_ts_info(struct net_device *netdev, + struct hns3_desc *desc, struct sk_buff *skb, + u32 bd_base_info) +{ + if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) { + struct hnae3_handle *h = hns3_get_handle(netdev); + u32 nsec = le32_to_cpu(desc->ts_nsec); + u32 sec = le32_to_cpu(desc->ts_sec); + + if (h->ae_algo->ops->get_rx_hwts) + h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); + } +} + +static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring, + struct hns3_desc *desc, struct sk_buff *skb, + u32 l234info) +{ + struct net_device *netdev = ring_to_netdev(ring); + + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. + */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_tag); + } +} + static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) { struct net_device *netdev = ring_to_netdev(ring); @@ -4037,26 +4101,9 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) ol_info = le32_to_cpu(desc->rx.ol_info); csum = le16_to_cpu(desc->csum); - if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) { - struct hnae3_handle *h = hns3_get_handle(netdev); - u32 nsec = le32_to_cpu(desc->ts_nsec); - u32 sec = le32_to_cpu(desc->ts_sec); - - if (h->ae_algo->ops->get_rx_hwts) - h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); - } - - /* Based on hw strategy, the tag offloaded will be stored at - * ot_vlan_tag in two layer tag case, and stored at vlan_tag - * in one layer tag case. 
- */ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - u16 vlan_tag; + hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info); - if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), - vlan_tag); - } + hns3_handle_rx_vlan_tag(ring, desc, skb, l234info); if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | BIT(HNS3_RXD_L2E_B))))) { @@ -4079,9 +4126,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info, ol_info, csum); if (unlikely(ret)) { - u64_stats_update_begin(&ring->syncp); - ring->stats.rx_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, rx_err_cnt); return ret; } @@ -4297,87 +4342,70 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) return rx_pkt_total; } -static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, - struct hnae3_ring_chain_node *head) +static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node **head, + bool is_tx) { + u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX; + u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX; + struct hnae3_ring_chain_node *cur_chain = *head; struct pci_dev *pdev = tqp_vector->handle->pdev; - struct hnae3_ring_chain_node *cur_chain = head; struct hnae3_ring_chain_node *chain; - struct hns3_enet_ring *tx_ring; - struct hns3_enet_ring *rx_ring; - - tx_ring = tqp_vector->tx_group.ring; - if (tx_ring) { - cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX); - - cur_chain->next = NULL; - - while (tx_ring->next) { - tx_ring = tx_ring->next; - - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), - GFP_KERNEL); - if (!chain) - goto err_free_chain; - - cur_chain->next = chain; - chain->tqp_index = tx_ring->tqp->tqp_index; - hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - hnae3_set_field(chain->int_gl_idx, - HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, - HNAE3_RING_GL_TX); - - cur_chain = chain; - } - } + struct hns3_enet_ring *ring; - rx_ring = tqp_vector->rx_group.ring; - if (!tx_ring && rx_ring) { - cur_chain->next = NULL; - cur_chain->tqp_index = rx_ring->tqp->tqp_index; - hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + ring = is_tx ? 
tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; - rx_ring = rx_ring->next; + if (cur_chain) { + while (cur_chain->next) + cur_chain = cur_chain->next; } - while (rx_ring) { + while (ring) { chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); if (!chain) - goto err_free_chain; - - cur_chain->next = chain; - chain->tqp_index = rx_ring->tqp->tqp_index; + return -ENOMEM; + if (cur_chain) + cur_chain->next = chain; + else + *head = chain; + chain->tqp_index = ring->tqp->tqp_index; hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX); + bit_value); + hnae3_set_field(chain->int_gl_idx, + HNAE3_RING_GL_IDX_M, + HNAE3_RING_GL_IDX_S, field_value); cur_chain = chain; - rx_ring = rx_ring->next; + ring = ring->next; } return 0; +} + +static struct hnae3_ring_chain_node * +hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = NULL; + struct hnae3_ring_chain_node *chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, true)) + goto err_free_chain; + + if (hns3_create_ring_chain(tqp_vector, &cur_chain, false)) + goto err_free_chain; + + return cur_chain; err_free_chain: - cur_chain = head->next; while (cur_chain) { chain = cur_chain->next; devm_kfree(&pdev->dev, cur_chain); cur_chain = chain; } - head->next = NULL; - return -ENOMEM; + return NULL; } static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, @@ -4386,7 +4414,7 @@ static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, struct pci_dev *pdev = tqp_vector->handle->pdev; struct hnae3_ring_chain_node *chain_tmp, *chain; - chain = head->next; + chain = head; while (chain) { chain_tmp = chain->next; @@ -4501,7 +4529,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) } for (i = 0; i < priv->vector_num; i++) { - struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_ring_chain_node *vector_ring_chain; tqp_vector = &priv->tqp_vector[i]; @@ -4511,15 +4539,16 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) tqp_vector->tx_group.total_packets = 0; tqp_vector->handle = h; - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) { + ret = -ENOMEM; goto map_ring_fail; + } ret = h->ae_algo->ops->map_ring_to_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); + tqp_vector->vector_irq, vector_ring_chain); - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); if (ret) goto map_ring_fail; @@ -4618,7 +4647,7 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group) static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) { - struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_ring_chain_node *vector_ring_chain; struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; int i; @@ -4633,13 +4662,14 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) * chain between vector and ring, we should go on to deal with * the remaining options. 
*/ - if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain)) + vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector); + if (!vector_ring_chain) dev_warn(priv->dev, "failed to get ring chain\n"); h->ae_algo->ops->unmap_ring_from_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); + tqp_vector->vector_irq, vector_ring_chain); - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain); hns3_clear_ring_group(&tqp_vector->rx_group); hns3_clear_ring_group(&tqp_vector->tx_group); @@ -4934,6 +4964,7 @@ static void hns3_uninit_all_ring(struct hns3_nic_priv *priv) static int hns3_init_mac_addr(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hnae3_handle *h = priv->ae_handle; u8 mac_addr_temp[ETH_ALEN]; int ret = 0; @@ -4944,8 +4975,9 @@ static int hns3_init_mac_addr(struct net_device *netdev) /* Check if the MAC address is valid, if not get a random one */ if (!is_valid_ether_addr(mac_addr_temp)) { eth_hw_addr_random(netdev); - dev_warn(priv->dev, "using random MAC address %pM\n", - netdev->dev_addr); + hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); + dev_warn(priv->dev, "using random MAC address %s\n", + format_mac_addr); } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { eth_hw_addr_set(netdev, mac_addr_temp); ether_addr_copy(netdev->perm_addr, mac_addr_temp); @@ -4997,8 +5029,10 @@ static void hns3_client_stop(struct hnae3_handle *handle) static void hns3_info_show(struct hns3_nic_priv *priv) { struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; - dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); + hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); + dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); @@ -5287,9 +5321,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) if (!ring->desc_cb[ring->next_to_use].reuse_flag) { ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); + hns3_ring_stats_update(ring, sw_err_cnt); /* if alloc new buffer fail, exit directly * and reclear in up flow. 
*/ @@ -5531,8 +5563,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) return 0; } -static int hns3_reset_notify(struct hnae3_handle *handle, - enum hnae3_reset_notify_type type) +int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type) { int ret = 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 1715c98d906d..2803b2cd7f30 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -621,6 +621,11 @@ static inline int ring_space(struct hns3_enet_ring *ring) (begin - end)) - 1; } +static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg) +{ + return readl_relaxed(ring->tqp->io_base + reg); +} + static inline u32 hns3_read_reg(void __iomem *base, u32 reg) { return readl(base + reg); @@ -655,6 +660,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define hns3_buf_size(_ring) ((_ring)->buf_size) +#define hns3_ring_stats_update(ring, cnt) do { \ + typeof(ring) (tmp) = (ring); \ + u64_stats_update_begin(&(tmp)->syncp); \ + ((tmp)->stats.cnt)++; \ + u64_stats_update_end(&(tmp)->syncp); \ +} while (0) \ + static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring) { #if (PAGE_SIZE < 8192) @@ -705,6 +717,8 @@ void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector, u32 ql_value); void hns3_request_update_promisc_mode(struct hnae3_handle *handle); +int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type); #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index c9b4568d7a8d..c06c39ece80d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -643,11 +643,13 @@ static u32 hns3_get_link(struct net_device *netdev) } static void hns3_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; - int queue_num = h->kinfo.num_tqps; + int rx_queue_index = h->kinfo.num_tqps; if (hns3_nic_resetting(netdev)) { netdev_err(netdev, "dev resetting!"); @@ -658,7 +660,8 @@ static void hns3_get_ringparam(struct net_device *netdev, param->rx_max_pending = HNS3_RING_MAX_PENDING; param->tx_pending = priv->ring[0].desc_num; - param->rx_pending = priv->ring[queue_num].desc_num; + param->rx_pending = priv->ring[rx_queue_index].desc_num; + kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size; } static void hns3_get_pauseparam(struct net_device *netdev, @@ -1064,14 +1067,23 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv) } static int hns3_check_ringparam(struct net_device *ndev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param) { +#define RX_BUF_LEN_2K 2048 +#define RX_BUF_LEN_4K 4096 if (hns3_nic_resetting(ndev)) return -EBUSY; if (param->rx_mini_pending || param->rx_jumbo_pending) return -EINVAL; + if (kernel_param->rx_buf_len != RX_BUF_LEN_2K && + kernel_param->rx_buf_len != RX_BUF_LEN_4K) { + netdev_err(ndev, "Rx buf len only support 2048 and 4096\n"); + return -EINVAL; + } + if (param->tx_pending > 
HNS3_RING_MAX_PENDING || param->tx_pending < HNS3_RING_MIN_PENDING || param->rx_pending > HNS3_RING_MAX_PENDING || @@ -1084,8 +1096,26 @@ static int hns3_check_ringparam(struct net_device *ndev, return 0; } +static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int i; + + h->kinfo.rx_buf_len = rx_buf_len; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + h->kinfo.tqp[i]->buf_size = rx_buf_len; + priv->ring[i + h->kinfo.num_tqps].buf_size = rx_buf_len; + } + + return 0; +} + static int hns3_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *param) + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kernel_param, + struct netlink_ext_ack *extack) { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; @@ -1094,9 +1124,10 @@ static int hns3_set_ringparam(struct net_device *ndev, u32 old_tx_desc_num, new_tx_desc_num; u32 old_rx_desc_num, new_rx_desc_num; u16 queue_num = h->kinfo.num_tqps; + u32 old_rx_buf_len; int ret, i; - ret = hns3_check_ringparam(ndev, param); + ret = hns3_check_ringparam(ndev, param, kernel_param); if (ret) return ret; @@ -1105,8 +1136,10 @@ static int hns3_set_ringparam(struct net_device *ndev, new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); old_tx_desc_num = priv->ring[0].desc_num; old_rx_desc_num = priv->ring[queue_num].desc_num; + old_rx_buf_len = priv->ring[queue_num].buf_size; if (old_tx_desc_num == new_tx_desc_num && - old_rx_desc_num == new_rx_desc_num) + old_rx_desc_num == new_rx_desc_num && + kernel_param->rx_buf_len == old_rx_buf_len) return 0; tmp_rings = hns3_backup_ringparam(priv); @@ -1117,19 +1150,22 @@ static int hns3_set_ringparam(struct net_device *ndev, } netdev_info(ndev, - "Changing Tx/Rx ring depth from %u/%u to %u/%u\n", + "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %d to %d\n", old_tx_desc_num, old_rx_desc_num, - new_tx_desc_num, new_rx_desc_num); + new_tx_desc_num, new_rx_desc_num, + old_rx_buf_len, kernel_param->rx_buf_len); if (if_running) ndev->netdev_ops->ndo_stop(ndev); hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num); + hns3_change_rx_buf_len(ndev, kernel_param->rx_buf_len); ret = hns3_init_all_ring(priv); if (ret) { - netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n", + netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n", ret); + hns3_change_rx_buf_len(ndev, old_rx_buf_len); hns3_change_all_ring_bd_num(priv, old_tx_desc_num, old_rx_desc_num); for (i = 0; i < h->kinfo.num_tqps * 2; i++) @@ -1699,6 +1735,7 @@ static int hns3_get_tunable(struct net_device *netdev, void *data) { struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; int ret = 0; switch (tuna->id) { @@ -1709,6 +1746,9 @@ static int hns3_get_tunable(struct net_device *netdev, case ETHTOOL_RX_COPYBREAK: *(u32 *)data = priv->rx_copybreak; break; + case ETHTOOL_TX_COPYBREAK_BUF_SIZE: + *(u32 *)data = h->kinfo.tx_spare_buf_size; + break; default: ret = -EOPNOTSUPP; break; @@ -1717,11 +1757,43 @@ static int hns3_get_tunable(struct net_device *netdev, return ret; } +static int hns3_set_tx_spare_buf_size(struct net_device *netdev, + u32 data) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret; + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + h->kinfo.tx_spare_buf_size = data; + + ret = 
hns3_reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) + return ret; + + ret = hns3_reset_notify(h, HNAE3_UP_CLIENT); + if (ret) + hns3_reset_notify(h, HNAE3_UNINIT_CLIENT); + + return ret; +} + static int hns3_set_tunable(struct net_device *netdev, const struct ethtool_tunable *tuna, const void *data) { struct hns3_nic_priv *priv = netdev_priv(netdev); + u32 old_tx_spare_buf_size, new_tx_spare_buf_size; struct hnae3_handle *h = priv->ae_handle; int i, ret = 0; @@ -1740,6 +1812,26 @@ static int hns3_set_tunable(struct net_device *netdev, priv->ring[i].rx_copybreak = priv->rx_copybreak; break; + case ETHTOOL_TX_COPYBREAK_BUF_SIZE: + old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size; + new_tx_spare_buf_size = *(u32 *)data; + ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size); + if (ret) { + int ret1; + + netdev_warn(netdev, + "change tx spare buf size fail, revert to old value\n"); + ret1 = hns3_set_tx_spare_buf_size(netdev, + old_tx_spare_buf_size); + if (ret1) { + netdev_err(netdev, + "revert to old tx spare buf size fail\n"); + return ret1; + } + + return ret; + } + break; default: ret = -EOPNOTSUPP; break; @@ -1755,6 +1847,8 @@ static int hns3_set_tunable(struct net_device *netdev, ETHTOOL_COALESCE_MAX_FRAMES | \ ETHTOOL_COALESCE_USE_CQE) +#define HNS3_ETHTOOL_RING ETHTOOL_RING_USE_RX_BUF_LEN + static int hns3_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) { @@ -1833,6 +1927,7 @@ static int hns3_get_link_ext_state(struct net_device *netdev, static const struct ethtool_ops hns3vf_ethtool_ops = { .supported_coalesce_params = HNS3_ETHTOOL_COALESCE, + .supported_ring_params = HNS3_ETHTOOL_RING, .get_drvinfo = hns3_get_drvinfo, .get_ringparam = hns3_get_ringparam, .set_ringparam = hns3_set_ringparam, @@ -1864,6 +1959,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { static const struct ethtool_ops hns3_ethtool_ops = { .supported_coalesce_params = HNS3_ETHTOOL_COALESCE, + .supported_ring_params = HNS3_ETHTOOL_RING, .self_test = hns3_self_test, .get_drvinfo = hns3_get_drvinfo, .get_link = hns3_get_link, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 4e0a8c2f7c05..65168125c42e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -258,12 +258,29 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev, return 0; } +static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = { + {HCLGE_MAC_TX_EN_B, "mac_trans_en"}, + {HCLGE_MAC_RX_EN_B, "mac_rcv_en"}, + {HCLGE_MAC_PAD_TX_B, "pad_trans_en"}, + {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"}, + {HCLGE_MAC_1588_TX_B, "1588_trans_en"}, + {HCLGE_MAC_1588_RX_B, "1588_rcv_en"}, + {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"}, + {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"}, + {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"}, + {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"}, + {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"}, + {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"}, + {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"}, + {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"} +}; + static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf, int len, int *pos) { struct hclge_config_mac_mode_cmd *req; struct hclge_desc desc; - u32 loop_en; + u32 loop_en, i, 
offset; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true); @@ -278,39 +295,12 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf, req = (struct hclge_config_mac_mode_cmd *)desc.data; loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); - *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B)); - *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B)); - *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B)); - *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B)); - *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B)); - *pos += scnprintf(buf + *pos, len - *pos, - "mac_rx_oversize_truncate_en: %#x\n", - hnae3_get_bit(loop_en, - HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B)); - *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B)); - *pos += scnprintf(buf + *pos, len - *pos, - "mac_tx_under_min_err_en: %#x\n", - hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B)); - *pos += scnprintf(buf + *pos, len - *pos, - "mac_tx_oversize_truncate_en: %#x\n", - hnae3_get_bit(loop_en, - HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B)); + for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) { + offset = hclge_dbg_mac_en_status[i].offset; + *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n", + hclge_dbg_mac_en_status[i].message, + hnae3_get_bit(loop_en, offset)); + } return 0; } @@ -1614,8 +1604,19 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len) return 0; } +static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = { + {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"}, + {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"}, + {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"}, + {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"}, + {HCLGE_GLOBAL_RESET_REG, "hardware reset status"}, + {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"}, + {HCLGE_FUN_RST_ING, "function reset status"} +}; + int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len) { + u32 i, offset; int pos = 0; pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n", @@ -1634,22 +1635,14 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len) hdev->rst_stats.reset_cnt); pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n", hdev->rst_stats.reset_fail_cnt); - pos += scnprintf(buf + pos, len - pos, - "vector0 interrupt enable status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE)); - pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG)); - pos += scnprintf(buf + pos, len - pos, "reset interrupt 
status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS)); - pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n", - hclge_read_dev(&hdev->hw, - HCLGE_RAS_PF_OTHER_INT_STS_REG)); - pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); - pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); - pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n", - hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); + + for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) { + offset = hclge_dbg_rst_info[i].offset; + pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n", + hclge_dbg_rst_info[i].message, + hclge_read_dev(&hdev->hw, offset)); + } + pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n", hdev->state); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h index c526591a7240..724052928b88 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -94,6 +94,11 @@ struct hclge_dbg_func { char *buf, int len); }; +struct hclge_dbg_status_dfx_info { + u32 offset; + char message[HCLGE_DBG_MAX_DFX_MSG_LEN]; +}; + static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { {false, "Reserved"}, {true, "BP_CPU_STATE"}, @@ -321,10 +326,10 @@ static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = { {true, "IGU_RX_OUT_UDP0_PKT"}, {true, "IGU_RX_IN_UDP0_PKT"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, - {false, "Reserved"}, + {true, "IGU_MC_CAR_DROP_PKT_L"}, + {true, "IGU_MC_CAR_DROP_PKT_H"}, + {true, "IGU_BC_CAR_DROP_PKT_L"}, + {true, "IGU_BC_CAR_DROP_PKT_H"}, {false, "Reserved"}, {true, "IGU_RX_OVERSIZE_PKT_L"}, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c2a58101144e..6d68cc23f1c0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1613,12 +1613,39 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) hdev->num_rx_desc = HCLGE_MIN_RX_DESC; } +static void hclge_init_tc_config(struct hclge_dev *hdev) +{ + unsigned int i; + + if (hdev->tc_max > HNAE3_MAX_TC || + hdev->tc_max < 1) { + dev_warn(&hdev->pdev->dev, "TC num = %u.\n", + hdev->tc_max); + hdev->tc_max = 1; + } + + /* Dev does not support DCB */ + if (!hnae3_dev_dcb_supported(hdev)) { + hdev->tc_max = 1; + hdev->pfc_max = 0; + } else { + hdev->pfc_max = hdev->tc_max; + } + + hdev->tm_info.num_tc = 1; + + /* Currently not support uncontiuous tc */ + for (i = 0; i < hdev->tm_info.num_tc; i++) + hnae3_set_bit(hdev->hw_tc_map, i, 1); + + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; +} + static int hclge_configure(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); const struct cpumask *cpumask = cpu_online_mask; struct hclge_cfg cfg; - unsigned int i; int node, ret; ret = hclge_get_cfg(hdev, &cfg); @@ -1662,29 +1689,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); - if ((hdev->tc_max > HNAE3_MAX_TC) || - (hdev->tc_max < 1)) { - dev_warn(&hdev->pdev->dev, "TC num = %u.\n", - hdev->tc_max); - hdev->tc_max = 1; - } - - /* Dev does not support DCB */ - if 
(!hnae3_dev_dcb_supported(hdev)) { - hdev->tc_max = 1; - hdev->pfc_max = 0; - } else { - hdev->pfc_max = hdev->tc_max; - } - - hdev->tm_info.num_tc = 1; - - /* Currently not support uncontiuous tc */ - for (i = 0; i < hdev->tm_info.num_tc; i++) - hnae3_set_bit(hdev->hw_tc_map, i, 1); - - hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; - + hclge_init_tc_config(hdev); hclge_init_kdump_kernel_config(hdev); /* Set the affinity based on numa node */ @@ -2653,11 +2658,38 @@ static u8 hclge_check_speed_dup(u8 duplex, int speed) return duplex; } +static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = { + {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M}, + {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M}, + {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G}, + {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G}, + {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G}, + {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G}, + {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G}, + {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G}, + {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G}, +}; + +static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw) +{ + u16 i; + + for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) { + if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) { + *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw; + return 0; + } + } + + return -EINVAL; +} + static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, u8 duplex) { struct hclge_config_mac_speed_dup_cmd *req; struct hclge_desc desc; + u32 speed_fw; int ret; req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; @@ -2667,48 +2699,14 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, if (duplex) hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1); - switch (speed) { - case HCLGE_MAC_SPEED_10M: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M); - break; - case HCLGE_MAC_SPEED_100M: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M); - break; - case HCLGE_MAC_SPEED_1G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G); - break; - case HCLGE_MAC_SPEED_10G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G); - break; - case HCLGE_MAC_SPEED_25G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G); - break; - case HCLGE_MAC_SPEED_40G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G); - break; - case HCLGE_MAC_SPEED_50G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G); - break; - case HCLGE_MAC_SPEED_100G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G); - break; - case HCLGE_MAC_SPEED_200G: - hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G); - break; - default: + ret = hclge_convert_to_fw_speed(speed, &speed_fw); + if (ret) { dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); - return -EINVAL; + return ret; } + hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S, + speed_fw); hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, 1); @@ -2933,16 +2931,20 @@ static int hclge_mac_init(struct hclge_dev *hdev) static void hclge_mbx_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && - 
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) + !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { + hdev->last_mbx_scheduled = jiffies; mod_delayed_work(hclge_wq, &hdev->service_task, 0); + } } static void hclge_reset_task_schedule(struct hclge_dev *hdev) { if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && - !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) + !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { + hdev->last_rst_scheduled = jiffies; mod_delayed_work(hclge_wq, &hdev->service_task, 0); + } } static void hclge_errhand_task_schedule(struct hclge_dev *hdev) @@ -3831,6 +3833,13 @@ static void hclge_mailbox_service_task(struct hclge_dev *hdev) test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) return; + if (time_is_before_jiffies(hdev->last_mbx_scheduled + + HCLGE_MBX_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "mbx service task is scheduled after %ums on cpu%u!\n", + jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), + smp_processor_id()); + hclge_mbx_handler(hdev); clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); @@ -4480,6 +4489,13 @@ static void hclge_reset_service_task(struct hclge_dev *hdev) if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) return; + if (time_is_before_jiffies(hdev->last_rst_scheduled + + HCLGE_RESET_SCHED_TIMEOUT)) + dev_warn(&hdev->pdev->dev, + "reset service task is scheduled after %ums on cpu%u!\n", + jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), + smp_processor_id()); + down(&hdev->reset_sem); set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); @@ -7161,6 +7177,37 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs, } } +static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, + u16 location) +{ + struct hclge_fd_rule *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { + if (rule->location == location) + return rule; + else if (rule->location > location) + return NULL; + } + + return NULL; +} + +static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs, + struct hclge_fd_rule *rule) +{ + if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { + fs->ring_cookie = RX_CLS_FLOW_DISC; + } else { + u64 vf_id; + + fs->ring_cookie = rule->queue_id; + vf_id = rule->vf_id; + vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + fs->ring_cookie |= vf_id; + } +} + static int hclge_get_fd_rule_info(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd) { @@ -7168,7 +7215,6 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, struct hclge_fd_rule *rule = NULL; struct hclge_dev *hdev = vport->back; struct ethtool_rx_flow_spec *fs; - struct hlist_node *node2; if (!hnae3_dev_fd_supported(hdev)) return -EOPNOTSUPP; @@ -7177,14 +7223,9 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, spin_lock_bh(&hdev->fd_rule_lock); - hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { - if (rule->location >= fs->location) - break; - } - - if (!rule || fs->location != rule->location) { + rule = hclge_get_fd_rule(hdev, fs->location); + if (!rule) { spin_unlock_bh(&hdev->fd_rule_lock); - return -ENOENT; } @@ -7222,16 +7263,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle, hclge_fd_get_ext_info(fs, rule); - if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) { - fs->ring_cookie = RX_CLS_FLOW_DISC; - } else { - u64 vf_id; - - fs->ring_cookie = rule->queue_id; - vf_id = 
rule->vf_id; - vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; - fs->ring_cookie |= vf_id; - } + hclge_fd_get_ring_cookie(fs, rule); spin_unlock_bh(&hdev->fd_rule_lock); @@ -8743,6 +8775,7 @@ int hclge_update_mac_list(struct hclge_vport *vport, enum HCLGE_MAC_ADDR_TYPE mac_type, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_node *mac_node; struct list_head *list; @@ -8767,9 +8800,10 @@ int hclge_update_mac_list(struct hclge_vport *vport, /* if this address is never added, unnecessary to delete */ if (state == HCLGE_MAC_TO_DEL) { spin_unlock_bh(&vport->mac_list_lock); + hnae3_format_mac_addr(format_mac_addr, addr); dev_err(&hdev->pdev->dev, - "failed to delete address %pM from mac list\n", - addr); + "failed to delete address %s from mac list\n", + format_mac_addr); return -ENOENT; } @@ -8802,6 +8836,7 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle, int hclge_add_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc; @@ -8812,9 +8847,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, if (is_zero_ether_addr(addr) || is_broadcast_ether_addr(addr) || is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); dev_err(&hdev->pdev->dev, - "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", - addr, is_zero_ether_addr(addr), + "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n", + format_mac_addr, is_zero_ether_addr(addr), is_broadcast_ether_addr(addr), is_multicast_ether_addr(addr)); return -EINVAL; @@ -8871,6 +8907,7 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle, int hclge_rm_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; int ret; @@ -8879,8 +8916,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, if (is_zero_ether_addr(addr) || is_broadcast_ether_addr(addr) || is_multicast_ether_addr(addr)) { - dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n", - addr); + hnae3_format_mac_addr(format_mac_addr, addr); + dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", + format_mac_addr); return -EINVAL; } @@ -8911,6 +8949,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle, int hclge_add_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; @@ -8919,9 +8958,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, /* mac addr check */ if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); dev_err(&hdev->pdev->dev, - "Add mc mac err! invalid mac:%pM.\n", - addr); + "Add mc mac err! 
invalid mac:%s.\n", + format_mac_addr); return -EINVAL; } memset(&req, 0, sizeof(req)); @@ -8973,6 +9013,7 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle, int hclge_rm_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; enum hclge_cmd_status status; @@ -8980,9 +9021,10 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, /* mac addr check */ if (!is_multicast_ether_addr(addr)) { + hnae3_format_mac_addr(format_mac_addr, addr); dev_dbg(&hdev->pdev->dev, - "Remove mc mac err! invalid mac:%pM.\n", - addr); + "Remove mc mac err! invalid mac:%s.\n", + format_mac_addr); return -EINVAL; } @@ -9422,16 +9464,18 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, u8 *mac_addr) { struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; vport = hclge_get_vf_vport(hdev, vf); if (!vport) return -EINVAL; + hnae3_format_mac_addr(format_mac_addr, mac_addr); if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { dev_info(&hdev->pdev->dev, - "Specified MAC(=%pM) is same as before, no change committed!\n", - mac_addr); + "Specified MAC(=%s) is same as before, no change committed!\n", + format_mac_addr); return 0; } @@ -9439,13 +9483,13 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) { dev_info(&hdev->pdev->dev, - "MAC of VF %d has been set to %pM, and it will be reinitialized!\n", - vf, mac_addr); + "MAC of VF %d has been set to %s, and it will be reinitialized!\n", + vf, format_mac_addr); return hclge_inform_reset_assert_to_vf(vport); } - dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n", - vf, mac_addr); + dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n", + vf, format_mac_addr); return 0; } @@ -9549,6 +9593,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, { const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); + char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; struct hclge_dev *hdev = vport->back; unsigned char *old_addr = NULL; int ret; @@ -9557,9 +9602,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, if (is_zero_ether_addr(new_addr) || is_broadcast_ether_addr(new_addr) || is_multicast_ether_addr(new_addr)) { + hnae3_format_mac_addr(format_mac_addr, new_addr); dev_err(&hdev->pdev->dev, - "change uc mac err! invalid mac: %pM.\n", - new_addr); + "change uc mac err! 
invalid mac: %s.\n", + format_mac_addr); return -EINVAL; } @@ -9577,9 +9623,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p, spin_lock_bh(&vport->mac_list_lock); ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr); if (ret) { + hnae3_format_mac_addr(format_mac_addr, new_addr); dev_err(&hdev->pdev->dev, - "failed to change the mac addr:%pM, ret = %d\n", - new_addr, ret); + "failed to change the mac addr:%s, ret = %d\n", + format_mac_addr, ret); spin_unlock_bh(&vport->mac_list_lock); if (!is_first) @@ -10168,67 +10215,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) return status; } -static int hclge_init_vlan_config(struct hclge_dev *hdev) +static int hclge_init_vlan_filter(struct hclge_dev *hdev) { -#define HCLGE_DEF_VLAN_TYPE 0x8100 - - struct hnae3_handle *handle = &hdev->vport[0].nic; struct hclge_vport *vport; int ret; int i; - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { - /* for revision 0x21, vf vlan filter is per function */ - for (i = 0; i < hdev->num_alloc_vport; i++) { - vport = &hdev->vport[i]; - ret = hclge_set_vlan_filter_ctrl(hdev, - HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS, - true, - vport->vport_id); - if (ret) - return ret; - vport->cur_vlan_fltr_en = true; - } + if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_EGRESS_V1_B, + true, 0); - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, true, - 0); - if (ret) - return ret; - } else { + /* for revision 0x21, vf vlan filter is per function */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, - HCLGE_FILTER_FE_EGRESS_V1_B, - true, 0); + HCLGE_FILTER_FE_EGRESS, true, + vport->vport_id); if (ret) return ret; + vport->cur_vlan_fltr_en = true; } - hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; - hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, + HCLGE_FILTER_FE_INGRESS, true, 0); +} - ret = hclge_set_vlan_protocol_type(hdev); - if (ret) - return ret; +static int hclge_init_vlan_type(struct hclge_dev *hdev) +{ + hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; + hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; - for (i = 0; i < hdev->num_alloc_vport; i++) { - u16 vlan_tag; - u8 qos; + return hclge_set_vlan_protocol_type(hdev); +} +static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) +{ + struct hclge_port_base_vlan_config *cfg; + struct hclge_vport *vport; + int ret; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { vport = &hdev->vport[i]; - vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag; - qos = vport->port_base_vlan_cfg.vlan_info.qos; + cfg = &vport->port_base_vlan_cfg; - ret = hclge_vlan_offload_cfg(vport, - vport->port_base_vlan_cfg.state, - vlan_tag, qos); + ret = hclge_vlan_offload_cfg(vport, cfg->state, + 
cfg->vlan_info.vlan_tag, + cfg->vlan_info.qos); if (ret) return ret; } + return 0; +} + +static int hclge_init_vlan_config(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport[0].nic; + int ret; + + ret = hclge_init_vlan_filter(hdev); + if (ret) + return ret; + + ret = hclge_init_vlan_type(hdev); + if (ret) + return ret; + + ret = hclge_init_vport_vlan_offload(hdev); + if (ret) + return ret; return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } @@ -10485,12 +10545,41 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg, return false; } +static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, + struct hclge_vlan_info *new_info, + struct hclge_vlan_info *old_info) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + /* add new VLAN tag */ + ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), + vport->vport_id, new_info->vlan_tag, + false); + if (ret) + return ret; + + /* remove old VLAN tag */ + if (old_info->vlan_tag == 0) + ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, + true, 0); + else + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + old_info->vlan_tag, true); + if (ret) + dev_err(&hdev->pdev->dev, + "failed to clear vport%u port base vlan %u, ret = %d.\n", + vport->vport_id, old_info->vlan_tag, ret); + + return ret; +} + int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, struct hclge_vlan_info *vlan_info) { struct hnae3_handle *nic = &vport->nic; struct hclge_vlan_info *old_vlan_info; - struct hclge_dev *hdev = vport->back; int ret; old_vlan_info = &vport->port_base_vlan_cfg.vlan_info; @@ -10503,38 +10592,12 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state, if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info)) goto out; - if (state == HNAE3_PORT_BASE_VLAN_MODIFY) { - /* add new VLAN tag */ - ret = hclge_set_vlan_filter_hw(hdev, - htons(vlan_info->vlan_proto), - vport->vport_id, - vlan_info->vlan_tag, - false); - if (ret) - return ret; - - /* remove old VLAN tag */ - if (old_vlan_info->vlan_tag == 0) - ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, - true, 0); - else - ret = hclge_set_vlan_filter_hw(hdev, - htons(ETH_P_8021Q), - vport->vport_id, - old_vlan_info->vlan_tag, - true); - if (ret) { - dev_err(&hdev->pdev->dev, - "failed to clear vport%u port base vlan %u, ret = %d.\n", - vport->vport_id, old_vlan_info->vlan_tag, ret); - return ret; - } - - goto out; - } - - ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, - old_vlan_info); + if (state == HNAE3_PORT_BASE_VLAN_MODIFY) + ret = hclge_modify_port_base_vlan_tag(vport, vlan_info, + old_vlan_info); + else + ret = hclge_update_vlan_filter_entries(vport, state, vlan_info, + old_vlan_info); if (ret) return ret; @@ -11556,24 +11619,20 @@ static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev, int retry_cnt = 0; int ret; -retry: - down(&hdev->reset_sem); - set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - hdev->reset_type = rst_type; - ret = hclge_reset_prepare(hdev); - if (ret || hdev->reset_pending) { - dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n", - ret); - if (hdev->reset_pending || - retry_cnt++ < HCLGE_RESET_RETRY_CNT) { - dev_err(&hdev->pdev->dev, - "reset_pending:0x%lx, retry_cnt:%d\n", - hdev->reset_pending, retry_cnt); - clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); - up(&hdev->reset_sem); - msleep(HCLGE_RESET_RETRY_WAIT_MS); - goto retry; - } + while (retry_cnt++ < 
HCLGE_RESET_RETRY_CNT) { + down(&hdev->reset_sem); + set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + hdev->reset_type = rst_type; + ret = hclge_reset_prepare(hdev); + if (!ret && !hdev->reset_pending) + break; + + dev_err(&hdev->pdev->dev, + "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", + ret, hdev->reset_pending, retry_cnt); + clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); + up(&hdev->reset_sem); + msleep(HCLGE_RESET_RETRY_WAIT_MS); } /* disable misc vector before reset done */ @@ -12288,19 +12347,42 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, *max_rss_size = hdev->pf_rss_size_max; } +static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; + struct hclge_dev *hdev = vport->back; + u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 roundup_size; + unsigned int i; + + roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size); + roundup_size = ilog2(roundup_size); + /* Set the RSS TC mode according to the new RSS size */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = vport->nic.kinfo.rss_size * i; + } + + return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); +} + static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, bool rxfh_configured) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hclge_vport *vport = hclge_get_vport(handle); struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; - u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; struct hclge_dev *hdev = vport->back; - u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; u16 cur_rss_size = kinfo->rss_size; u16 cur_tqps = kinfo->num_tqps; - u16 tc_valid[HCLGE_MAX_TC_NUM]; - u16 roundup_size; u32 *rss_indir; unsigned int i; int ret; @@ -12313,20 +12395,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, return ret; } - roundup_size = roundup_pow_of_two(kinfo->rss_size); - roundup_size = ilog2(roundup_size); - /* Set the RSS TC mode according to the new RSS size */ - for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - tc_valid[i] = 0; - - if (!(hdev->hw_tc_map & BIT(i))) - continue; - - tc_valid[i] = 1; - tc_size[i] = roundup_size; - tc_offset[i] = kinfo->rss_size * i; - } - ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + ret = hclge_set_rss_tc_mode_cfg(handle); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index ebba603483a0..a51418fdbb24 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -955,6 +955,8 @@ struct hclge_dev { u16 hclge_fd_rule_num; unsigned long serv_processed_cnt; unsigned long last_serv_processed; + unsigned long last_rst_scheduled; + unsigned long last_mbx_scheduled; unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; u8 fd_en; @@ -1093,6 +1095,11 @@ struct hclge_speed_bit_map { u32 speed_bit; }; +struct hclge_mac_speed_map { + u32 speed_drv; /* speed defined in driver */ + u32 speed_fw; /* speed defined in firmware */ +}; + int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 65d78ee4d65a..c495df2e5953 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -848,6 +848,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 		if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
 		    req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
 			resp_msg.status = ret;
+			if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+						   HCLGE_MBX_SCHED_TIMEOUT))
+				dev_warn(&hdev->pdev->dev,
+					 "resp vport%u mbx(%u,%u) late\n",
+					 req->mbx_src_vfid,
+					 req->msg.code,
+					 req->msg.subcode);
+
 			hclge_gen_resp_to_vf(vport, req, &resp_msg);
 		}
 
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 429652a8cde1..3edbfc8d17e8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -916,38 +916,63 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
 	return 0;
 }
 
-static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
 {
 	struct hclge_vport *vport = hdev->vport;
+	u16 i, k;
 	int ret;
-	u32 i, k;
 
-	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
-		/* Cfg qs -> pri mapping, one by one mapping */
-		for (k = 0; k < hdev->num_alloc_vport; k++) {
-			struct hnae3_knic_private_info *kinfo =
-				&vport[k].nic.kinfo;
-
-			for (i = 0; i < kinfo->tc_info.num_tc; i++) {
-				ret = hclge_tm_qs_to_pri_map_cfg(
-					hdev, vport[k].qs_offset + i, i);
-				if (ret)
-					return ret;
-			}
+	/* Cfg qs -> pri mapping, one by one mapping */
+	for (k = 0; k < hdev->num_alloc_vport; k++) {
+		struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+		for (i = 0; i < kinfo->tc_info.num_tc; i++) {
+			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+							 vport[k].qs_offset + i,
+							 i);
+			if (ret)
+				return ret;
 		}
-	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
-		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
-		for (k = 0; k < hdev->num_alloc_vport; k++)
-			for (i = 0; i < HNAE3_MAX_TC; i++) {
-				ret = hclge_tm_qs_to_pri_map_cfg(
-					hdev, vport[k].qs_offset + i, k);
-				if (ret)
-					return ret;
-			}
-	} else {
-		return -EINVAL;
 	}
 
+	return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = hdev->vport;
+	u16 i, k;
+	int ret;
+
+	/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
+	for (k = 0; k < hdev->num_alloc_vport; k++)
+		for (i = 0; i < HNAE3_MAX_TC; i++) {
+			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+							 vport[k].qs_offset + i,
+							 k);
+			if (ret)
+				return ret;
+		}
+
+	return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_vport *vport = hdev->vport;
+	int ret;
+	u32 i;
+
+	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
+		ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
+	else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
+		ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
+	else
+		return -EINVAL;
+
+	if (ret)
+		return ret;
+
 	/* Cfg q -> qs mapping */
 	for (i = 0; i < hdev->num_alloc_vport; i++) {
 		ret = hclge_vport_q_to_qs_map(hdev, vport);
@@ -1274,6 +1299,27 @@ static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
 	return 0;
 }
 
+static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
+{
+	struct hclge_vport *vport = hdev->vport;
+	int ret;
+	u16 i;
+
+	ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < hdev->num_alloc_vport; i++) {
+		ret = hclge_tm_qs_schd_mode_cfg(hdev,
+						vport[i].qs_offset + pri_id,
+						HCLGE_SCH_MODE_DWRR);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
 {
 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -1304,21 +1350,13 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
 {
 	struct hclge_vport *vport = hdev->vport;
 	int ret;
-	u8 i, k;
+	u8 i;
 
 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
 		for (i = 0; i < hdev->tm_info.num_tc; i++) {
-			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
+			ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
 			if (ret)
 				return ret;
-
-			for (k = 0; k < hdev->num_alloc_vport; k++) {
-				ret = hclge_tm_qs_schd_mode_cfg(
-					hdev, vport[k].qs_offset + i,
-					HCLGE_SCH_MODE_DWRR);
-				if (ret)
-					return ret;
-			}
 		}
 	} else {
 		for (i = 0; i < hdev->num_alloc_vport; i++) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 41afaeea881b..0568cc31d391 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1514,15 +1514,18 @@ static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
 				    struct list_head *list,
 				    enum HCLGEVF_MAC_ADDR_TYPE mac_type)
 {
+	char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
 	struct hclgevf_mac_addr_node *mac_node, *tmp;
 	int ret;
 
 	list_for_each_entry_safe(mac_node, tmp, list, node) {
 		ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
 		if (ret) {
+			hnae3_format_mac_addr(format_mac_addr,
+					      mac_node->mac_addr);
 			dev_err(&hdev->pdev->dev,
-				"failed to configure mac %pM, state = %d, ret = %d\n",
-				mac_node->mac_addr, mac_node->state, ret);
+				"failed to configure mac %s, state = %d, ret = %d\n",
+				format_mac_addr, mac_node->state, ret);
 			return;
 		}
 		if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
@@ -2163,24 +2166,20 @@ static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
 	int retry_cnt = 0;
 	int ret;
 
-retry:
-	down(&hdev->reset_sem);
-	set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
-	hdev->reset_type = rst_type;
-	ret = hclgevf_reset_prepare(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
-			ret);
-		if (hdev->reset_pending ||
-		    retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
-			dev_err(&hdev->pdev->dev,
-				"reset_pending:0x%lx, retry_cnt:%d\n",
-				hdev->reset_pending, retry_cnt);
-			clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
-			up(&hdev->reset_sem);
-			msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
-			goto retry;
-		}
+	while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
+		down(&hdev->reset_sem);
+		set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+		hdev->reset_type = rst_type;
+		ret = hclgevf_reset_prepare(hdev);
+		if (!ret && !hdev->reset_pending)
+			break;
+
+		dev_err(&hdev->pdev->dev,
+			"failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+			ret, hdev->reset_pending, retry_cnt);
+		clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+		up(&hdev->reset_sem);
+		msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
 	}
 
 	/* disable misc vector before reset done */