Diffstat (limited to 'drivers/net/ethernet/hisilicon/hns3')
22 files changed, 1374 insertions, 543 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index f8a87f8ca983..1b0313900f98 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -45,8 +45,9 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ + HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */ - HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ + HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */ HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */ }; @@ -71,7 +72,7 @@ enum hclge_mbx_vlan_cfg_subcode { }; #define HCLGE_MBX_MAX_MSG_SIZE 16 -#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8 +#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U #define HCLGE_MBX_RING_MAP_BASIC_MSG_NUM 3 #define HCLGE_MBX_RING_NODE_VARIABLE_NUM 3 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 03ca7d925e8e..eef1b2764d34 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -146,7 +146,7 @@ void hnae3_unregister_client(struct hnae3_client *client) return; mutex_lock(&hnae3_common_lock); - + /* one system should only have one client for every type */ list_for_each_entry(client_tmp, &hnae3_client_list, node) { if (client_tmp->type == client->type) { existed = true; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index a0998937727d..3b5e2d7251e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -130,7 +130,6 @@ enum hnae3_module_type { HNAE3_MODULE_TYPE_CR = 0x04, HNAE3_MODULE_TYPE_KR = 0x05, HNAE3_MODULE_TYPE_TP = 0x06, - }; enum hnae3_fec_mode { @@ -366,6 +365,19 @@ struct hnae3_ae_dev { * Enable/disable HW GRO * add_arfs_entry * Check the 5-tuples of flow, and create flow director rule + * get_vf_config + * Get the VF configuration setting by the host + * set_vf_link_state + * Set VF link status + * set_vf_spoofchk + * Enable/disable spoof check for specified vf + * set_vf_trust + * Enable/disable trust for specified vf, if the vf being trusted, then + * it can enable promisc mode + * set_vf_rate + * Set the max tx rate of specified vf. 
+ * set_vf_mac + * Configure the default MAC for specified VF */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); @@ -531,6 +543,16 @@ struct hnae3_ae_ops { int (*mac_connect_phy)(struct hnae3_handle *handle); void (*mac_disconnect_phy)(struct hnae3_handle *handle); void (*restore_vlan_table)(struct hnae3_handle *handle); + int (*get_vf_config)(struct hnae3_handle *handle, int vf, + struct ifla_vf_info *ivf); + int (*set_vf_link_state)(struct hnae3_handle *handle, int vf, + int link_state); + int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf, + bool enable); + int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable); + int (*set_vf_rate)(struct hnae3_handle *handle, int vf, + int min_tx_rate, int max_tx_rate, bool force); + int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p); }; struct hnae3_dcb_ops { @@ -553,7 +575,8 @@ struct hnae3_ae_algo { const struct pci_device_id *pdev_id_table; }; -#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16) +#define HNAE3_INT_NAME_EXT_LEN 32 /* Max extra information length */ +#define HNAE3_INT_NAME_LEN (IFNAMSIZ + HNAE3_INT_NAME_EXT_LEN) #define HNAE3_ITR_COUNTDOWN_START 100 struct hnae3_tc_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 28961a68e333..6b328a259efc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -16,15 +16,14 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h, const char *cmd_buf) { struct hns3_nic_priv *priv = h->priv; - struct hns3_nic_ring_data *ring_data; struct hns3_enet_ring *ring; u32 base_add_l, base_add_h; u32 queue_num, queue_max; u32 value, i = 0; int cnt; - if (!priv->ring_data) { - dev_err(&h->pdev->dev, "ring_data is NULL\n"); + if (!priv->ring) { + dev_err(&h->pdev->dev, "priv->ring is NULL\n"); return -EFAULT; } @@ -44,7 +43,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h, return -EINVAL; } - ring_data = priv->ring_data; for (i = queue_num; i < queue_max; i++) { /* Each cycle needs to determine whether the instance is reset, * to prevent reference to invalid memory. 
And need to ensure @@ -54,73 +52,73 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h, test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) return -EPERM; - ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring; + ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)]; base_add_h = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BASEADDR_H_REG); base_add_l = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BASEADDR_L_REG); - dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i, + dev_info(&h->pdev->dev, "RX(%u) BASE ADD: 0x%08x%08x\n", i, base_add_h, base_add_l); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BD_NUM_REG); - dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value); + dev_info(&h->pdev->dev, "RX(%u) RING BD NUM: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_BD_LEN_REG); - dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value); + dev_info(&h->pdev->dev, "RX(%u) RING BD LEN: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG); - dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value); + dev_info(&h->pdev->dev, "RX(%u) RING TAIL: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); - dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value); + dev_info(&h->pdev->dev, "RX(%u) RING HEAD: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); - dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value); + dev_info(&h->pdev->dev, "RX(%u) RING FBDNUM: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_PKTNUM_RECORD_REG); - dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value); + dev_info(&h->pdev->dev, "RX(%u) RING PKTNUM: %u\n", i, value); - ring = ring_data[i].ring; + ring = &priv->ring[i]; base_add_h = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_BASEADDR_H_REG); base_add_l = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_BASEADDR_L_REG); - dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i, + dev_info(&h->pdev->dev, "TX(%u) BASE ADD: 0x%08x%08x\n", i, base_add_h, base_add_l); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_BD_NUM_REG); - dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value); + dev_info(&h->pdev->dev, "TX(%u) RING BD NUM: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); - dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value); + dev_info(&h->pdev->dev, "TX(%u) RING TC: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); - dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value); + dev_info(&h->pdev->dev, "TX(%u) RING TAIL: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); - dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value); + dev_info(&h->pdev->dev, "TX(%u) RING HEAD: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_FBDNUM_REG); - dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value); + dev_info(&h->pdev->dev, "TX(%u) RING FBDNUM: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_OFFSET_REG); - dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value); + dev_info(&h->pdev->dev, "TX(%u) RING OFFSET: %u\n", i, value); value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG); - dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i, + dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: 
%u\n\n", i, value); } @@ -130,7 +128,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h, static int hns3_dbg_queue_map(struct hnae3_handle *h) { struct hns3_nic_priv *priv = h->priv; - struct hns3_nic_ring_data *ring_data; int i; if (!h->ae_algo->ops->get_global_queue_id) @@ -143,15 +140,12 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h) u16 global_qid; global_qid = h->ae_algo->ops->get_global_queue_id(h, i); - ring_data = &priv->ring_data[i]; - if (!ring_data || !ring_data->ring || - !ring_data->ring->tqp_vector) + if (!priv->ring || !priv->ring[i].tqp_vector) continue; dev_info(&h->pdev->dev, " %4d %4d %4d\n", - i, global_qid, - ring_data->ring->tqp_vector->vector_irq); + i, global_qid, priv->ring[i].tqp_vector->vector_irq); } return 0; @@ -160,7 +154,6 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h) static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) { struct hns3_nic_priv *priv = h->priv; - struct hns3_nic_ring_data *ring_data; struct hns3_desc *rx_desc, *tx_desc; struct device *dev = &h->pdev->dev; struct hns3_enet_ring *ring; @@ -183,8 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) return -EINVAL; } - ring_data = priv->ring_data; - ring = ring_data[q_num].ring; + ring = &priv->ring[q_num]; value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); tx_index = (cnt == 1) ? value : tx_index; @@ -198,23 +190,26 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) addr = le64_to_cpu(tx_desc->addr); dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index); dev_info(dev, "(TX)addr: %pad\n", &addr); - dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag); - dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size); + dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag)); + dev_info(dev, "(TX)send_size: %u\n", + le16_to_cpu(tx_desc->tx.send_size)); dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso); dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len); dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len); dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len); - dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag); - dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv); + dev_info(dev, "(TX)vlan_tag: %u\n", + le16_to_cpu(tx_desc->tx.outer_vlan_tag)); + dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv)); dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec); dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len); dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len); dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len); - dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen); - dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri); - dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss); + dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen)); + dev_info(dev, "(TX)vld_ra_ri: %u\n", + le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri)); + dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss)); - ring = ring_data[q_num + h->kinfo.num_tqps].ring; + ring = &priv->ring[q_num + h->kinfo.num_tqps]; value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG); rx_index = (cnt == 1) ? 
value : tx_index; rx_desc = &ring->desc[rx_index]; @@ -222,15 +217,19 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf) addr = le64_to_cpu(rx_desc->addr); dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index); dev_info(dev, "(RX)addr: %pad\n", &addr); - dev_info(dev, "(RX)l234_info: %u\n", rx_desc->rx.l234_info); - dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len); - dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size); - dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash); - dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id); - dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag); - dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb); - dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag); - dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info); + dev_info(dev, "(RX)l234_info: %u\n", + le32_to_cpu(rx_desc->rx.l234_info)); + dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len)); + dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size)); + dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash)); + dev_info(dev, "(RX)fd_id: %u\n", le16_to_cpu(rx_desc->rx.fd_id)); + dev_info(dev, "(RX)vlan_tag: %u\n", le16_to_cpu(rx_desc->rx.vlan_tag)); + dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", + le16_to_cpu(rx_desc->rx.o_dm_vlan_id_fb)); + dev_info(dev, "(RX)ot_vlan_tag: %u\n", + le16_to_cpu(rx_desc->rx.ot_vlan_tag)); + dev_info(dev, "(RX)bd_base_info: %u\n", + le32_to_cpu(rx_desc->rx.bd_base_info)); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 616cad0faa21..ba0536802b13 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -483,7 +483,7 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h) for (i = 0; i < h->kinfo.num_tqps; i++) { dev_queue = netdev_get_tx_queue(ndev, - priv->ring_data[i].queue_index); + priv->ring[i].queue_index); netdev_tx_reset_queue(dev_queue); } } @@ -681,7 +681,7 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, return 0; ret = skb_cow_head(skb, 0); - if (unlikely(ret)) + if (unlikely(ret < 0)) return ret; l3.hdr = skb_network_header(skb); @@ -962,14 +962,6 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, return 0; } -static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) -{ - /* Config bd buffer end */ - if (!!frag_end) - hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, 1U); - hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1U); -} - static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, struct sk_buff *skb) { @@ -1062,7 +1054,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, skb_reset_mac_len(skb); ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (unlikely(ret)) { + if (unlikely(ret < 0)) { u64_stats_update_begin(&ring->syncp); ring->stats.tx_l4_proto_err++; u64_stats_update_end(&ring->syncp); @@ -1072,7 +1064,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, &type_cs_vlan_tso, &ol_type_vlan_len_msec); - if (unlikely(ret)) { + if (unlikely(ret < 0)) { u64_stats_update_begin(&ring->syncp); ring->stats.tx_l2l3l4_err++; u64_stats_update_end(&ring->syncp); @@ -1081,7 +1073,7 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, ret = hns3_set_tso(skb, &paylen, &mss, &type_cs_vlan_tso); - if (unlikely(ret)) { + if (unlikely(ret < 0)) { 
u64_stats_update_begin(&ring->syncp); ring->stats.tx_tso_err++; u64_stats_update_end(&ring->syncp); @@ -1102,9 +1094,10 @@ static int hns3_fill_skb_desc(struct hns3_enet_ring *ring, } static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, - unsigned int size, int frag_end, - enum hns_desc_type type) + unsigned int size, enum hns_desc_type type) { +#define HNS3_LIKELY_BD_NUM 1 + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; struct device *dev = ring_to_dev(ring); @@ -1118,7 +1111,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, int ret; ret = hns3_fill_skb_desc(ring, skb, desc); - if (unlikely(ret)) + if (unlikely(ret < 0)) return ret; dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); @@ -1137,19 +1130,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, desc_cb->length = size; if (likely(size <= HNS3_MAX_BD_SIZE)) { - u16 bdtp_fe_sc_vld_ra_ri = 0; - desc_cb->priv = priv; desc_cb->dma = dma; desc_cb->type = type; desc->addr = cpu_to_le64(dma); desc->tx.send_size = cpu_to_le16(size); - hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); desc->tx.bdtp_fe_sc_vld_ra_ri = - cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + cpu_to_le16(BIT(HNS3_TXD_VLD_B)); ring_ptr_move_fw(ring, next_to_use); - return 0; + return HNS3_LIKELY_BD_NUM; } frag_buf_num = hns3_tx_bd_count(size); @@ -1158,8 +1148,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { - u16 bdtp_fe_sc_vld_ra_ri = 0; - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ desc_cb->priv = priv; desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; @@ -1170,11 +1158,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE); - hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, - frag_end && (k == frag_buf_num - 1) ? 
- 1 : 0); desc->tx.bdtp_fe_sc_vld_ra_ri = - cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + cpu_to_le16(BIT(HNS3_TXD_VLD_B)); /* move ring pointer to next */ ring_ptr_move_fw(ring, next_to_use); @@ -1183,23 +1168,78 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, desc = &ring->desc[ring->next_to_use]; } - return 0; + return frag_buf_num; } -static unsigned int hns3_nic_bd_num(struct sk_buff *skb) +static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size, + unsigned int bd_num) { - unsigned int bd_num; + unsigned int size; int i; - /* if the total len is within the max bd limit */ - if (likely(skb->len <= HNS3_MAX_BD_SIZE)) - return skb_shinfo(skb)->nr_frags + 1; + size = skb_headlen(skb); + while (size > HNS3_MAX_BD_SIZE) { + bd_size[bd_num++] = HNS3_MAX_BD_SIZE; + size -= HNS3_MAX_BD_SIZE; - bd_num = hns3_tx_bd_count(skb_headlen(skb)); + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + if (size) { + bd_size[bd_num++] = size; + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - bd_num += hns3_tx_bd_count(skb_frag_size(frag)); + size = skb_frag_size(frag); + if (!size) + continue; + + while (size > HNS3_MAX_BD_SIZE) { + bd_size[bd_num++] = HNS3_MAX_BD_SIZE; + size -= HNS3_MAX_BD_SIZE; + + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + bd_size[bd_num++] = size; + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + } + + return bd_num; +} + +static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size) +{ + struct sk_buff *frag_skb; + unsigned int bd_num = 0; + + /* If the total len is within the max bd limit */ + if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) && + skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM)) + return skb_shinfo(skb)->nr_frags + 1U; + + /* The below case will always be linearized, return + * HNS3_MAX_BD_NUM_TSO + 1U to make sure it is linearized. + */ + if (unlikely(skb->len > HNS3_MAX_TSO_SIZE || + (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE))) + return HNS3_MAX_TSO_BD_NUM + 1U; + + bd_num = hns3_skb_bd_num(skb, bd_size, bd_num); + + if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; + + skb_walk_frags(skb, frag_skb) { + bd_num = hns3_skb_bd_num(frag_skb, bd_size, bd_num); + if (bd_num > HNS3_MAX_TSO_BD_NUM) + return bd_num; } return bd_num; @@ -1218,26 +1258,26 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) * 7 frags to to be larger than gso header len + mss, and the remaining * continuous 7 frags to be larger than MSS except the last 7 frags. */ -static bool hns3_skb_need_linearized(struct sk_buff *skb) +static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size, + unsigned int bd_num) { - int bd_limit = HNS3_MAX_BD_NUM_NORMAL - 1; unsigned int tot_len = 0; int i; - for (i = 0; i < bd_limit; i++) - tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]); + for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++) + tot_len += bd_size[i]; - /* ensure headlen + the first 7 frags is greater than mss + header - * and the first 7 frags is greater than mss. 
- */ - if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size + - hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size)) + /* ensure the first 8 frags is greater than mss + header */ + if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] < + skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) return true; - /* ensure the remaining continuous 7 buffer is greater than mss */ - for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) { - tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]); - tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]); + /* ensure every continuous 7 buffer is greater than mss + * except the last one. + */ + for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) { + tot_len -= bd_size[i]; + tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U]; if (tot_len < skb_shinfo(skb)->gso_size) return true; @@ -1249,15 +1289,16 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb) static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, struct sk_buff **out_skb) { + unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U]; struct sk_buff *skb = *out_skb; unsigned int bd_num; - bd_num = hns3_nic_bd_num(skb); - if (unlikely(bd_num > HNS3_MAX_BD_NUM_NORMAL)) { + bd_num = hns3_tx_bd_num(skb, bd_size); + if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) { struct sk_buff *new_skb; - if (skb_is_gso(skb) && bd_num <= HNS3_MAX_BD_NUM_TSO && - !hns3_skb_need_linearized(skb)) + if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) && + !hns3_skb_need_linearized(skb, bd_size, bd_num)) goto out; /* manual split the send packet */ @@ -1267,9 +1308,10 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, dev_kfree_skb_any(skb); *out_skb = new_skb; - bd_num = hns3_nic_bd_num(new_skb); - if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_TSO) || - (!skb_is_gso(new_skb) && bd_num > HNS3_MAX_BD_NUM_NORMAL)) + bd_num = hns3_tx_bd_count(new_skb->len); + if ((skb_is_gso(new_skb) && bd_num > HNS3_MAX_TSO_BD_NUM) || + (!skb_is_gso(new_skb) && + bd_num > HNS3_MAX_NON_TSO_BD_NUM)) return -ENOMEM; u64_stats_update_begin(&ring->syncp); @@ -1314,73 +1356,98 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) } } +static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, + struct sk_buff *skb, enum hns_desc_type type) +{ + unsigned int size = skb_headlen(skb); + int i, ret, bd_num = 0; + + if (size) { + ret = hns3_fill_desc(ring, skb, size, type); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + size = skb_frag_size(frag); + if (!size) + continue; + + ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE); + if (unlikely(ret < 0)) + return ret; + + bd_num += ret; + } + + return bd_num; +} + netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_nic_ring_data *ring_data = - &tx_ring_data(priv, skb->queue_mapping); - struct hns3_enet_ring *ring = ring_data->ring; + struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; struct netdev_queue *dev_queue; - skb_frag_t *frag; - int next_to_use_head; - int buf_num; - int seg_num; - int size; + int pre_ntu, next_to_use_head; + struct sk_buff *frag_skb; + int bd_num = 0; int ret; - int i; /* Prefetch the data used later */ prefetch(skb->data); - buf_num = hns3_nic_maybe_stop_tx(ring, &skb); - if (unlikely(buf_num <= 0)) { - if (buf_num == -EBUSY) { + ret = 
hns3_nic_maybe_stop_tx(ring, &skb); + if (unlikely(ret <= 0)) { + if (ret == -EBUSY) { u64_stats_update_begin(&ring->syncp); ring->stats.tx_busy++; u64_stats_update_end(&ring->syncp); goto out_net_tx_busy; - } else if (buf_num == -ENOMEM) { + } else if (ret == -ENOMEM) { u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; u64_stats_update_end(&ring->syncp); } - hns3_rl_err(netdev, "xmit error: %d!\n", buf_num); + hns3_rl_err(netdev, "xmit error: %d!\n", ret); goto out_err_tx_ok; } - /* No. of segments (plus a header) */ - seg_num = skb_shinfo(skb)->nr_frags + 1; - /* Fill the first part */ - size = skb_headlen(skb); - next_to_use_head = ring->next_to_use; - ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, - DESC_TYPE_SKB); - if (unlikely(ret)) + ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB); + if (unlikely(ret < 0)) goto fill_err; - /* Fill the fragments */ - for (i = 1; i < seg_num; i++) { - frag = &skb_shinfo(skb)->frags[i - 1]; - size = skb_frag_size(frag); + bd_num += ret; - ret = hns3_fill_desc(ring, frag, size, - seg_num - 1 == i ? 1 : 0, - DESC_TYPE_PAGE); + if (!skb_has_frag_list(skb)) + goto out; - if (unlikely(ret)) + skb_walk_frags(skb, frag_skb) { + ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE); + if (unlikely(ret < 0)) goto fill_err; + + bd_num += ret; } +out: + pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : + (ring->desc_num - 1); + ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= + cpu_to_le16(BIT(HNS3_TXD_FE_B)); /* Complete translate all packets */ - dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); + dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); netdev_tx_sent_queue(dev_queue, skb->len); wmb(); /* Commit all data before submit */ - hnae3_queue_xmit(ring->tqp, buf_num); + hnae3_queue_xmit(ring->tqp, bd_num); return NETDEV_TX_OK; @@ -1392,7 +1459,7 @@ out_err_tx_ok: return NETDEV_TX_OK; out_net_tx_busy: - netif_stop_subqueue(netdev, ring_data->queue_index); + netif_stop_subqueue(netdev, ring->queue_index); smp_mb(); /* Commit all data before submit */ return NETDEV_TX_BUSY; @@ -1413,6 +1480,16 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) return 0; } + /* For VF device, if there is a perm_addr, then the user will not + * be allowed to change the address. 
+ */ + if (!hns3_is_phys_func(h->pdev) && + !is_zero_ether_addr(netdev->perm_addr)) { + netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n", + netdev->perm_addr, mac_addr->sa_data); + return -EPERM; + } + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); if (ret) { netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); @@ -1505,7 +1582,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev, for (idx = 0; idx < queue_num; idx++) { /* fetch the tx stats */ - ring = priv->ring_data[idx].ring; + ring = &priv->ring[idx]; do { start = u64_stats_fetch_begin_irq(&ring->syncp); tx_bytes += ring->stats.tx_bytes; @@ -1523,7 +1600,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev, } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ - ring = priv->ring_data[idx + queue_num].ring; + ring = &priv->ring[idx + queue_num]; do { start = u64_stats_fetch_begin_irq(&ring->syncp); rx_bytes += ring->stats.rx_bytes; @@ -1633,8 +1710,8 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, int ret = -EIO; netif_dbg(h, drv, netdev, - "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=%u\n", - vf, vlan, qos, vlan_proto); + "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n", + vf, vlan, qos, ntohs(vlan_proto)); if (h->ae_algo->ops->set_vf_vlan_filter) ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, @@ -1643,6 +1720,29 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, return ret; } +static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (hns3_nic_resetting(netdev)) + return -EBUSY; + + if (!handle->ae_algo->ops->set_vf_spoofchk) + return -EOPNOTSUPP; + + return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); +} + +static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable) +{ + struct hnae3_handle *handle = hns3_get_handle(netdev); + + if (!handle->ae_algo->ops->set_vf_trust) + return -EOPNOTSUPP; + + return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); +} + static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) { struct hnae3_handle *h = hns3_get_handle(netdev); @@ -1671,7 +1771,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = hns3_get_handle(ndev); - struct hns3_enet_ring *tx_ring = NULL; + struct hns3_enet_ring *tx_ring; struct napi_struct *napi; int timeout_queue = 0; int hw_head, hw_tail; @@ -1692,6 +1792,9 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) time_after(jiffies, (trans_start + ndev->watchdog_timeo))) { timeout_queue = i; + netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n", + q->state, + jiffies_to_msecs(jiffies - trans_start)); break; } } @@ -1705,7 +1808,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) priv->tx_timeout_count++; - tx_ring = priv->ring_data[timeout_queue].ring; + tx_ring = &priv->ring[timeout_queue]; napi = &tx_ring->tqp_vector->napi; netdev_info(ndev, @@ -1805,6 +1908,57 @@ static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, } #endif +static int hns3_nic_get_vf_config(struct net_device *ndev, int vf, + struct ifla_vf_info *ivf) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (!h->ae_algo->ops->get_vf_config) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_vf_config(h, vf, ivf); +} + +static int 
hns3_nic_set_vf_link_state(struct net_device *ndev, int vf, + int link_state) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (!h->ae_algo->ops->set_vf_link_state) + return -EOPNOTSUPP; + + return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); +} + +static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf, + int min_tx_rate, int max_tx_rate) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (!h->ae_algo->ops->set_vf_rate) + return -EOPNOTSUPP; + + return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, + false); +} + +static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo->ops->set_vf_mac) + return -EOPNOTSUPP; + + if (is_multicast_ether_addr(mac)) { + netdev_err(netdev, + "Invalid MAC:%pM specified. Could not set MAC\n", + mac); + return -EINVAL; + } + + return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); +} + static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_open = hns3_nic_net_open, .ndo_stop = hns3_nic_net_stop, @@ -1820,10 +1974,15 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, + .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk, + .ndo_set_vf_trust = hns3_set_vf_trust, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = hns3_rx_flow_steer, #endif - + .ndo_get_vf_config = hns3_nic_get_vf_config, + .ndo_set_vf_link_state = hns3_nic_set_vf_link_state, + .ndo_set_vf_rate = hns3_nic_set_vf_rate, + .ndo_set_vf_mac = hns3_nic_set_vf_mac, }; bool hns3_is_phys_func(struct pci_dev *pdev) @@ -1843,7 +2002,7 @@ bool hns3_is_phys_func(struct pci_dev *pdev) case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF: return false; default: - dev_warn(&pdev->dev, "un-recognized pci device-id %d", + dev_warn(&pdev->dev, "un-recognized pci device-id %u", dev_id); } @@ -2069,9 +2228,8 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; - - netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; @@ -2081,21 +2239,24 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_FRAGLIST; netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_FRAGLIST; netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC | + NETIF_F_FRAGLIST; if (pdev->revision 
>= 0x21) { netdev->hw_features |= NETIF_F_GRO_HW; @@ -2320,18 +2481,19 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) void hns3_clean_tx_ring(struct hns3_enet_ring *ring) { - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct net_device *netdev = ring_to_netdev(ring); struct hns3_nic_priv *priv = netdev_priv(netdev); struct netdev_queue *dev_queue; int bytes, pkts; int head; head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); - rmb(); /* Make sure head is ready before touch any data */ if (is_ring_empty(ring) || head == ring->next_to_clean) return; /* no data to poll */ + rmb(); /* Make sure head is ready before touch any data */ + if (unlikely(!is_valid_clean_head(ring, head))) { netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, ring->next_to_use, ring->next_to_clean); @@ -2358,7 +2520,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring) netdev_tx_completed_queue(dev_queue, pkts, bytes); if (unlikely(pkts && netif_carrier_ok(netdev) && - (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { + ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ @@ -2401,7 +2563,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, ring->stats.sw_err_cnt++; u64_stats_update_end(&ring->syncp); - hns3_rl_err(ring->tqp_vector->napi.dev, + hns3_rl_err(ring_to_netdev(ring), "alloc rx buffer failed: %d\n", ret); break; @@ -2510,7 +2672,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info) static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 l234info, u32 bd_base_info, u32 ol_info) { - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct net_device *netdev = ring_to_netdev(ring); int l3_type, l4_type; int ol4_type; @@ -2626,7 +2788,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, { #define HNS3_NEED_ADD_FRAG 1 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct net_device *netdev = ring_to_netdev(ring); struct sk_buff *skb; ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); @@ -2672,10 +2834,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, } static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, - struct sk_buff **out_skb, bool pending) + bool pending) { - struct sk_buff *skb = *out_skb; - struct sk_buff *head_skb = *out_skb; + struct sk_buff *skb = ring->skb; + struct sk_buff *head_skb = skb; struct sk_buff *new_skb; struct hns3_desc_cb *desc_cb; struct hns3_desc *pre_desc; @@ -2704,10 +2866,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, return -ENXIO; if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { - new_skb = napi_alloc_skb(&ring->tqp_vector->napi, - HNS3_RX_HEAD_SIZE); + new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); if (unlikely(!new_skb)) { - hns3_rl_err(ring->tqp_vector->napi.dev, + hns3_rl_err(ring_to_netdev(ring), "alloc rx fraglist skb fail\n"); return -ENXIO; } @@ -2783,7 +2944,7 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) { - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct net_device *netdev = ring_to_netdev(ring); enum hns3_pkt_l2t_type l2_frame_type; u32 bd_base_info, l234info, ol_info; struct hns3_desc *desc; @@ -2858,8 
+3019,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) return 0; } -static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, - struct sk_buff **out_skb) +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) { struct sk_buff *skb = ring->skb; struct hns3_desc_cb *desc_cb; @@ -2897,12 +3057,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, if (!skb) { ret = hns3_alloc_skb(ring, length, ring->va); - *out_skb = skb = ring->skb; + skb = ring->skb; if (ret < 0) /* alloc buffer fail */ return ret; if (ret > 0) { /* need add frag */ - ret = hns3_add_frag(ring, desc, &skb, false); + ret = hns3_add_frag(ring, desc, false); if (ret) return ret; @@ -2913,7 +3073,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ALIGN(ring->pull_len, sizeof(long))); } } else { - ret = hns3_add_frag(ring, desc, &skb, true); + ret = hns3_add_frag(ring, desc, true); if (ret) return ret; @@ -2931,8 +3091,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, } skb_record_rx_queue(skb, ring->tqp->tqp_index); - *out_skb = skb; - return 0; } @@ -2941,17 +3099,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, { #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 int unused_count = hns3_desc_unused(ring); - struct sk_buff *skb = ring->skb; int recv_pkts = 0; int recv_bds = 0; int err, num; num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); - rmb(); /* Make sure num taken effect before the other data is touched */ - num -= unused_count; unused_count -= ring->pending_buf; + if (num <= 0) + goto out; + + rmb(); /* Make sure num taken effect before the other data is touched */ + while (recv_pkts < budget && recv_bds < num) { /* Reuse or realloc buffers */ if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { @@ -2961,27 +3121,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, } /* Poll one pkt */ - err = hns3_handle_rx_bd(ring, &skb); - if (unlikely(!skb)) /* This fault cannot be repaired */ - goto out; - - if (err == -ENXIO) { /* Do not get FE for the packet */ + err = hns3_handle_rx_bd(ring); + /* Do not get FE for the packet or failed to alloc skb */ + if (unlikely(!ring->skb || err == -ENXIO)) { goto out; - } else if (unlikely(err)) { /* Do jump the err */ - recv_bds += ring->pending_buf; - unused_count += ring->pending_buf; - ring->skb = NULL; - ring->pending_buf = 0; - continue; + } else if (likely(!err)) { + rx_fn(ring, ring->skb); + recv_pkts++; } - rx_fn(ring, skb); recv_bds += ring->pending_buf; unused_count += ring->pending_buf; ring->skb = NULL; ring->pending_buf = 0; - - recv_pkts++; } out: @@ -3324,13 +3476,13 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) tqp_vector = &priv->tqp_vector[vector_i]; hns3_add_ring_to_group(&tqp_vector->tx_group, - priv->ring_data[i].ring); + &priv->ring[i]); hns3_add_ring_to_group(&tqp_vector->rx_group, - priv->ring_data[i + tqp_num].ring); + &priv->ring[i + tqp_num]); - priv->ring_data[i].ring->tqp_vector = tqp_vector; - priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; + priv->ring[i].tqp_vector = tqp_vector; + priv->ring[i + tqp_num].tqp_vector = tqp_vector; tqp_vector->num_tqps++; } @@ -3474,28 +3626,22 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) return 0; } -static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, - unsigned int ring_type) +static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, + unsigned int ring_type) { - struct hns3_nic_ring_data *ring_data 
= priv->ring_data; int queue_num = priv->ae_handle->kinfo.num_tqps; - struct pci_dev *pdev = priv->ae_handle->pdev; struct hns3_enet_ring *ring; int desc_num; - ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); - if (!ring) - return -ENOMEM; - if (ring_type == HNAE3_RING_TYPE_TX) { + ring = &priv->ring[q->tqp_index]; desc_num = priv->ae_handle->kinfo.num_tx_desc; - ring_data[q->tqp_index].ring = ring; - ring_data[q->tqp_index].queue_index = q->tqp_index; + ring->queue_index = q->tqp_index; ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; } else { + ring = &priv->ring[q->tqp_index + queue_num]; desc_num = priv->ae_handle->kinfo.num_rx_desc; - ring_data[q->tqp_index + queue_num].ring = ring; - ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; + ring->queue_index = q->tqp_index; ring->io_base = q->io_base; } @@ -3510,76 +3656,41 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, ring->desc_num = desc_num; ring->next_to_use = 0; ring->next_to_clean = 0; - - return 0; } -static int hns3_queue_to_ring(struct hnae3_queue *tqp, - struct hns3_nic_priv *priv) +static void hns3_queue_to_ring(struct hnae3_queue *tqp, + struct hns3_nic_priv *priv) { - int ret; - - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); - if (ret) - return ret; - - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); - if (ret) { - devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring); - return ret; - } - - return 0; + hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); + hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); } static int hns3_get_ring_config(struct hns3_nic_priv *priv) { struct hnae3_handle *h = priv->ae_handle; struct pci_dev *pdev = h->pdev; - int i, ret; + int i; - priv->ring_data = devm_kzalloc(&pdev->dev, - array3_size(h->kinfo.num_tqps, - sizeof(*priv->ring_data), - 2), - GFP_KERNEL); - if (!priv->ring_data) + priv->ring = devm_kzalloc(&pdev->dev, + array3_size(h->kinfo.num_tqps, + sizeof(*priv->ring), 2), + GFP_KERNEL); + if (!priv->ring) return -ENOMEM; - for (i = 0; i < h->kinfo.num_tqps; i++) { - ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); - if (ret) - goto err; - } + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_queue_to_ring(h->kinfo.tqp[i], priv); return 0; -err: - while (i--) { - devm_kfree(priv->dev, priv->ring_data[i].ring); - devm_kfree(priv->dev, - priv->ring_data[i + h->kinfo.num_tqps].ring); - } - - devm_kfree(&pdev->dev, priv->ring_data); - priv->ring_data = NULL; - return ret; } static void hns3_put_ring_config(struct hns3_nic_priv *priv) { - struct hnae3_handle *h = priv->ae_handle; - int i; - - if (!priv->ring_data) + if (!priv->ring) return; - for (i = 0; i < h->kinfo.num_tqps; i++) { - devm_kfree(priv->dev, priv->ring_data[i].ring); - devm_kfree(priv->dev, - priv->ring_data[i + h->kinfo.num_tqps].ring); - } - devm_kfree(priv->dev, priv->ring_data); - priv->ring_data = NULL; + devm_kfree(priv->dev, priv->ring); + priv->ring = NULL; } static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) @@ -3696,7 +3807,7 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv) for (j = 0; j < tc_info->tqp_count; j++) { struct hnae3_queue *q; - q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp; + q = priv->ring[tc_info->tqp_offset + j].tqp; hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG, tc_info->tc); } @@ -3711,21 +3822,21 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) int ret; for (i = 0; i < ring_num; i++) { - ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); + ret = 
hns3_alloc_ring_memory(&priv->ring[i]); if (ret) { dev_err(priv->dev, "Alloc ring memory fail! ret=%d\n", ret); goto out_when_alloc_ring_memory; } - u64_stats_init(&priv->ring_data[i].ring->syncp); + u64_stats_init(&priv->ring[i].syncp); } return 0; out_when_alloc_ring_memory: for (j = i - 1; j >= 0; j--) - hns3_fini_ring(priv->ring_data[j].ring); + hns3_fini_ring(&priv->ring[j]); return -ENOMEM; } @@ -3736,30 +3847,31 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) int i; for (i = 0; i < h->kinfo.num_tqps; i++) { - hns3_fini_ring(priv->ring_data[i].ring); - hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + hns3_fini_ring(&priv->ring[i]); + hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); } return 0; } /* Set mac addr if it is configured. or leave it to the AE driver */ -static int hns3_init_mac_addr(struct net_device *netdev, bool init) +static int hns3_init_mac_addr(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; u8 mac_addr_temp[ETH_ALEN]; int ret = 0; - if (h->ae_algo->ops->get_mac_addr && init) { + if (h->ae_algo->ops->get_mac_addr) h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); - ether_addr_copy(netdev->dev_addr, mac_addr_temp); - } /* Check if the MAC address is valid, if not get a random one */ - if (!is_valid_ether_addr(netdev->dev_addr)) { + if (!is_valid_ether_addr(mac_addr_temp)) { eth_hw_addr_random(netdev); dev_warn(priv->dev, "using random MAC address %pM\n", netdev->dev_addr); + } else { + ether_addr_copy(netdev->dev_addr, mac_addr_temp); + ether_addr_copy(netdev->perm_addr, mac_addr_temp); } if (h->ae_algo->ops->set_mac_addr) @@ -3827,14 +3939,14 @@ static void hns3_info_show(struct hns3_nic_priv *priv) struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); - dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps); - dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size); - dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size); - dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len); - dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc); - dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc); - dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc); - dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu); + dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); + dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); + dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); + dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); + dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); + dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); + dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc); + dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); } static int hns3_client_init(struct hnae3_handle *handle) @@ -3863,7 +3975,7 @@ static int hns3_client_init(struct hnae3_handle *handle) handle->kinfo.netdev = netdev; handle->priv = (void *)priv; - hns3_init_mac_addr(netdev, true); + hns3_init_mac_addr(netdev); hns3_set_default_feature(netdev); @@ -3897,7 +4009,7 @@ static int hns3_client_init(struct hnae3_handle *handle) ret = hns3_init_all_ring(priv); if (ret) { ret = -ENOMEM; - goto out_init_ring_data; + goto out_init_ring; } ret = hns3_init_phy(netdev); @@ -3936,12 +4048,12 @@ 
out_reg_netdev_fail: hns3_uninit_phy(netdev); out_init_phy: hns3_uninit_all_ring(priv); -out_init_ring_data: +out_init_ring: hns3_nic_uninit_vector_data(priv); out_init_vector_data: hns3_nic_dealloc_vector_data(priv); out_alloc_vector_data: - priv->ring_data = NULL; + priv->ring = NULL; out_get_ring_cfg: priv->ae_handle = NULL; free_netdev(netdev); @@ -4102,7 +4214,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) /* if alloc new buffer fail, exit directly * and reclear in up flow. */ - netdev_warn(ring->tqp->handle->kinfo.netdev, + netdev_warn(ring_to_netdev(ring), "reserve buffer map failed, ret = %d\n", ret); return ret; @@ -4148,10 +4260,10 @@ static void hns3_clear_all_ring(struct hnae3_handle *h, bool force) for (i = 0; i < h->kinfo.num_tqps; i++) { struct hns3_enet_ring *ring; - ring = priv->ring_data[i].ring; + ring = &priv->ring[i]; hns3_clear_tx_ring(ring); - ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + ring = &priv->ring[i + h->kinfo.num_tqps]; /* Continue to clear other rings even if clearing some * rings failed. */ @@ -4175,16 +4287,16 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h) if (ret) return ret; - hns3_init_ring_hw(priv->ring_data[i].ring); + hns3_init_ring_hw(&priv->ring[i]); /* We need to clear tx ring here because self test will * use the ring and will not run down before up */ - hns3_clear_tx_ring(priv->ring_data[i].ring); - priv->ring_data[i].ring->next_to_clean = 0; - priv->ring_data[i].ring->next_to_use = 0; + hns3_clear_tx_ring(&priv->ring[i]); + priv->ring[i].next_to_clean = 0; + priv->ring[i].next_to_use = 0; - rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + rx_ring = &priv->ring[i + h->kinfo.num_tqps]; hns3_init_ring_hw(rx_ring); ret = hns3_clear_rx_ring(rx_ring); if (ret) @@ -4331,7 +4443,7 @@ static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle) bool vlan_filter_enable; int ret; - ret = hns3_init_mac_addr(netdev, false); + ret = hns3_init_mac_addr(netdev); if (ret) return ret; @@ -4454,7 +4566,7 @@ int hns3_set_channels(struct net_device *netdev, if (new_tqp_num > hns3_get_max_available_channels(h) || new_tqp_num < 1) { dev_err(&netdev->dev, - "Change tqps fail, the tqp range is from 1 to %d", + "Change tqps fail, the tqp range is from 1 to %u", hns3_get_max_available_channels(h)); return -EINVAL; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 5d468ed404a6..9d47abd5c37c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -76,7 +76,7 @@ enum hns3_nic_state { #define HNS3_RING_NAME_LEN 16 #define HNS3_BUFFER_SIZE_2048 2048 #define HNS3_RING_MAX_PENDING 32760 -#define HNS3_RING_MIN_PENDING 24 +#define HNS3_RING_MIN_PENDING 72 #define HNS3_RING_BD_MULTIPLE 8 /* max frame size of mac */ #define HNS3_MAC_MAX_FRAME 9728 @@ -186,7 +186,7 @@ enum hns3_nic_state { #define HNS3_TXD_MSS_S 0 #define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) -#define HNS3_TX_LAST_SIZE_M 0xffff +#define HNS3_TX_LAST_SIZE_M 0xffff #define HNS3_VECTOR_TX_IRQ BIT_ULL(0) #define HNS3_VECTOR_RX_IRQ BIT_ULL(1) @@ -195,9 +195,13 @@ enum hns3_nic_state { #define HNS3_VECTOR_INITED 1 #define HNS3_MAX_BD_SIZE 65535 -#define HNS3_MAX_BD_NUM_NORMAL 8 -#define HNS3_MAX_BD_NUM_TSO 63 -#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS +#define HNS3_MAX_NON_TSO_BD_NUM 8U +#define HNS3_MAX_TSO_BD_NUM 63U +#define HNS3_MAX_TSO_SIZE \ + (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM) + +#define HNS3_MAX_NON_TSO_SIZE \ + 
(HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM) #define HNS3_VECTOR_GL0_OFFSET 0x100 #define HNS3_VECTOR_GL1_OFFSET 0x200 @@ -309,7 +313,7 @@ struct hns3_desc_cb { u16 reuse_flag; - /* desc type, used by the ring user to mark the type of the priv data */ + /* desc type, used by the ring user to mark the type of the priv data */ u16 type; }; @@ -405,6 +409,7 @@ struct hns3_enet_ring { struct hns3_enet_ring *next; struct hns3_enet_tqp_vector *tqp_vector; struct hnae3_queue *tqp; + int queue_index; struct device *dev; /* will be used for DMA mapping of descriptors */ /* statistic */ @@ -430,18 +435,7 @@ struct hns3_enet_ring { int pending_buf; struct sk_buff *skb; struct sk_buff *tail_skb; -}; - -struct hns_queue; - -struct hns3_nic_ring_data { - struct hns3_enet_ring *ring; - struct napi_struct napi; - int queue_index; - int (*poll_one)(struct hns3_nic_ring_data *, int, void *); - void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *); - void (*fini_process)(struct hns3_nic_ring_data *); -}; +} ____cacheline_internodealigned_in_smp; enum hns3_flow_level_range { HNS3_FLOW_LOW = 0, @@ -518,7 +512,7 @@ struct hns3_nic_priv { * the cb for nic to manage the ring buffer, the first half of the * array is for tx_ring and vice versa for the second half */ - struct hns3_nic_ring_data *ring_data; + struct hns3_enet_ring *ring; struct hns3_enet_tqp_vector *tqp_vector; u16 vector_num; @@ -613,11 +607,11 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define ring_to_dev(ring) ((ring)->dev) +#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev) + #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \ DMA_TO_DEVICE : DMA_FROM_DEVICE) -#define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) - #define hns3_buf_size(_ring) ((_ring)->buf_size) static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 52c9d204fe3d..6e0212b79438 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -198,7 +198,7 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) kinfo = &h->kinfo; for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { - struct hns3_enet_ring *ring = priv->ring_data[i].ring; + struct hns3_enet_ring *ring = &priv->ring[i]; struct hns3_enet_ring_group *rx_group; u64 pre_rx_pkt; @@ -221,7 +221,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, u32 i; for (i = start_ringid; i <= end_ringid; i++) { - struct hns3_enet_ring *ring = priv->ring_data[i].ring; + struct hns3_enet_ring *ring = &priv->ring[i]; hns3_clean_tx_ring(ring); } @@ -486,7 +486,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) /* get stats for Tx */ for (i = 0; i < kinfo->num_tqps; i++) { - ring = nic_priv->ring_data[i].ring; + ring = &nic_priv->ring[i]; for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; *data++ = *(u64 *)stat; @@ -495,7 +495,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) /* get stats for Rx */ for (i = 0; i < kinfo->num_tqps; i++) { - ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; + ring = &nic_priv->ring[i + kinfo->num_tqps]; for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; *data++ = *(u64 *)stat; @@ -598,8 +598,8 @@ static void hns3_get_ringparam(struct net_device *netdev, 
param->tx_max_pending = HNS3_RING_MAX_PENDING; param->rx_max_pending = HNS3_RING_MAX_PENDING; - param->tx_pending = priv->ring_data[0].ring->desc_num; - param->rx_pending = priv->ring_data[queue_num].ring->desc_num; + param->tx_pending = priv->ring[0].desc_num; + param->rx_pending = priv->ring[queue_num].desc_num; } static void hns3_get_pauseparam(struct net_device *netdev, @@ -901,9 +901,8 @@ static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, h->kinfo.num_rx_desc = rx_desc_num; for (i = 0; i < h->kinfo.num_tqps; i++) { - priv->ring_data[i].ring->desc_num = tx_desc_num; - priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num = - rx_desc_num; + priv->ring[i].desc_num = tx_desc_num; + priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num; } } @@ -919,7 +918,7 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv) return NULL; for (i = 0; i < handle->kinfo.num_tqps * 2; i++) { - memcpy(&tmp_rings[i], priv->ring_data[i].ring, + memcpy(&tmp_rings[i], &priv->ring[i], sizeof(struct hns3_enet_ring)); tmp_rings[i].skb = NULL; } @@ -967,8 +966,8 @@ static int hns3_set_ringparam(struct net_device *ndev, /* Hardware requires that its descriptors must be multiple of eight */ new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE); new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE); - old_tx_desc_num = priv->ring_data[0].ring->desc_num; - old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num; + old_tx_desc_num = priv->ring[0].desc_num; + old_rx_desc_num = priv->ring[queue_num].desc_num; if (old_tx_desc_num == new_tx_desc_num && old_rx_desc_num == new_rx_desc_num) return 0; @@ -981,7 +980,7 @@ static int hns3_set_ringparam(struct net_device *ndev, } netdev_info(ndev, - "Changing Tx/Rx ring depth from %d/%d to %d/%d\n", + "Changing Tx/Rx ring depth from %u/%u to %u/%u\n", old_tx_desc_num, old_rx_desc_num, new_tx_desc_num, new_rx_desc_num); @@ -997,7 +996,7 @@ static int hns3_set_ringparam(struct net_device *ndev, hns3_change_all_ring_bd_num(priv, old_tx_desc_num, old_rx_desc_num); for (i = 0; i < h->kinfo.num_tqps * 2; i++) - memcpy(priv->ring_data[i].ring, &tmp_rings[i], + memcpy(&priv->ring[i], &tmp_rings[i], sizeof(struct hns3_enet_ring)); } else { for (i = 0; i < h->kinfo.num_tqps * 2; i++) @@ -1093,13 +1092,13 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, if (queue >= queue_num) { netdev_err(netdev, - "Invalid queue value %d! Queue max id=%d\n", + "Invalid queue value %u! 
Queue max id=%u\n", queue, queue_num - 1); return -EINVAL; } - tx_vector = priv->ring_data[queue].ring->tqp_vector; - rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; + tx_vector = priv->ring[queue].tqp_vector; + rx_vector = priv->ring[queue_num + queue].tqp_vector; cmd->use_adaptive_tx_coalesce = tx_vector->tx_group.coal.gl_adapt_enable; @@ -1143,14 +1142,14 @@ static int hns3_check_gl_coalesce_para(struct net_device *netdev, rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs); if (rx_gl != cmd->rx_coalesce_usecs) { netdev_info(netdev, - "rx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", + "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n", cmd->rx_coalesce_usecs, rx_gl); } tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs); if (tx_gl != cmd->tx_coalesce_usecs) { netdev_info(netdev, - "tx_usecs(%d) rounded down to %d, because it must be multiple of 2.\n", + "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n", cmd->tx_coalesce_usecs, tx_gl); } @@ -1178,7 +1177,7 @@ static int hns3_check_rl_coalesce_para(struct net_device *netdev, rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high); if (rl != cmd->rx_coalesce_usecs_high) { netdev_info(netdev, - "usecs_high(%d) rounded down to %d, because it must be multiple of 4.\n", + "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n", cmd->rx_coalesce_usecs_high, rl); } @@ -1207,7 +1206,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev, if (cmd->use_adaptive_tx_coalesce == 1 || cmd->use_adaptive_rx_coalesce == 1) { netdev_info(netdev, - "adaptive-tx=%d and adaptive-rx=%d, tx_usecs or rx_usecs will changed dynamically.\n", + "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n", cmd->use_adaptive_tx_coalesce, cmd->use_adaptive_rx_coalesce); } @@ -1224,8 +1223,8 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int queue_num = h->kinfo.num_tqps; - tx_vector = priv->ring_data[queue].ring->tqp_vector; - rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector; + tx_vector = priv->ring[queue].tqp_vector; + rx_vector = priv->ring[queue_num + queue].tqp_vector; tx_vector->tx_group.coal.gl_adapt_enable = cmd->use_adaptive_tx_coalesce; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index ecf58cfd253d..940ead3970d1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -145,7 +145,7 @@ static int hclge_cmd_csq_clean(struct hclge_hw *hw) rmb(); /* Make sure head is ready before touch any data */ if (!is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head, csq->next_to_use, csq->next_to_clean); dev_warn(&hdev->pdev->dev, "Disabling any further commands to IMP firmware\n"); @@ -314,11 +314,10 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) } while (timeout < hw->cmq.tx_timeout); } - if (!complete) { + if (!complete) retval = -EBADE; - } else { + else retval = hclge_cmd_check_retval(hw, desc, num, ntc); - } /* Clean the command send queue */ handle = hclge_cmd_csq_clean(hw); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 1426eb5ddf3d..d97da67f07a1 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -5,8 +5,10 @@ #define __HCLGE_CMD_H #include <linux/types.h> #include <linux/io.h> +#include <linux/etherdevice.h> #define HCLGE_CMDQ_TX_TIMEOUT 30000 +#define HCLGE_DESC_DATA_LEN 6 struct hclge_dev; struct hclge_desc { @@ -18,7 +20,7 @@ struct hclge_desc { __le16 flag; __le16 retval; __le16 rsv; - __le32 data[6]; + __le32 data[HCLGE_DESC_DATA_LEN]; }; struct hclge_cmq_ring { @@ -244,7 +246,7 @@ enum hclge_opcode_type { /* QCN commands */ HCLGE_OPC_QCN_MOD_CFG = 0x1A01, HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, - HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03, + HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03, HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04, HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05, HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06, @@ -259,6 +261,7 @@ enum hclge_opcode_type { /* NCL config command */ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, + /* M7 stats command */ HCLGE_OPC_M7_STATS_BD = 0x7012, HCLGE_OPC_M7_STATS_INFO = 0x7013, @@ -428,8 +431,10 @@ struct hclge_rx_pkt_buf_cmd { #define HCLGE_PF_MAC_NUM_MASK 0x3 #define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B) #define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B) +#define HCLGE_VF_RST_STATUS_CMD 4 + struct hclge_func_status_cmd { - __le32 vf_rst_state[4]; + __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD]; u8 pf_state; u8 mac_id; u8 rsv1; @@ -485,10 +490,12 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_UMV_TBL_SPACE_S 16 #define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16) +#define HCLGE_CFG_CMD_CNT 4 + struct hclge_cfg_param_cmd { __le32 offset; __le32 rsv; - __le32 param[4]; + __le32 param[HCLGE_CFG_CMD_CNT]; }; #define HCLGE_MAC_MODE 0x0 @@ -712,8 +719,7 @@ struct hclge_mac_mgr_tbl_entry_cmd { u8 flags; u8 resp_code; __le16 vlan_tag; - __le32 mac_addr_hi32; - __le16 mac_addr_lo16; + u8 mac_addr[ETH_ALEN]; __le16 rsv1; __le16 ethter_type; __le16 egress_port; @@ -758,20 +764,27 @@ struct hclge_vlan_filter_ctrl_cmd { u8 rsv2[19]; }; +#define HCLGE_VLAN_ID_OFFSET_STEP 160 +#define HCLGE_VLAN_BYTE_SIZE 8 +#define HCLGE_VLAN_OFFSET_BITMAP \ + (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE) + struct hclge_vlan_filter_pf_cfg_cmd { u8 vlan_offset; u8 vlan_cfg; u8 rsv[2]; - u8 vlan_offset_bitmap[20]; + u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP]; }; +#define HCLGE_MAX_VF_BYTES 16 + struct hclge_vlan_filter_vf_cfg_cmd { __le16 vlan_id; u8 resp_code; u8 rsv; u8 vlan_cfg; u8 rsv1[3]; - u8 vf_bitmap[16]; + u8 vf_bitmap[HCLGE_MAX_VF_BYTES]; }; #define HCLGE_SWITCH_ANTI_SPOOF_B 0U @@ -806,6 +819,7 @@ enum hclge_mac_vlan_cfg_sel { #define HCLGE_CFG_NIC_ROCE_SEL_B 4 #define HCLGE_ACCEPT_TAG2_B 5 #define HCLGE_ACCEPT_UNTAG2_B 6 +#define HCLGE_VF_NUM_PER_BYTE 8 struct hclge_vport_vtag_tx_cfg_cmd { u8 vport_vlan_cfg; @@ -813,7 +827,7 @@ struct hclge_vport_vtag_tx_cfg_cmd { u8 rsv1[2]; __le16 def_vlan_tag1; __le16 def_vlan_tag2; - u8 vf_bitmap[8]; + u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE]; u8 rsv2[8]; }; @@ -825,7 +839,7 @@ struct hclge_vport_vtag_rx_cfg_cmd { u8 vport_vlan_cfg; u8 vf_offset; u8 rsv1[6]; - u8 vf_bitmap[8]; + u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE]; u8 rsv2[8]; }; @@ -864,7 +878,7 @@ struct hclge_mac_ethertype_idx_rd_cmd { u8 flags; u8 resp_code; __le16 vlan_tag; - u8 mac_addr[6]; + u8 mac_addr[ETH_ALEN]; __le16 index; __le16 ethter_type; __le16 egress_port; @@ -1090,9 +1104,6 @@ void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, enum hclge_opcode_type opcode, bool is_read); void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read); -int 
hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, - struct hclge_promisc_param *param); - enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, struct hclge_desc *desc); enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index a1790af73096..d6c3952aba04 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -87,7 +87,7 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { if (prio_tc[i] >= num_tc) { dev_err(&hdev->pdev->dev, - "prio_tc[%u] checking failed, %u >= num_tc(%u)\n", + "prio_tc[%d] checking failed, %u >= num_tc(%u)\n", i, prio_tc[i], num_tc); return -EINVAL; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index d0128d792717..112df34b3869 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -145,7 +145,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, return; } - buf_len = sizeof(struct hclge_desc) * bd_num; + buf_len = sizeof(struct hclge_desc) * bd_num; desc_src = kzalloc(buf_len, GFP_KERNEL); if (!desc_src) { dev_err(&hdev->pdev->dev, "call kzalloc failed\n"); @@ -153,7 +153,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, } desc = desc_src; - ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd); + ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd); if (ret) { kfree(desc_src); return; @@ -169,7 +169,7 @@ static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, if (dfx_message->flag) dev_info(&hdev->pdev->dev, "%s: 0x%x\n", dfx_message->message, - desc->data[i % entries_per_desc]); + le32_to_cpu(desc->data[i % entries_per_desc])); dfx_message++; } @@ -237,44 +237,48 @@ static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf) if (ret) return; - dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]); + dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT); if (ret) return; - dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]); + dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1])); ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS); if (ret) return; - dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]); - dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]); - dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]); - dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]); - dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]); - dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]); - dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]); + dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1])); + dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2])); + dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", + le32_to_cpu(desc[0].data[3])); + dev_info(dev, "tx_private_waterline: 0x%x\n", + le32_to_cpu(desc[0].data[4])); + dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5])); + dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0])); + dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1])); ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, 
HCLGE_OPC_TM_INTERNAL_CNT); if (ret) return; - dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]); - dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]); + dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1])); + dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2])); ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, HCLGE_OPC_TM_INTERNAL_STS_1); if (ret) return; - dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]); - dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]); - dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]); - dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]); - dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]); + dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1])); + dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2])); + dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3])); + dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", + le32_to_cpu(desc[0].data[4])); + dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", + le32_to_cpu(desc[0].data[5])); } static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf) @@ -364,7 +368,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id); dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n", - pg_shap_cfg_cmd->pg_shapping_para); + le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); cmd = HCLGE_OPC_TM_PG_P_SHAPPING; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -375,7 +379,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id); dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n", - pg_shap_cfg_cmd->pg_shapping_para); + le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para)); cmd = HCLGE_OPC_TM_PORT_SHAPPING; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -385,7 +389,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n", - port_shap_cfg_cmd->port_shapping_para); + le32_to_cpu(port_shap_cfg_cmd->port_shapping_para)); cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -393,7 +397,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) if (ret) goto err_tm_pg_cmd_send; - dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]); + dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", + le32_to_cpu(desc.data[0])); cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -401,7 +406,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) if (ret) goto err_tm_pg_cmd_send; - dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]); + dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", + le32_to_cpu(desc.data[0])); cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -409,7 +415,8 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) if (ret) goto err_tm_pg_cmd_send; - dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]); + dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", + le32_to_cpu(desc.data[0])); if (!hnae3_dev_dcb_supported(hdev)) { dev_info(&hdev->pdev->dev, @@ -429,7 +436,7 @@ static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n", 
bp_to_qs_map_cmd->qs_group_id); dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", - bp_to_qs_map_cmd->qs_bit_map); + le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map)); return; err_tm_pg_cmd_send: @@ -471,7 +478,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data; dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n", - qs_to_pri_map->qs_id); + le16_to_cpu(qs_to_pri_map->qs_id)); dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n", qs_to_pri_map->priority); dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n", @@ -484,9 +491,10 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) goto err_tm_cmd_send; nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; - dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id); + dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", + le16_to_cpu(nq_to_qs_map->nq_id)); dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n", - nq_to_qs_map->qset_id); + le16_to_cpu(nq_to_qs_map->qset_id)); cmd = HCLGE_OPC_TM_PG_WEIGHT; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -505,7 +513,8 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) goto err_tm_cmd_send; qs_weight = (struct hclge_qs_weight_cmd *)desc.data; - dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id); + dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", + le16_to_cpu(qs_weight->qs_id)); dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr); cmd = HCLGE_OPC_TM_PRI_WEIGHT; @@ -527,7 +536,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id); dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n", - shap_cfg_cmd->pri_shapping_para); + le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); cmd = HCLGE_OPC_TM_PRI_P_SHAPPING; hclge_cmd_setup_basic_desc(&desc, cmd, true); @@ -538,7 +547,7 @@ static void hclge_dbg_dump_tm(struct hclge_dev *hdev) shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id); dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n", - shap_cfg_cmd->pri_shapping_para); + le32_to_cpu(shap_cfg_cmd->pri_shapping_para)); hclge_dbg_dump_tm_pg(hdev); @@ -658,7 +667,7 @@ static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n", pause_param->pause_trans_gap); dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n", - pause_param->pause_trans_time); + le16_to_cpu(pause_param->pause_trans_time)); } static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev) @@ -712,7 +721,7 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, - tx_buf_cmd->tx_pkt_buff[i]); + le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i])); cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC; hclge_cmd_setup_basic_desc(desc, cmd, true); @@ -724,10 +733,10 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, - rx_buf_cmd->buf_num[i]); + le16_to_cpu(rx_buf_cmd->buf_num[i])); dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", - rx_buf_cmd->shared_buf); + le16_to_cpu(rx_buf_cmd->shared_buf)); cmd = HCLGE_OPC_RX_COM_WL_ALLOC; 
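For context, and not part of the patch itself: the debugfs hunks above now wrap every descriptor word in le16_to_cpu()/le32_to_cpu() before printing, because command descriptors are little-endian on the wire and printing the raw struct field is only correct on a little-endian host. A minimal user-space sketch of what that conversion does (sketch_le32_to_cpu is an illustrative stand-in, not a kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Assemble a little-endian 32-bit word byte by byte, independent of the
 * host byte order; this is the guarantee le32_to_cpu() provides. */
static uint32_t sketch_le32_to_cpu(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        /* 0x00001234 as it would sit in desc->data[] on the wire */
        uint8_t wire[4] = { 0x34, 0x12, 0x00, 0x00 };

        printf("decoded word: 0x%x\n", sketch_le32_to_cpu(wire));
        return 0;
}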
hclge_cmd_setup_basic_desc(desc, cmd, true); @@ -738,7 +747,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; dev_info(&hdev->pdev->dev, "\n"); dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", - rx_com_wl->com_wl.high, rx_com_wl->com_wl.low); + le16_to_cpu(rx_com_wl->com_wl.high), + le16_to_cpu(rx_com_wl->com_wl.low)); cmd = HCLGE_OPC_RX_GBL_PKT_CNT; hclge_cmd_setup_basic_desc(desc, cmd, true); @@ -749,7 +759,8 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; dev_info(&hdev->pdev->dev, "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", - rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low); + le16_to_cpu(rx_packet_cnt->com_wl.high), + le16_to_cpu(rx_packet_cnt->com_wl.low)); dev_info(&hdev->pdev->dev, "\n"); if (!hnae3_dev_dcb_supported(hdev)) { @@ -769,14 +780,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) dev_info(&hdev->pdev->dev, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, - rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); + le16_to_cpu(rx_priv_wl->tc_wl[i].high), + le16_to_cpu(rx_priv_wl->tc_wl[i].low)); rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) dev_info(&hdev->pdev->dev, "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + HCLGE_TC_NUM_ONE_DESC, - rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); + le16_to_cpu(rx_priv_wl->tc_wl[i].high), + le16_to_cpu(rx_priv_wl->tc_wl[i].low)); cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; hclge_cmd_setup_basic_desc(&desc[0], cmd, true); @@ -791,16 +804,16 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) dev_info(&hdev->pdev->dev, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, - rx_com_thrd->com_thrd[i].high, - rx_com_thrd->com_thrd[i].low); + le16_to_cpu(rx_com_thrd->com_thrd[i].high), + le16_to_cpu(rx_com_thrd->com_thrd[i].low)); rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) dev_info(&hdev->pdev->dev, "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + HCLGE_TC_NUM_ONE_DESC, - rx_com_thrd->com_thrd[i].high, - rx_com_thrd->com_thrd[i].low); + le16_to_cpu(rx_com_thrd->com_thrd[i].high), + le16_to_cpu(rx_com_thrd->com_thrd[i].low)); return; err_qos_cmd_send: @@ -845,7 +858,8 @@ static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); snprintf(printf_buf, HCLGE_DBG_BUF_LEN, "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", - req0->index, req0->mac_addr[0], req0->mac_addr[1], + le16_to_cpu(req0->index), + req0->mac_addr[0], req0->mac_addr[1], req0->mac_addr[2], req0->mac_addr[3], req0->mac_addr[4], req0->mac_addr[5]); @@ -929,7 +943,7 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) } } -static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) +void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) { dev_info(&hdev->pdev->dev, "PF reset count: %u\n", hdev->rst_stats.pf_rst_cnt); @@ -945,8 +959,6 @@ static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) hdev->rst_stats.hw_reset_done_cnt); dev_info(&hdev->pdev->dev, "reset count: %u\n", hdev->rst_stats.reset_cnt); - dev_info(&hdev->pdev->dev, "reset count: %u\n", - hdev->rst_stats.reset_cnt); dev_info(&hdev->pdev->dev, "reset fail count: %u\n", hdev->rst_stats.reset_fail_cnt); dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", @@ -961,6 +973,7 @@ 
static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG)); dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING)); + dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); } static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev) @@ -1110,6 +1123,82 @@ static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev) } } +static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid) +{ + struct hclge_qs_shapping_cmd *shap_cfg_cmd; + u8 ir_u, ir_b, ir_s, bs_b, bs_s; + struct hclge_desc desc; + u32 shapping_para; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true); + + shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; + shap_cfg_cmd->qs_id = cpu_to_le16(qsid); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "qs%u failed to get tx_rate, ret=%d\n", + qsid, ret); + return; + } + + shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para); + ir_b = hclge_tm_get_field(shapping_para, IR_B); + ir_u = hclge_tm_get_field(shapping_para, IR_U); + ir_s = hclge_tm_get_field(shapping_para, IR_S); + bs_b = hclge_tm_get_field(shapping_para, BS_B); + bs_s = hclge_tm_get_field(shapping_para, BS_S); + + dev_info(&hdev->pdev->dev, + "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n", + qsid, ir_b, ir_u, ir_s, bs_b, bs_s); +} + +static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev) +{ + struct hnae3_knic_private_info *kinfo; + struct hclge_vport *vport; + int vport_id, i; + + for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) { + vport = &hdev->vport[vport_id]; + kinfo = &vport->nic.kinfo; + + dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id); + + for (i = 0; i < kinfo->num_tc; i++) { + u16 qsid = vport->qs_offset + i; + + hclge_dbg_dump_qs_shaper_single(hdev, qsid); + } + } +} + +static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev, + const char *cmd_buf) +{ +#define HCLGE_MAX_QSET_NUM 1024 + + u16 qsid; + int ret; + + ret = kstrtou16(cmd_buf, 0, &qsid); + if (ret) { + hclge_dbg_dump_qs_shaper_all(hdev); + return; + } + + if (qsid >= HCLGE_MAX_QSET_NUM) { + dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n", + qsid); + return; + } + + hclge_dbg_dump_qs_shaper_single(hdev, qsid); +} + int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) { #define DUMP_REG "dump reg" @@ -1145,6 +1234,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf) &cmd_buf[sizeof("dump ncl_config")]); } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) { hclge_dbg_dump_mac_tnl_status(hdev); + } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) { + hclge_dbg_dump_qs_shaper(hdev, + &cmd_buf[sizeof("dump qs shaper")]); } else { dev_info(&hdev->pdev->dev, "unknown command\n"); return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 87dece0e745d..dc66b4e13377 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -1747,7 +1747,7 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev, if (vf_id) { if (vf_id >= hdev->num_alloc_vport) { - dev_err(dev, "invalid vf id(%d)\n", vf_id); + dev_err(dev, "invalid vf id(%u)\n", vf_id); return; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c052bb33b3d3..7c7038676d6d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -55,6 +55,8 @@ #define HCLGE_LINK_STATUS_MS 10 +#define HCLGE_VF_VPORT_START_NUM 1 + static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); static int hclge_init_vlan_config(struct hclge_dev *hdev); static void hclge_sync_vlan_filter(struct hclge_dev *hdev); @@ -323,8 +325,7 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = { { .flags = HCLGE_MAC_MGR_MASK_VLAN_B, .ethter_type = cpu_to_le16(ETH_P_LLDP), - .mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)), - .mac_addr_lo16 = cpu_to_le16(htons(0x000E)), + .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e}, .i_port_bitmap = 0x1, }, }; @@ -1194,6 +1195,35 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability) hclge_parse_backplane_link_mode(hdev, speed_ability); } +static u32 hclge_get_max_speed(u8 speed_ability) +{ + if (speed_ability & HCLGE_SUPPORT_100G_BIT) + return HCLGE_MAC_SPEED_100G; + + if (speed_ability & HCLGE_SUPPORT_50G_BIT) + return HCLGE_MAC_SPEED_50G; + + if (speed_ability & HCLGE_SUPPORT_40G_BIT) + return HCLGE_MAC_SPEED_40G; + + if (speed_ability & HCLGE_SUPPORT_25G_BIT) + return HCLGE_MAC_SPEED_25G; + + if (speed_ability & HCLGE_SUPPORT_10G_BIT) + return HCLGE_MAC_SPEED_10G; + + if (speed_ability & HCLGE_SUPPORT_1G_BIT) + return HCLGE_MAC_SPEED_1G; + + if (speed_ability & HCLGE_SUPPORT_100M_BIT) + return HCLGE_MAC_SPEED_100M; + + if (speed_ability & HCLGE_SUPPORT_10M_BIT) + return HCLGE_MAC_SPEED_10M; + + return HCLGE_MAC_SPEED_1G; +} + static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) { struct hclge_cfg_param_cmd *req; @@ -1364,9 +1394,11 @@ static int hclge_configure(struct hclge_dev *hdev) hclge_parse_link_mode(hdev, cfg.speed_ability); + hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); + if ((hdev->tc_max > HNAE3_MAX_TC) || (hdev->tc_max < 1)) { - dev_warn(&hdev->pdev->dev, "TC num = %d.\n", + dev_warn(&hdev->pdev->dev, "TC num = %u.\n", hdev->tc_max); hdev->tc_max = 1; } @@ -1626,7 +1658,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; if (hdev->num_tqps < num_vport) { - dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", + dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", hdev->num_tqps, num_vport); return -EINVAL; } @@ -1649,6 +1681,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) for (i = 0; i < num_vport; i++) { vport->back = hdev; vport->vport_id = i; + vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO; vport->mps = HCLGE_MAC_DEFAULT_FRAME; vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE; vport->rxvlan_cfg.rx_vlan_offload_en = true; @@ -2312,7 +2345,7 @@ static int hclge_init_msi(struct hclge_dev *hdev) } if (vectors < hdev->num_msi) dev_warn(&hdev->pdev->dev, - "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", + "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", hdev->num_msi, vectors); hdev->num_msi = vectors; @@ -2744,7 +2777,7 @@ static void hclge_update_port_capability(struct hclge_mac *mac) else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER) mac->module_type = HNAE3_MODULE_TYPE_TP; - if (mac->support_autoneg == true) { + if (mac->support_autoneg) { linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported); linkmode_copy(mac->advertising, mac->supported); } else { @@ 
-2871,6 +2904,62 @@ static int hclge_get_status(struct hnae3_handle *handle) return hdev->hw.mac.link; } +static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) +{ + if (pci_num_vf(hdev->pdev) == 0) { + dev_err(&hdev->pdev->dev, + "SRIOV is disabled, can not get vport(%d) info.\n", vf); + return NULL; + } + + if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { + dev_err(&hdev->pdev->dev, + "vf id(%d) is out of range(0 <= vfid < %d)\n", + vf, pci_num_vf(hdev->pdev)); + return NULL; + } + + /* VF start from 1 in vport */ + vf += HCLGE_VF_VPORT_START_NUM; + return &hdev->vport[vf]; +} + +static int hclge_get_vf_config(struct hnae3_handle *handle, int vf, + struct ifla_vf_info *ivf) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + ivf->vf = vf; + ivf->linkstate = vport->vf_info.link_state; + ivf->spoofchk = vport->vf_info.spoofchk; + ivf->trusted = vport->vf_info.trusted; + ivf->min_tx_rate = 0; + ivf->max_tx_rate = vport->vf_info.max_tx_rate; + ether_addr_copy(ivf->mac, vport->vf_info.mac); + + return 0; +} + +static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, + int link_state) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + vport->vf_info.link_state = link_state; + + return 0; +} + static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) { u32 rst_src_reg, cmdq_src_reg, msix_src_reg; @@ -3191,7 +3280,7 @@ static int hclge_reset_wait(struct hclge_dev *hdev) if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { dev_err(&hdev->pdev->dev, - "flr wait timeout: %d\n", cnt); + "flr wait timeout: %u\n", cnt); return -EBUSY; } @@ -3241,7 +3330,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); if (ret) { dev_err(&hdev->pdev->dev, - "set vf(%d) rst failed %d!\n", + "set vf(%u) rst failed %d!\n", vport->vport_id, ret); return ret; } @@ -3256,7 +3345,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) ret = hclge_inform_reset_assert_to_vf(vport); if (ret) dev_warn(&hdev->pdev->dev, - "inform reset to vf(%d) failed %d!\n", + "inform reset to vf(%u) failed %d!\n", vport->vport_id, ret); } @@ -3569,7 +3658,7 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) hdev->rst_stats.reset_fail_cnt++; set_bit(hdev->reset_type, &hdev->reset_pending); dev_info(&hdev->pdev->dev, - "re-schedule reset task(%d)\n", + "re-schedule reset task(%u)\n", hdev->rst_stats.reset_fail_cnt); return true; } @@ -3580,6 +3669,9 @@ static bool hclge_reset_err_handle(struct hclge_dev *hdev) hclge_reset_handshake(hdev, true); dev_err(&hdev->pdev->dev, "Reset fail!\n"); + + hclge_dbg_dump_rst_info(hdev); + return false; } @@ -3779,12 +3871,13 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) HCLGE_RESET_INTERVAL))) { mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); return; - } else if (hdev->default_reset_request) + } else if (hdev->default_reset_request) { hdev->reset_level = hclge_get_reset_level(ae_dev, &hdev->default_reset_request); - else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) + } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { hdev->reset_level = HNAE3_FUNC_RESET; + } dev_info(&hdev->pdev->dev, "received reset event, reset type is 
%d\n", hdev->reset_level); @@ -3909,6 +4002,7 @@ static void hclge_service_task(struct work_struct *work) hclge_update_link_status(hdev); hclge_update_vport_alive(hdev); hclge_sync_vlan_filter(hdev); + if (hdev->fd_arfs_expire_timer >= HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL) { hclge_rfs_filter_expire(hdev); hdev->fd_arfs_expire_timer = 0; @@ -4415,7 +4509,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev) */ if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) { dev_err(&hdev->pdev->dev, - "Configure rss tc size failed, invalid TC_SIZE = %d\n", + "Configure rss tc size failed, invalid TC_SIZE = %u\n", rss_size); return -EINVAL; } @@ -4593,8 +4687,8 @@ static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector, return ret; } -int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, - struct hclge_promisc_param *param) +static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, + struct hclge_promisc_param *param) { struct hclge_promisc_cfg_cmd *req; struct hclge_desc desc; @@ -4621,8 +4715,9 @@ int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, return ret; } -void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, - bool en_mc, bool en_bc, int vport_id) +static void hclge_promisc_param_init(struct hclge_promisc_param *param, + bool en_uc, bool en_mc, bool en_bc, + int vport_id) { if (!param) return; @@ -4637,12 +4732,21 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, param->vf_id = vport_id; } +int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, + bool en_mc_pmc, bool en_bc_pmc) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_promisc_param param; + + hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, + vport->vport_id); + return hclge_cmd_set_promisc_mode(hdev, ¶m); +} + static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, bool en_mc_pmc) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - struct hclge_promisc_param param; bool en_bc_pmc = true; /* For revision 0x20, if broadcast promisc enabled, vlan filter is @@ -4652,9 +4756,8 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, if (handle->pdev->revision == 0x20) en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? 
true : false; - hclge_promisc_param_init(¶m, en_uc_pmc, en_mc_pmc, en_bc_pmc, - vport->vport_id); - return hclge_cmd_set_promisc_mode(hdev, ¶m); + return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc, + en_bc_pmc); } static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) @@ -4756,7 +4859,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev) break; default: dev_err(&hdev->pdev->dev, - "Unsupported flow director mode %d\n", + "Unsupported flow director mode %u\n", hdev->fd_cfg.fd_mode); return -EOPNOTSUPP; } @@ -5086,7 +5189,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, true); if (ret) { dev_err(&hdev->pdev->dev, - "fd key_y config fail, loc=%d, ret=%d\n", + "fd key_y config fail, loc=%u, ret=%d\n", rule->queue_id, ret); return ret; } @@ -5095,7 +5198,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, true); if (ret) dev_err(&hdev->pdev->dev, - "fd key_x config fail, loc=%d, ret=%d\n", + "fd key_x config fail, loc=%u, ret=%d\n", rule->queue_id, ret); return ret; } @@ -5344,7 +5447,7 @@ static int hclge_fd_update_rule_list(struct hclge_dev *hdev, } } else if (!is_add) { dev_err(&hdev->pdev->dev, - "delete fail, rule %d is inexistent\n", + "delete fail, rule %u is inexistent\n", location); return -EINVAL; } @@ -5584,7 +5687,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, if (vf > hdev->num_req_vfs) { dev_err(&hdev->pdev->dev, - "Error: vf id (%d) > max vf num (%d)\n", + "Error: vf id (%u) > max vf num (%u)\n", vf, hdev->num_req_vfs); return -EINVAL; } @@ -5594,7 +5697,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, if (ring >= tqps) { dev_err(&hdev->pdev->dev, - "Error: queue id (%d) > max tqp num (%d)\n", + "Error: queue id (%u) > max tqp num (%u)\n", ring, tqps - 1); return -EINVAL; } @@ -5653,7 +5756,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle, if (!hclge_fd_rule_exist(hdev, fs->location)) { dev_err(&hdev->pdev->dev, - "Delete fail, rule %d is inexistent\n", fs->location); + "Delete fail, rule %u is inexistent\n", fs->location); return -ENOENT; } @@ -5730,7 +5833,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) if (ret) { dev_warn(&hdev->pdev->dev, - "Restore rule %d failed, remove it\n", + "Restore rule %u failed, remove it\n", rule->location); clear_bit(rule->location, hdev->fd_bmap); hlist_del(&rule->rule_node); @@ -6735,7 +6838,7 @@ static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, if (cmdq_resp) { dev_err(&hdev->pdev->dev, - "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", + "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n", cmdq_resp); return -EIO; } @@ -6987,7 +7090,7 @@ static int hclge_init_umv_space(struct hclge_dev *hdev) if (allocated_size < hdev->wanted_umv_size) dev_warn(&hdev->pdev->dev, - "Alloc umv space failed, want %d, get %d\n", + "Alloc umv space failed, want %u, get %u\n", hdev->wanted_umv_size, allocated_size); mutex_init(&hdev->umv_mutex); @@ -7155,7 +7258,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, /* check if we just hit the duplicate */ if (!ret) { - dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n", + dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n", vport->vport_id, addr); return 0; } @@ -7336,7 +7439,7 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr, mc_flag = is_write_tbl && mac_type == HCLGE_MAC_ADDR_MC; list_for_each_entry_safe(mac_cfg, tmp, list, node) { - if (strncmp(mac_cfg->mac_addr, mac_addr, ETH_ALEN) == 0) { + if 
(ether_addr_equal(mac_cfg->mac_addr, mac_addr)) { if (uc_flag && mac_cfg->hd_tbl_status) hclge_rm_uc_addr_common(vport, mac_addr); @@ -7408,7 +7511,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, if (cmdq_resp) { dev_err(&hdev->pdev->dev, - "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n", + "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n", cmdq_resp); return -EIO; } @@ -7430,7 +7533,7 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, break; default: dev_err(&hdev->pdev->dev, - "add mac ethertype failed for undefined, code=%d.\n", + "add mac ethertype failed for undefined, code=%u.\n", resp_code); return_status = -EIO; } @@ -7438,6 +7541,67 @@ static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, return return_status; } +static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx, + u8 *mac_addr) +{ + struct hclge_mac_vlan_tbl_entry_cmd req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u16 egress_port = 0; + int i; + + if (is_zero_ether_addr(mac_addr)) + return false; + + memset(&req, 0, sizeof(req)); + hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); + req.egress_port = cpu_to_le16(egress_port); + hclge_prepare_mac_addr(&req, mac_addr, false); + + if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT) + return true; + + vf_idx += HCLGE_VF_VPORT_START_NUM; + for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) + if (i != vf_idx && + ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac)) + return true; + + return false; +} + +static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf, + u8 *mac_addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (ether_addr_equal(mac_addr, vport->vf_info.mac)) { + dev_info(&hdev->pdev->dev, + "Specified MAC(=%pM) is same as before, no change committed!\n", + mac_addr); + return 0; + } + + if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) { + dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n", + mac_addr); + return -EEXIST; + } + + ether_addr_copy(vport->vf_info.mac, mac_addr); + dev_info(&hdev->pdev->dev, + "MAC of VF %d has been set to %pM, and it will be reinitialized!\n", + vf, mac_addr); + + return hclge_inform_reset_assert_to_vf(vport); +} + static int hclge_add_mgr_tbl(struct hclge_dev *hdev, const struct hclge_mac_mgr_tbl_entry_cmd *req) { @@ -7610,7 +7774,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, bool is_kill, u16 vlan, __be16 proto) { -#define HCLGE_MAX_VF_BYTES 16 + struct hclge_vport *vport = &hdev->vport[vfid]; struct hclge_vlan_filter_vf_cfg_cmd *req0; struct hclge_vlan_filter_vf_cfg_cmd *req1; struct hclge_desc desc[2]; @@ -7619,10 +7783,18 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, int ret; /* if vf vlan table is full, firmware will close vf vlan filter, it - * is unable and unnecessary to add new vlan id to vf vlan filter + * is unable and unnecessary to add new vlan id to vf vlan filter. + * If spoof check is enable, and vf vlan is full, it shouldn't add + * new vlan, because tx packets with these vlan id will be dropped. 
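For context, and not part of the patch itself: the hclge_rm_vport_mac_table() hunk above swaps strncmp() for ether_addr_equal() when matching a MAC address. strncmp() stops at the first 0x00 byte, so two different MACs that share a leading zero octet wrongly compare equal; comparing all ETH_ALEN bytes (which is what ether_addr_equal() effectively does) does not. A small stand-alone demonstration:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
        /* Two different MACs that both start with a 0x00 octet */
        unsigned char a[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0x0a, 0x0b, 0x0c };
        unsigned char b[ETH_ALEN] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff };

        /* strncmp() hits the leading NUL byte and stops comparing */
        printf("strncmp says equal: %d\n",
               strncmp((char *)a, (char *)b, ETH_ALEN) == 0);

        /* a full 6-byte compare sees the difference */
        printf("memcmp says equal:  %d\n", memcmp(a, b, ETH_ALEN) == 0);
        return 0;
}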
*/ - if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) + if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { + if (vport->vf_info.spoofchk && vlan) { + dev_err(&hdev->pdev->dev, + "Can't add vlan due to spoof check is on and vf vlan table is full\n"); + return -EPERM; + } return 0; + } hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_VLAN_FILTER_VF_CFG, false); @@ -7666,7 +7838,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, } dev_err(&hdev->pdev->dev, - "Add vf vlan filter fail, ret =%d.\n", + "Add vf vlan filter fail, ret =%u.\n", req0->resp_code); } else { #define HCLGE_VF_VLAN_DEL_NO_FOUND 1 @@ -7682,7 +7854,7 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, return 0; dev_err(&hdev->pdev->dev, - "Kill vf vlan filter fail, ret =%d.\n", + "Kill vf vlan filter fail, ret =%u.\n", req0->resp_code); } @@ -7701,9 +7873,10 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); - vlan_offset_160 = vlan_id / 160; - vlan_offset_byte = (vlan_id % 160) / 8; - vlan_offset_byte_val = 1 << (vlan_id % 8); + vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP; + vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) / + HCLGE_VLAN_BYTE_SIZE; + vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE); req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; req->vlan_offset = vlan_offset_160; @@ -7731,7 +7904,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, proto); if (ret) { dev_err(&hdev->pdev->dev, - "Set %d vport vlan filter config fail, ret =%d.\n", + "Set %u vport vlan filter config fail, ret =%d.\n", vport_id, ret); return ret; } @@ -7743,7 +7916,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { dev_err(&hdev->pdev->dev, - "Add port vlan failed, vport %d is already in vlan %d\n", + "Add port vlan failed, vport %u is already in vlan %u\n", vport_id, vlan_id); return -EINVAL; } @@ -7751,7 +7924,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, if (is_kill && !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { dev_err(&hdev->pdev->dev, - "Delete port vlan failed, vport %d is not in vlan %d\n", + "Delete port vlan failed, vport %u is not in vlan %u\n", vport_id, vlan_id); return -EINVAL; } @@ -8119,12 +8292,15 @@ static void hclge_restore_vlan_table(struct hnae3_handle *handle) } list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) { - if (vlan->hd_tbl_status) - hclge_set_vlan_filter_hw(hdev, - htons(ETH_P_8021Q), - vport->vport_id, - vlan->vlan_id, - false); + int ret; + + if (!vlan->hd_tbl_status) + continue; + ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), + vport->vport_id, + vlan->vlan_id, false); + if (ret) + break; } } @@ -8404,6 +8580,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) struct hclge_dev *hdev = vport->back; int i, max_frm_size, ret; + /* HW supprt 2 layer vlan */ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; if (max_frm_size < HCLGE_MAC_MIN_FRAME || max_frm_size > HCLGE_MAC_MAX_FRAME) @@ -8819,16 +8996,16 @@ static void hclge_info_show(struct hclge_dev *hdev) dev_info(dev, "PF info begin:\n"); - dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps); - dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); - dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); - dev_info(dev, 
"Numbers of vports: %d\n", hdev->num_alloc_vport); - dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport); - dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs); - dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); - dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size); - dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size); - dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size); + dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); + dev_info(dev, "Numbers of vmdp vports: %u\n", hdev->num_vmdq_vport); + dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); + dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); + dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); + dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); + dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); dev_info(dev, "This is %s PF\n", hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); dev_info(dev, "DCB %s\n", @@ -8844,10 +9021,9 @@ static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, { struct hnae3_client *client = vport->nic.client; struct hclge_dev *hdev = ae_dev->priv; - int rst_cnt; + int rst_cnt = hdev->rst_stats.reset_cnt; int ret; - rst_cnt = hdev->rst_stats.reset_cnt; ret = client->ops->init_instance(&vport->nic); if (ret) return ret; @@ -8947,7 +9123,6 @@ static int hclge_init_client_instance(struct hnae3_client *client, switch (client->type) { case HNAE3_CLIENT_KNIC: - hdev->nic_client = client; vport->nic.client = client; ret = hclge_init_nic_client_instance(ae_dev, vport); @@ -9146,7 +9321,7 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev) ret = hclge_set_vf_rst(hdev, vport->vport_id, false); if (ret) dev_warn(&hdev->pdev->dev, - "clear vf(%d) rst failed %d!\n", + "clear vf(%u) rst failed %d!\n", vport->vport_id, ret); } } @@ -9168,6 +9343,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->reset_type = HNAE3_NONE_RESET; hdev->reset_level = HNAE3_FUNC_RESET; ae_dev->priv = hdev; + + /* HW supprt 2 layer vlan */ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; mutex_init(&hdev->vport_lock); @@ -9366,6 +9543,219 @@ static void hclge_stats_clear(struct hclge_dev *hdev) memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); } +static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) +{ + return hclge_config_switch_param(hdev, vf, enable, + HCLGE_SWITCH_ANTI_SPOOF_MASK); +} + +static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) +{ + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, + HCLGE_FILTER_FE_NIC_INGRESS_B, + enable, vf); +} + +static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) +{ + int ret; + + ret = hclge_set_mac_spoofchk(hdev, vf, enable); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set vf %d mac spoof check %s failed, ret=%d\n", + vf, enable ? "on" : "off", ret); + return ret; + } + + ret = hclge_set_vlan_spoofchk(hdev, vf, enable); + if (ret) + dev_err(&hdev->pdev->dev, + "Set vf %d vlan spoof check %s failed, ret=%d\n", + vf, enable ? 
"on" : "off", ret); + + return ret; +} + +static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf, + bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 new_spoofchk = enable ? 1 : 0; + int ret; + + if (hdev->pdev->revision == 0x20) + return -EOPNOTSUPP; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (vport->vf_info.spoofchk == new_spoofchk) + return 0; + + if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) + dev_warn(&hdev->pdev->dev, + "vf %d vlan table is full, enable spoof check may cause its packet send fail\n", + vf); + else if (enable && hclge_is_umv_space_full(vport)) + dev_warn(&hdev->pdev->dev, + "vf %d mac table is full, enable spoof check may cause its packet send fail\n", + vf); + + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); + if (ret) + return ret; + + vport->vf_info.spoofchk = new_spoofchk; + return 0; +} + +static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + int i; + + if (hdev->pdev->revision == 0x20) + return 0; + + /* resume the vf spoof check state after reset */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, + vport->vf_info.spoofchk); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u32 new_trusted = enable ? 1 : 0; + bool en_bc_pmc; + int ret; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (vport->vf_info.trusted == new_trusted) + return 0; + + /* Disable promisc mode for VF if it is not trusted any more. 
*/ + if (!enable && vport->vf_info.promisc_enable) { + en_bc_pmc = hdev->pdev->revision != 0x20; + ret = hclge_set_vport_promisc_mode(vport, false, false, + en_bc_pmc); + if (ret) + return ret; + vport->vf_info.promisc_enable = 0; + hclge_inform_vf_promisc_info(vport); + } + + vport->vf_info.trusted = new_trusted; + + return 0; +} + +static void hclge_reset_vf_rate(struct hclge_dev *hdev) +{ + int ret; + int vf; + + /* reset vf rate to default value */ + for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { + struct hclge_vport *vport = &hdev->vport[vf]; + + vport->vf_info.max_tx_rate = 0; + ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate); + if (ret) + dev_err(&hdev->pdev->dev, + "vf%d failed to reset to default, ret=%d\n", + vf - HCLGE_VF_VPORT_START_NUM, ret); + } +} + +static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf, + int min_tx_rate, int max_tx_rate) +{ + if (min_tx_rate != 0 || + max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) { + dev_err(&hdev->pdev->dev, + "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n", + min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); + return -EINVAL; + } + + return 0; +} + +static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf, + int min_tx_rate, int max_tx_rate, bool force) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate); + if (ret) + return ret; + + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + if (!force && max_tx_rate == vport->vf_info.max_tx_rate) + return 0; + + ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate); + if (ret) + return ret; + + vport->vf_info.max_tx_rate = max_tx_rate; + + return 0; +} + +static int hclge_resume_vf_rate(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle = &hdev->vport->nic; + struct hclge_vport *vport; + int ret; + int vf; + + /* resume the vf max_tx_rate after reset */ + for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { + vport = hclge_get_vf_vport(hdev, vf); + if (!vport) + return -EINVAL; + + /* zero means max rate, after reset, firmware already set it to + * max rate, so just continue. + */ + if (!vport->vf_info.max_tx_rate) + continue; + + ret = hclge_set_vf_rate(handle, vf, 0, + vport->vf_info.max_tx_rate, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "vf%d failed to resume tx_rate:%u, ret=%d\n", + vf, vport->vf_info.max_tx_rate, ret); + return ret; + } + } + + return 0; +} + static void hclge_reset_vport_state(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; @@ -9443,6 +9833,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + /* Log and clear the hw errors those already occurred */ + hclge_handle_all_hns_hw_errors(ae_dev); + /* Re-enable the hw error interrupts because * the interrupts get disabled on global reset. 
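For context, and not part of the patch itself: hclge_vf_rate_param_check() above only accepts min_tx_rate == 0 and a max_tx_rate between 0 and the MAC's max speed (values in Mbit/s; 0 is later treated as "no limit"). A stand-alone sketch of that validation; the 25G port speed is an assumed example value:

#include <stdio.h>

/* Same bounds as hclge_vf_rate_param_check(): the minimum rate must stay 0
 * and the maximum rate must fit within the port speed, all in Mbit/s. */
static int vf_rate_param_check(unsigned int max_speed,
                               int min_tx_rate, int max_tx_rate)
{
        if (min_tx_rate != 0 || max_tx_rate < 0 ||
            (unsigned int)max_tx_rate > max_speed)
                return -1;      /* the driver returns -EINVAL here */
        return 0;
}

int main(void)
{
        unsigned int port_speed = 25000;        /* assumed 25G link */

        printf("10G cap on 25G port: %d\n",
               vf_rate_param_check(port_speed, 0, 10000));
        printf("40G cap on 25G port: %d\n",
               vf_rate_param_check(port_speed, 0, 40000));
        return 0;
}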
*/ @@ -9465,6 +9858,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) } hclge_reset_vport_state(hdev); + ret = hclge_reset_vport_spoofchk(hdev); + if (ret) + return ret; + + ret = hclge_resume_vf_rate(hdev); + if (ret) + return ret; dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -9477,6 +9877,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) struct hclge_dev *hdev = ae_dev->priv; struct hclge_mac *mac = &hdev->hw.mac; + hclge_reset_vf_rate(hdev); hclge_misc_affinity_teardown(hdev); hclge_state_uninit(hdev); @@ -9541,8 +9942,8 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, u16 tc_offset[HCLGE_MAX_TC_NUM] = {0}; struct hclge_dev *hdev = vport->back; u16 tc_size[HCLGE_MAX_TC_NUM] = {0}; - int cur_rss_size = kinfo->rss_size; - int cur_tqps = kinfo->num_tqps; + u16 cur_rss_size = kinfo->rss_size; + u16 cur_tqps = kinfo->num_tqps; u16 tc_valid[HCLGE_MAX_TC_NUM]; u16 roundup_size; u32 *rss_indir; @@ -9596,7 +9997,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, out: if (!ret) dev_info(&hdev->pdev->dev, - "Channels changed, rss_size from %d to %d, tqps from %d to %d", + "Channels changed, rss_size from %u to %u, tqps from %u to %u", cur_rss_size, kinfo->rss_size, cur_tqps, kinfo->rss_size * kinfo->num_tc); @@ -10199,6 +10600,12 @@ static const struct hnae3_ae_ops hclge_ops = { .mac_connect_phy = hclge_mac_connect_phy, .mac_disconnect_phy = hclge_mac_disconnect_phy, .restore_vlan_table = hclge_restore_vlan_table, + .get_vf_config = hclge_get_vf_config, + .set_vf_link_state = hclge_set_vf_link_state, + .set_vf_spoofchk = hclge_set_vf_spoofchk, + .set_vf_trust = hclge_set_vf_trust, + .set_vf_rate = hclge_set_vf_rate, + .set_vf_mac = hclge_set_vf_mac, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 615cde1cbf0b..ebb4c6e9aed3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -141,7 +141,6 @@ /* Factor used to calculate offset and bitmap of VF num */ #define HCLGE_VF_NUM_PER_CMD 64 -#define HCLGE_VF_NUM_PER_BYTE 8 enum HLCGE_PORT_TYPE { HOST_PORT, @@ -226,8 +225,6 @@ enum hclge_evt_cause { HCLGE_VECTOR0_EVENT_OTHER, }; -#define HCLGE_MPF_ENBALE 1 - enum HCLGE_MAC_SPEED { HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */ HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ @@ -258,6 +255,7 @@ struct hclge_mac { u8 support_autoneg; u8 speed_type; /* 0: sfp speed, 1: active speed */ u32 speed; + u32 max_speed; u32 speed_ability; /* speed ability supported by current media */ u32 module_type; /* sub media type, e.g. 
kr/cr/sr/lr */ u32 fec_mode; /* active fec mode */ @@ -655,7 +653,6 @@ struct hclge_rst_stats { u32 hw_reset_done_cnt; /* the number of HW reset has completed */ u32 pf_rst_cnt; /* the number of PF reset */ u32 flr_rst_cnt; /* the number of FLR */ - u32 core_rst_cnt; /* the number of CORE reset */ u32 global_rst_cnt; /* the number of GLOBAL */ u32 imp_rst_cnt; /* the number of IMP reset */ u32 reset_cnt; /* the number of reset */ @@ -886,6 +883,15 @@ struct hclge_port_base_vlan_config { struct hclge_vlan_info vlan_info; }; +struct hclge_vf_info { + int link_state; + u8 mac[ETH_ALEN]; + u32 spoofchk; + u32 max_tx_rate; + u32 trusted; + u16 promisc_enable; +}; + struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ @@ -917,15 +923,15 @@ struct hclge_vport { unsigned long state; unsigned long last_active_jiffies; u32 mps; /* Max packet size */ + struct hclge_vf_info vf_info; struct list_head uc_mac_list; /* Store VF unicast table */ struct list_head mc_mac_list; /* Store VF multicast table */ struct list_head vlan_list; /* Store VF vlan table */ }; -void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, - bool en_mc, bool en_bc, int vport_id); - +int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, + bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr); int hclge_rm_uc_addr_common(struct hclge_vport *vport, @@ -994,4 +1000,6 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc); void hclge_report_hw_error(struct hclge_dev *hdev, enum hnae3_hw_error_type type); +void hclge_inform_vf_promisc_info(struct hclge_vport *vport); +void hclge_dbg_dump_rst_info(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f5da28a60d00..0b433ebe6a2d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -26,7 +26,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, if (resp_data_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { dev_err(&hdev->pdev->dev, - "PF fail to gen resp to VF len %d exceeds max len %d\n", + "PF fail to gen resp to VF len %u exceeds max len %u\n", resp_data_len, HCLGE_MBX_MAX_RESP_DATA_SIZE); /* If resp_data_len is too long, set the value to max length @@ -205,12 +205,38 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en, static int hclge_set_vf_promisc_mode(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *req) { - bool en_bc = req->msg[1] ? true : false; - struct hclge_promisc_param param; +#define HCLGE_MBX_BC_INDEX 1 +#define HCLGE_MBX_UC_INDEX 2 +#define HCLGE_MBX_MC_INDEX 3 - /* vf is not allowed to enable unicast/multicast broadcast */ - hclge_promisc_param_init(¶m, false, false, en_bc, vport->vport_id); - return hclge_cmd_set_promisc_mode(vport->back, ¶m); + bool en_bc = req->msg[HCLGE_MBX_BC_INDEX] ? true : false; + bool en_uc = req->msg[HCLGE_MBX_UC_INDEX] ? true : false; + bool en_mc = req->msg[HCLGE_MBX_MC_INDEX] ? true : false; + int ret; + + if (!vport->vf_info.trusted) { + en_uc = false; + en_mc = false; + } + + ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc); + if (req->mbx_need_resp) + hclge_gen_resp_to_vf(vport, req, ret, NULL, 0); + + vport->vf_info.promisc_enable = (en_uc || en_mc) ? 
1 : 0; + + return ret; +} + +void hclge_inform_vf_promisc_info(struct hclge_vport *vport) +{ + u8 dest_vfid = (u8)vport->vport_id; + u8 msg_data[2]; + + memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16)); + + hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), + HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid); } static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, @@ -223,6 +249,20 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, if (mbx_req->msg[1] == HCLGE_MBX_MAC_VLAN_UC_MODIFY) { const u8 *old_addr = (const u8 *)(&mbx_req->msg[8]); + /* If VF MAC has been configured by the host then it + * cannot be overridden by the MAC specified by the VM. + */ + if (!is_zero_ether_addr(vport->vf_info.mac) && + !ether_addr_equal(mac_addr, vport->vf_info.mac)) { + status = -EPERM; + goto out; + } + + if (!is_valid_ether_addr(mac_addr)) { + status = -EINVAL; + goto out; + } + hclge_rm_uc_addr_common(vport, old_addr); status = hclge_add_uc_addr_common(vport, mac_addr); if (status) { @@ -245,11 +285,12 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport, false, HCLGE_MAC_ADDR_UC); } else { dev_err(&hdev->pdev->dev, - "failed to set unicast mac addr, unknown subcode %d\n", + "failed to set unicast mac addr, unknown subcode %u\n", mbx_req->msg[1]); return -EIO; } +out: if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT) hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0); @@ -278,7 +319,7 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport, false, HCLGE_MAC_ADDR_MC); } else { dev_err(&hdev->pdev->dev, - "failed to set mcast mac addr, unknown subcode %d\n", + "failed to set mcast mac addr, unknown subcode %u\n", mbx_req->msg[1]); return -EIO; } @@ -324,6 +365,9 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, proto = msg_cmd->proto; status = hclge_set_vlan_filter(handle, cpu_to_be16(proto), vlan, is_kill); + if (mbx_req->mbx_need_resp) + return hclge_gen_resp_to_vf(vport, mbx_req, status, + NULL, 0); } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) { struct hnae3_handle *handle = &vport->nic; bool en = msg_cmd->is_kill ? 
true : false; @@ -398,6 +442,13 @@ static int hclge_get_vf_queue_info(struct hclge_vport *vport, HCLGE_TQPS_RSS_INFO_LEN); } +static int hclge_get_vf_mac_addr(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + return hclge_gen_resp_to_vf(vport, mbx_req, 0, vport->vf_info.mac, + ETH_ALEN); +} + static int hclge_get_vf_queue_depth(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) @@ -428,6 +479,9 @@ static int hclge_get_vf_media_type(struct hclge_vport *vport, static int hclge_get_link_info(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req) { +#define HCLGE_VF_LINK_STATE_UP 1U +#define HCLGE_VF_LINK_STATE_DOWN 0U + struct hclge_dev *hdev = vport->back; u16 link_status; u8 msg_data[8]; @@ -435,7 +489,19 @@ static int hclge_get_link_info(struct hclge_vport *vport, u16 duplex; /* mac.link can only be 0 or 1 */ - link_status = (u16)hdev->hw.mac.link; + switch (vport->vf_info.link_state) { + case IFLA_VF_LINK_STATE_ENABLE: + link_status = HCLGE_VF_LINK_STATE_UP; + break; + case IFLA_VF_LINK_STATE_DISABLE: + link_status = HCLGE_VF_LINK_STATE_DOWN; + break; + case IFLA_VF_LINK_STATE_AUTO: + default: + link_status = (u16)hdev->hw.mac.link; + break; + } + duplex = hdev->hw.mac.duplex; memcpy(&msg_data[0], &link_status, sizeof(u16)); memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32)); @@ -489,7 +555,7 @@ static void hclge_reset_vf(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; int ret; - dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!", + dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!", vport->vport_id); ret = hclge_func_reset_cmd(hdev, vport->vport_id); @@ -524,7 +590,8 @@ static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf)); - return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2); + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, + sizeof(resp_data)); } static int hclge_get_rss_key(struct hclge_vport *vport, @@ -614,7 +681,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev) flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, - "dropped invalid mailbox message, code = %d\n", + "dropped invalid mailbox message, code = %u\n", req->msg[0]); /* dropping/not processing this invalid message */ @@ -749,12 +816,19 @@ void hclge_mbx_handler(struct hclge_dev *hdev) case HCLGE_MBX_PUSH_LINK_STATUS: hclge_handle_link_change_event(hdev, req); break; + case HCLGE_MBX_GET_MAC_ADDR: + ret = hclge_get_vf_mac_addr(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get MAC for VF\n", + ret); + break; case HCLGE_MBX_NCSI_ERROR: hclge_handle_ncsi_error(hdev); break; default: dev_err(&hdev->pdev->dev, - "un-supported mailbox message, code = %d\n", + "un-supported mailbox message, code = %u\n", req->msg[0]); break; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index dc4dfd4602ab..696c5ae922e3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -134,7 +134,7 @@ int hclge_mac_mdio_config(struct hclge_dev *hdev) "no phy device is connected to mdio bus\n"); return 0; } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { - dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n", + 
dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n", hdev->hw.mac.phy_addr); return -EINVAL; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 62399cc1c5a6..fbc39a2480d0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -46,7 +46,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, #define DIVISOR_CLK (1000 * 8) #define DIVISOR_IR_B_126 (126 * DIVISOR_CLK) - const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { + static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { 6 * 256, /* Prioriy level */ 6 * 32, /* Prioriy group level */ 6 * 8, /* Port level */ @@ -511,6 +511,49 @@ static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id, return hclge_cmd_send(&hdev->hw, &desc, 1); } +int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_qs_shapping_cmd *shap_cfg_cmd; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u8 ir_b, ir_u, ir_s; + u32 shaper_para; + int ret, i; + + if (!max_tx_rate) + max_tx_rate = HCLGE_ETHER_MAX_RATE; + + ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + + for (i = 0; i < kinfo->num_tc; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, + false); + + shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data; + shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i); + shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n", + vport->vport_id, shap_cfg_cmd->qs_id, + max_tx_rate, ret); + return ret; + } + } + + return 0; +} + static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; @@ -532,7 +575,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) /* Set to user value, no larger than max_rss_size. 
*/ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && kinfo->req_rss_size <= max_rss_size) { - dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n", + dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n", kinfo->rss_size, kinfo->req_rss_size); kinfo->rss_size = kinfo->req_rss_size; } else if (kinfo->rss_size > max_rss_size || diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 260f22d19d81..45bcb67f90fd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -96,6 +96,12 @@ struct hclge_pg_shapping_cmd { __le32 pg_shapping_para; }; +struct hclge_qs_shapping_cmd { + __le16 qs_id; + u8 rsvd[2]; + __le32 qs_shapping_para; +}; + #define HCLGE_BP_GRP_NUM 32 #define HCLGE_BP_SUB_GRP_ID_S 0 #define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0) @@ -154,4 +160,6 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); +int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); + #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index d5d1cc5d1b6e..af2245e3bb95 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -50,7 +50,7 @@ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) rmb(); /* Make sure head is ready before touch any data */ if (!hclgevf_is_valid_csq_clean_head(csq, head)) { - dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head, csq->next_to_use, csq->next_to_clean); dev_warn(&hdev->pdev->dev, "Disabling any further commands to IMP firmware\n"); @@ -92,9 +92,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) u32 reg_val; if (ring->flag == HCLGEVF_TYPE_CSQ) { - reg_val = (u32)ring->desc_dma_addr; + reg_val = lower_32_bits(ring->desc_dma_addr); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + reg_val = upper_32_bits(ring->desc_dma_addr); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG); @@ -105,9 +105,9 @@ static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); } else { - reg_val = (u32)ring->desc_dma_addr; + reg_val = lower_32_bits(ring->desc_dma_addr); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + reg_val = upper_32_bits(ring->desc_dma_addr); hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 7d7e712691b9..25d78a5aaa34 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1113,6 +1113,7 @@ static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) } static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev 
*hdev, + bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc) { struct hclge_mbx_vf_to_pf_cmd *req; @@ -1120,10 +1121,11 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, int ret; req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data; - hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE; req->msg[1] = en_bc_pmc ? 1 : 0; + req->msg[2] = en_uc_pmc ? 1 : 0; + req->msg[3] = en_mc_pmc ? 1 : 0; ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -1133,9 +1135,17 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, return ret; } -static int hclgevf_set_promisc_mode(struct hclgevf_dev *hdev, bool en_bc_pmc) +static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, + bool en_mc_pmc) { - return hclgevf_cmd_set_promisc_mode(hdev, en_bc_pmc); + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct pci_dev *pdev = hdev->pdev; + bool en_bc_pmc; + + en_bc_pmc = pdev->revision != 0x20; + + return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, + en_bc_pmc); } static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id, @@ -1174,11 +1184,37 @@ static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) } } +static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) +{ + u8 host_mac[ETH_ALEN]; + int status; + + status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MAC_ADDR, 0, NULL, 0, + true, host_mac, ETH_ALEN); + if (status) { + dev_err(&hdev->pdev->dev, + "fail to get VF MAC from host %d", status); + return status; + } + + ether_addr_copy(p, host_mac); + + return 0; +} + static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 host_mac_addr[ETH_ALEN]; - ether_addr_copy(p, hdev->hw.mac.mac_addr); + if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) + return; + + hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); + if (hdev->has_pf_mac) + ether_addr_copy(p, host_mac_addr); + else + ether_addr_copy(p, hdev->hw.mac.mac_addr); } static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p, @@ -1275,7 +1311,7 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, memcpy(&msg_data[3], &proto, sizeof(proto)); ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, HCLGE_MBX_VLAN_FILTER, msg_data, - HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0); + HCLGEVF_VLAN_MBX_MSG_LEN, true, NULL, 0); /* when remove hw vlan filter failed, record the vlan id, * and try to remove it from hw later, to be consistence @@ -1513,12 +1549,39 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) return ret; } +static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) +{ + dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", + hdev->rst_stats.vf_func_rst_cnt); + dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", + hdev->rst_stats.flr_rst_cnt); + dev_info(&hdev->pdev->dev, "VF reset count: %u\n", + hdev->rst_stats.vf_rst_cnt); + dev_info(&hdev->pdev->dev, "reset done count: %u\n", + hdev->rst_stats.rst_done_cnt); + dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", + hdev->rst_stats.hw_rst_done_cnt); + dev_info(&hdev->pdev->dev, "reset count: %u\n", + hdev->rst_stats.rst_cnt); + dev_info(&hdev->pdev->dev, "reset fail count: %u\n", + hdev->rst_stats.rst_fail_cnt); + dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); + dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STAT_REG)); + dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); + dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", + hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); + dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); +} + static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) { /* recover handshake status with IMP when reset fail */ hclgevf_reset_handshake(hdev, true); hdev->rst_stats.rst_fail_cnt++; - dev_err(&hdev->pdev->dev, "failed to reset VF(%d)\n", + dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", hdev->rst_stats.rst_fail_cnt); if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) @@ -1527,6 +1590,8 @@ static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev) if (hclgevf_is_reset_pending(hdev)) { set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); + } else { + hclgevf_dump_rst_info(hdev); } } @@ -1748,6 +1813,8 @@ static void hclgevf_service_timer(struct timer_list *t) static void hclgevf_reset_service_task(struct work_struct *work) { +#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 + struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, rst_service_task); int ret; @@ -1800,7 +1867,7 @@ static void hclgevf_reset_service_task(struct work_struct *work) * We cannot do much for 2. but to check first we can try reset * our PCIe + stack and see if it alleviates the problem. */ - if (hdev->reset_attempts > 3) { + if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { /* prepare for full reset of stack + pcie interface */ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); @@ -2103,7 +2170,6 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg); if (ret) return ret; - } /* Initialize RSS indirect table */ @@ -2272,7 +2338,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) } if (vectors < hdev->num_msi) dev_warn(&hdev->pdev->dev, - "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", + "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", hdev->num_msi, vectors); hdev->num_msi = vectors; @@ -2348,12 +2414,12 @@ static void hclgevf_info_show(struct hclgevf_dev *hdev) dev_info(dev, "VF info begin:\n"); - dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps); - dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); - dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); - dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport); - dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); - dev_info(dev, "PF media type of this VF: %d\n", + dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); + dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); + dev_info(dev, "PF media type of this VF: %u\n", hdev->hw.mac.media_type); dev_info(dev, "VF info end.\n"); @@ -2648,12 +2714,6 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } - if (pdev->revision >= 0x21) { - ret = hclgevf_set_promisc_mode(hdev, true); - if (ret) - return ret; - } - dev_info(&hdev->pdev->dev, "Reset done\n"); return 0; @@ -2728,17 +2788,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_config; - /* vf is not allowed to enable unicast/multicast promisc mode. 
- * For revision 0x20, default to disable broadcast promisc mode, - * firmware makes sure broadcast packets can be accepted. - * For revision 0x21, default to enable broadcast promisc mode. - */ - if (pdev->revision >= 0x21) { - ret = hclgevf_set_promisc_mode(hdev, true); - if (ret) - goto err_config; - } - /* Initialize RSS for this VF */ ret = hclgevf_rss_init_hw(hdev); if (ret) { @@ -3152,6 +3201,7 @@ static const struct hnae3_ae_ops hclgevf_ops = { .get_global_queue_id = hclgevf_get_qid_global, .set_timer_task = hclgevf_set_timer_task, .get_link_mode = hclgevf_get_link_mode, + .set_promisc_mode = hclgevf_set_promisc_mode, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 2b8d6bc6d224..2f4c81bf4169 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -150,8 +150,6 @@ enum hclgevf_states { HCLGEVF_STATE_CMD_DISABLE, }; -#define HCLGEVF_MPF_ENBALE 1 - struct hclgevf_mac { u8 media_type; u8 module_type; @@ -266,6 +264,7 @@ struct hclgevf_dev { u16 num_tx_desc; /* desc num of per tx queue */ u16 num_rx_desc; /* desc num of per rx queue */ u8 hw_tc_map; + u8 has_pf_mac; u16 num_msi; u16 num_msi_left; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index a108191c9e50..7cbd715d5e7a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -33,7 +33,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) { dev_err(&hdev->pdev->dev, - "VF mbx response len(=%d) exceeds maximum(=%d)\n", + "VF mbx response len(=%u) exceeds maximum(=%u)\n", resp_len, HCLGE_MBX_MAX_RESP_DATA_SIZE); return -EINVAL; @@ -49,7 +49,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (i >= HCLGEVF_MAX_TRY_TIMES) { dev_err(&hdev->pdev->dev, - "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n", + "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n", code0, code1, hdev->mbx_resp.received_resp, i); return -EIO; } @@ -68,10 +68,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) { dev_err(&hdev->pdev->dev, - "VF could not match resp code(code0=%d,code1=%d), %d\n", + "VF could not match resp code(code0=%u,code1=%u), %d\n", code0, code1, mbx_resp->resp_status); dev_err(&hdev->pdev->dev, - "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n", + "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n", r_code0, r_code1); return -EIO; } @@ -168,7 +168,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) { dev_warn(&hdev->pdev->dev, - "dropped invalid mailbox message, code = %d\n", + "dropped invalid mailbox message, code = %u\n", req->msg[0]); /* dropping/not processing this invalid message */ @@ -187,7 +187,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) case HCLGE_MBX_PF_VF_RESP: if (resp->received_resp) dev_warn(&hdev->pdev->dev, - "VF mbx resp flag not clear(%d)\n", + "VF mbx resp flag not clear(%u)\n", req->msg[1]); resp->received_resp = true; @@ -205,6 +205,7 @@ void hclgevf_mbx_handler(struct 
hclgevf_dev *hdev) case HCLGE_MBX_ASSERTING_RESET: case HCLGE_MBX_LINK_STAT_MODE: case HCLGE_MBX_PUSH_VLAN_INFO: + case HCLGE_MBX_PUSH_PROMISC_INFO: /* set this mbx event as pending. This is required as we * might loose interrupt event when mbx task is busy * handling. This shall be cleared when mbx task just @@ -218,7 +219,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) if (atomic_read(&hdev->arq.count) >= HCLGE_MBX_MAX_ARQ_MSG_NUM) { dev_warn(&hdev->pdev->dev, - "Async Q full, dropping msg(%d)\n", + "Async Q full, dropping msg(%u)\n", req->msg[1]); break; } @@ -235,7 +236,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) break; default: dev_err(&hdev->pdev->dev, - "VF received unsupported(%d) mbx msg from PF\n", + "VF received unsupported(%u) mbx msg from PF\n", req->msg[0]); break; } @@ -248,6 +249,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) crq->next_to_use); } +static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev, + u16 promisc_info) +{ + if (!promisc_info) + dev_info(&hdev->pdev->dev, + "Promisc mode is closed by host for being untrusted.\n"); +} + void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { enum hnae3_reset_type reset_type; @@ -313,9 +322,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) hclgevf_update_port_base_vlan_info(hdev, state, (u8 *)vlan_info, 8); break; + case HCLGE_MBX_PUSH_PROMISC_INFO: + hclgevf_parse_promisc_info(hdev, msg_q[1]); + break; default: dev_err(&hdev->pdev->dev, - "fetched unsupported(%d) message from arq\n", + "fetched unsupported(%u) message from arq\n", msg_q[0]); break; } |
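
A minimal standalone sketch (not part of the patch) of the PF-side behaviour the promisc hunks above introduce: hclge_set_vf_promisc_mode() clears a VF's unicast/multicast promiscuous request unless the host has marked the VF as trusted, records the effective state in vf_info.promisc_enable, and hclge_inform_vf_promisc_info() pushes that u16 back to the VF as a 2-byte mailbox payload. The struct and function names below are illustrative only.

/* Sketch of the trust-gated promisc decision and mailbox payload packing. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct vf_info_sketch {
	bool trusted;
	uint16_t promisc_enable;
};

/* Mirror of the policy in hclge_set_vf_promisc_mode(): an untrusted VF may
 * only keep broadcast promisc; unicast/multicast requests are dropped.
 */
static void apply_vf_promisc_request(struct vf_info_sketch *vf,
				     bool en_uc, bool en_mc, bool en_bc)
{
	if (!vf->trusted) {
		en_uc = false;
		en_mc = false;
	}

	/* The real driver would call hclge_set_vport_promisc_mode() here
	 * with (en_uc, en_mc, en_bc) to program the hardware.
	 */
	vf->promisc_enable = (en_uc || en_mc) ? 1 : 0;
}

/* Pack the state the way hclge_inform_vf_promisc_info() does: the u16
 * promisc_enable copied into the two message bytes that follow the
 * HCLGE_MBX_PUSH_PROMISC_INFO opcode.
 */
static void pack_promisc_push(const struct vf_info_sketch *vf,
			      uint8_t msg_data[2])
{
	memcpy(&msg_data[0], &vf->promisc_enable, sizeof(uint16_t));
}

int main(void)
{
	struct vf_info_sketch vf = { .trusted = false };
	uint8_t msg[2];

	/* VF asks for uc+mc promisc but is not trusted: request is gated. */
	apply_vf_promisc_request(&vf, true, true, true);
	pack_promisc_push(&vf, msg);
	printf("untrusted: promisc_enable=%u msg=[%02x %02x]\n",
	       vf.promisc_enable, msg[0], msg[1]);

	/* Host trusts the VF; the same request now takes effect. */
	vf.trusted = true;
	apply_vf_promisc_request(&vf, true, true, true);
	pack_promisc_push(&vf, msg);
	printf("trusted:   promisc_enable=%u msg=[%02x %02x]\n",
	       vf.promisc_enable, msg[0], msg[1]);

	return 0;
}

On the VF side, a pushed promisc_enable of 0 only produces the informational message in hclgevf_parse_promisc_info(); trust itself is normally toggled from the host, typically via "ip link set <pf> vf <n> trust on|off".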
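
Similarly, a small standalone sketch (again illustrative, not the driver code) of the link-state filtering added to hclge_get_link_info(): the status reported to a VF is no longer the raw MAC link bit but is first overridden by the per-VF link_state configured through ndo_set_vf_link_state. The enum values below mirror IFLA_VF_LINK_STATE_* from include/uapi/linux/if_link.h.

#include <stdint.h>
#include <stdio.h>

enum vf_link_state {
	VF_LINK_AUTO	= 0,	/* IFLA_VF_LINK_STATE_AUTO    */
	VF_LINK_ENABLE	= 1,	/* IFLA_VF_LINK_STATE_ENABLE  */
	VF_LINK_DISABLE	= 2,	/* IFLA_VF_LINK_STATE_DISABLE */
};

/* Decide the link status pushed to the VF, as in hclge_get_link_info(). */
static uint16_t vf_link_status(enum vf_link_state state, uint16_t mac_link)
{
	switch (state) {
	case VF_LINK_ENABLE:
		return 1;		/* force link up towards the VF   */
	case VF_LINK_DISABLE:
		return 0;		/* force link down towards the VF */
	case VF_LINK_AUTO:
	default:
		return mac_link;	/* follow the physical port link  */
	}
}

int main(void)
{
	printf("auto,  port up   -> %u\n", vf_link_status(VF_LINK_AUTO, 1));
	printf("disable, port up -> %u\n", vf_link_status(VF_LINK_DISABLE, 1));
	printf("enable, port down-> %u\n", vf_link_status(VF_LINK_ENABLE, 0));
	return 0;
}

From the host this is typically exercised with "ip link set <pf> vf <n> state auto|enable|disable", which reaches the new set_vf_link_state ae_ops callback wired up in hclge_ops above.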