Diffstat (limited to 'drivers/net/ethernet')
279 files changed, 13154 insertions, 4573 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 5a274b99f299..6a19b5393ed1 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -15,9 +15,6 @@ if ETHERNET config MDIO tristate -config SUNGEM_PHY - tristate - source "drivers/net/ethernet/3com/Kconfig" source "drivers/net/ethernet/actions/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig index da3bdd302502..760a9a60bc15 100644 --- a/drivers/net/ethernet/adi/Kconfig +++ b/drivers/net/ethernet/adi/Kconfig @@ -21,6 +21,7 @@ config ADIN1110 tristate "Analog Devices ADIN1110 MAC-PHY" depends on SPI && NET_SWITCHDEV select CRC8 + select PHYLIB help Say yes here to build support for Analog Devices ADIN1110 Low Power 10BASE-T1L Ethernet MAC-PHY. diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index d7c274af6d4d..8b4ef5121308 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -464,8 +464,9 @@ static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg) * bitfield of ADIN1110_MDIOACC register will contain * the requested register value. */ - ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val, - (val & ADIN1110_MDIO_TRDONE), 10000, 30000); + ret = readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val, + (val & ADIN1110_MDIO_TRDONE), + 100, 30000); if (ret < 0) return ret; @@ -495,8 +496,9 @@ static int adin1110_mdio_write(struct mii_bus *bus, int phy_id, if (ret < 0) return ret; - return readx_poll_timeout(adin1110_read_mdio_acc, priv, val, - (val & ADIN1110_MDIO_TRDONE), 10000, 30000); + return readx_poll_timeout_atomic(adin1110_read_mdio_acc, priv, val, + (val & ADIN1110_MDIO_TRDONE), + 100, 30000); } /* ADIN1110 MAC-PHY contains an ADIN1100 PHY. 
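The adin1110 hunk above swaps readx_poll_timeout() for readx_poll_timeout_atomic(): the former sleeps between register reads and so must not be used in atomic context, while the latter busy-waits with udelay(). The much shorter 100 us poll interval fits the busy-wait model, since spinning in 10 ms steps would be prohibitively coarse. Below is a minimal standalone sketch of the atomic variant's control flow; the real helper is a macro in <linux/iopoll.h>, and the names here are illustrative stand-ins, not driver code.

    #include <stdint.h>

    /* Hypothetical stand-in for the kernel's busy-wait primitive. */
    extern void udelay(unsigned long usecs);

    /* Poll read_op(ctx) until cond(val) holds, busy-waiting delay_us
     * between reads and giving up after timeout_us -- the shape of
     * readx_poll_timeout_atomic(), reduced to a plain function.
     */
    static int poll_atomic_sketch(uint32_t (*read_op)(void *ctx), void *ctx,
                                  int (*cond)(uint32_t val),
                                  unsigned long delay_us,
                                  unsigned long timeout_us)
    {
        unsigned long waited = 0;

        for (;;) {
            uint32_t val = read_op(ctx);

            if (cond(val))
                return 0;         /* condition met within the timeout */
            if (waited >= timeout_us)
                return -1;        /* the kernel macro returns -ETIMEDOUT */
            udelay(delay_us);     /* busy-wait: legal in atomic context */
            waited += delay_us;
        }
    }
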
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 633b321d7fdd..9e9e4a03f1a8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue) struct ena_com_admin_sq *sq = &admin_queue->sq; u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth); - sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, - &sq->dma_addr, GFP_KERNEL); + sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL); if (!sq->entries) { netdev_err(ena_dev->net_device, "Memory allocation failed\n"); @@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue) struct ena_com_admin_cq *cq = &admin_queue->cq; u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth); - cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, - &cq->dma_addr, GFP_KERNEL); + cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL); if (!cq->entries) { netdev_err(ena_dev->net_device, "Memory allocation failed\n"); @@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH; size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH); - aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, - &aenq->dma_addr, GFP_KERNEL); + aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL); if (!aenq->entries) { netdev_err(ena_dev->net_device, "Memory allocation failed\n"); @@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev, aenq_caps = 0; aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK; - aenq_caps |= (sizeof(struct ena_admin_aenq_entry) - << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & - ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; + aenq_caps |= + (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) & + ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK; writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF); if (unlikely(!aenq_handlers)) { - netdev_err(ena_dev->net_device, - "AENQ handlers pointer is NULL\n"); + netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n"); return -EINVAL; } @@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu } if (unlikely(!admin_queue->comp_ctx)) { - netdev_err(admin_queue->ena_dev->net_device, - "Completion context is NULL\n"); + netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n"); return NULL; } if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) { - netdev_err(admin_queue->ena_dev->net_device, - "Completion context is occupied\n"); + netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n"); return NULL; } @@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu /* In case of queue FULL */ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { - netdev_dbg(admin_queue->ena_dev->net_device, - "Admin queue is full.\n"); + netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n"); admin_queue->stats.out_of_space++; return ERR_PTR(-ENOSPC); } @@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue) struct ena_comp_ctx *comp_ctx; u16 i; - admin_queue->comp_ctx = - devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL); + 
admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL); if (unlikely(!admin_queue->comp_ctx)) { netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; @@ -336,20 +328,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); io_sq->desc_addr.virt_addr = - dma_alloc_coherent(ena_dev->dmadev, size, - &io_sq->desc_addr.phys_addr, + dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr, GFP_KERNEL); set_dev_node(ena_dev->dmadev, dev_node); if (!io_sq->desc_addr.virt_addr) { io_sq->desc_addr.virt_addr = dma_alloc_coherent(ena_dev->dmadev, size, - &io_sq->desc_addr.phys_addr, - GFP_KERNEL); + &io_sq->desc_addr.phys_addr, GFP_KERNEL); } if (!io_sq->desc_addr.virt_addr) { - netdev_err(ena_dev->net_device, - "Memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Memory allocation failed\n"); return -ENOMEM; } } @@ -367,16 +356,14 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); - io_sq->bounce_buf_ctrl.base_buffer = - devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); + io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); set_dev_node(ena_dev->dmadev, dev_node); if (!io_sq->bounce_buf_ctrl.base_buffer) io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL); if (!io_sq->bounce_buf_ctrl.base_buffer) { - netdev_err(ena_dev->net_device, - "Bounce buffer memory allocation failed\n"); + netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n"); return -ENOMEM; } @@ -425,13 +412,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, prev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); io_cq->cdesc_addr.virt_addr = - dma_alloc_coherent(ena_dev->dmadev, size, - &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); + dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); set_dev_node(ena_dev->dmadev, prev_node); if (!io_cq->cdesc_addr.virt_addr) { io_cq->cdesc_addr.virt_addr = - dma_alloc_coherent(ena_dev->dmadev, size, - &io_cq->cdesc_addr.phys_addr, + dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL); } @@ -514,8 +499,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue, u8 comp_status) { if (unlikely(comp_status != 0)) - netdev_err(admin_queue->ena_dev->net_device, - "Admin command failed[%u]\n", comp_status); + netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n", + comp_status); switch (comp_status) { case ENA_ADMIN_SUCCESS: @@ -580,8 +565,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c } if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { - netdev_err(admin_queue->ena_dev->net_device, - "Command was aborted\n"); + netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n"); spin_lock_irqsave(&admin_queue->q_lock, flags); admin_queue->stats.aborted_cmd++; spin_unlock_irqrestore(&admin_queue->q_lock, flags); @@ -589,8 +573,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c goto err; } - WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", - comp_ctx->status); + WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status); ret = ena_com_comp_status_to_errno(admin_queue, 
comp_ctx->comp_status); err: @@ -634,8 +617,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - netdev_err(ena_dev->net_device, - "Failed to set LLQ configurations: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret); return ret; } @@ -658,8 +640,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, llq_default_cfg->llq_header_location; } else { netdev_err(ena_dev->net_device, - "Invalid header location control, supported: 0x%x\n", - supported_feat); + "Invalid header location control, supported: 0x%x\n", supported_feat); return -EINVAL; } @@ -681,8 +662,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, netdev_err(ena_dev->net_device, "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", - llq_default_cfg->llq_stride_ctrl, - supported_feat, llq_info->desc_stride_ctrl); + llq_default_cfg->llq_stride_ctrl, supported_feat, + llq_info->desc_stride_ctrl); } } else { llq_info->desc_stride_ctrl = 0; @@ -704,8 +685,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, llq_info->desc_list_entry_size = 256; } else { netdev_err(ena_dev->net_device, - "Invalid entry_size_ctrl, supported: 0x%x\n", - supported_feat); + "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat); return -EINVAL; } @@ -750,8 +730,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, netdev_err(ena_dev->net_device, "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n", - llq_default_cfg->llq_num_decs_before_header, - supported_feat, llq_info->descs_num_before_header); + llq_default_cfg->llq_num_decs_before_header, supported_feat, + llq_info->descs_num_before_header); } /* Check for accelerated queue supported */ llq_accel_mode_get = llq_features->accel_mode.u.get; @@ -767,8 +747,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, rc = ena_com_set_llq(ena_dev); if (rc) - netdev_err(ena_dev->net_device, - "Cannot set LLQ configuration: %d\n", rc); + netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc); return rc; } @@ -780,8 +759,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com int ret; wait_for_completion_timeout(&comp_ctx->wait_event, - usecs_to_jiffies( - admin_queue->completion_timeout)); + usecs_to_jiffies(admin_queue->completion_timeout)); /* In case the command wasn't completed find out the root cause. * There might be 2 kinds of errors @@ -797,8 +775,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com if (comp_ctx->status == ENA_CMD_COMPLETED) { netdev_err(admin_queue->ena_dev->net_device, "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n", - comp_ctx->cmd_opcode, - admin_queue->auto_polling ? "ON" : "OFF"); + comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF"); /* Check if fallback to polling is enabled */ if (admin_queue->auto_polling) admin_queue->polling = true; @@ -867,15 +844,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) if (unlikely(i == timeout)) { netdev_err(ena_dev->net_device, "Reading reg failed for timeout. 
expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n", - mmio_read->seq_num, offset, read_resp->req_id, - read_resp->reg_off); + mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off); ret = ENA_MMIO_READ_TIMEOUT; goto err; } if (read_resp->reg_off != offset) { - netdev_err(ena_dev->net_device, - "Read failure: wrong offset provided\n"); + netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n"); ret = ENA_MMIO_READ_TIMEOUT; } else { ret = read_resp->reg_val; @@ -934,8 +909,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, sizeof(destroy_resp)); if (unlikely(ret && (ret != -ENODEV))) - netdev_err(ena_dev->net_device, - "Failed to destroy io sq error: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret); return ret; } @@ -949,8 +923,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, if (io_cq->cdesc_addr.virt_addr) { size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; - dma_free_coherent(ena_dev->dmadev, size, - io_cq->cdesc_addr.virt_addr, + dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr, io_cq->cdesc_addr.phys_addr); io_cq->cdesc_addr.virt_addr = NULL; @@ -959,8 +932,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, if (io_sq->desc_addr.virt_addr) { size = io_sq->desc_entry_size * io_sq->q_depth; - dma_free_coherent(ena_dev->dmadev, size, - io_sq->desc_addr.virt_addr, + dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr, io_sq->desc_addr.phys_addr); io_sq->desc_addr.virt_addr = NULL; @@ -985,8 +957,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { - netdev_err(ena_dev->net_device, - "Reg read timeout occurred\n"); + netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } @@ -1026,8 +997,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, int ret; if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { - netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", - feature_id); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id); return -EOPNOTSUPP; } @@ -1064,8 +1034,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, if (unlikely(ret)) netdev_err(ena_dev->net_device, - "Failed to submit get_feature command %d error: %d\n", - feature_id, ret); + "Failed to submit get_feature command %d error: %d\n", feature_id, ret); return ret; } @@ -1104,13 +1073,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; - if (!ena_com_check_supported_feature_id(ena_dev, - ENA_ADMIN_RSS_HASH_FUNCTION)) + if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) return -EOPNOTSUPP; - rss->hash_key = - dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), - &rss->hash_key_dma_addr, GFP_KERNEL); + rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), + &rss->hash_key_dma_addr, GFP_KERNEL); if (unlikely(!rss->hash_key)) return -ENOMEM; @@ -1123,8 +1090,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) struct ena_rss *rss = &ena_dev->rss; if (rss->hash_key) - dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), - rss->hash_key, rss->hash_key_dma_addr); + dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key, + rss->hash_key_dma_addr); rss->hash_key = NULL; } @@ 
-1132,9 +1099,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; - rss->hash_ctrl = - dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), - &rss->hash_ctrl_dma_addr, GFP_KERNEL); + rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), + &rss->hash_ctrl_dma_addr, GFP_KERNEL); if (unlikely(!rss->hash_ctrl)) return -ENOMEM; @@ -1147,8 +1113,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) struct ena_rss *rss = &ena_dev->rss; if (rss->hash_ctrl) - dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), - rss->hash_ctrl, rss->hash_ctrl_dma_addr); + dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl, + rss->hash_ctrl_dma_addr); rss->hash_ctrl = NULL; } @@ -1177,15 +1143,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, tbl_size = (1ULL << log_size) * sizeof(struct ena_admin_rss_ind_table_entry); - rss->rss_ind_tbl = - dma_alloc_coherent(ena_dev->dmadev, tbl_size, - &rss->rss_ind_tbl_dma_addr, GFP_KERNEL); + rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr, + GFP_KERNEL); if (unlikely(!rss->rss_ind_tbl)) goto mem_err1; tbl_size = (1ULL << log_size) * sizeof(u16); - rss->host_rss_ind_tbl = - devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); + rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL); if (unlikely(!rss->host_rss_ind_tbl)) goto mem_err2; @@ -1197,8 +1161,7 @@ mem_err2: tbl_size = (1ULL << log_size) * sizeof(struct ena_admin_rss_ind_table_entry); - dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, - rss->rss_ind_tbl_dma_addr); + dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr); rss->rss_ind_tbl = NULL; mem_err1: rss->tbl_log_size = 0; @@ -1261,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, &create_cmd.sq_ba, io_sq->desc_addr.phys_addr); if (unlikely(ret)) { - netdev_err(ena_dev->net_device, - "Memory address set failed\n"); + netdev_err(ena_dev->net_device, "Memory address set failed\n"); return ret; } } @@ -1273,8 +1235,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, (struct ena_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (unlikely(ret)) { - netdev_err(ena_dev->net_device, - "Failed to create IO SQ. error: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret); return ret; } @@ -1284,16 +1245,12 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, (uintptr_t)cmd_completion.sq_doorbell_offset); if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar - + cmd_completion.llq_headers_offset); - io_sq->desc_addr.pbuf_dev_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar + cmd_completion.llq_descriptors_offset); } - netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", - io_sq->idx, io_sq->q_depth); + netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth); return ret; } @@ -1420,8 +1377,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, (struct ena_admin_acq_entry *)&cmd_completion, sizeof(cmd_completion)); if (unlikely(ret)) { - netdev_err(ena_dev->net_device, - "Failed to create IO CQ. error: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to create IO CQ. 
error: %d\n", ret); return ret; } @@ -1430,18 +1386,12 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + cmd_completion.cq_interrupt_unmask_register_offset); - if (cmd_completion.cq_head_db_register_offset) - io_cq->cq_head_db_reg = - (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + - cmd_completion.cq_head_db_register_offset); - if (cmd_completion.numa_node_register_offset) io_cq->numa_node_cfg_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + cmd_completion.numa_node_register_offset); - netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", - io_cq->idx, io_cq->q_depth); + netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); return ret; } @@ -1451,8 +1401,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid, struct ena_com_io_cq **io_cq) { if (qid >= ENA_TOTAL_NUM_QUEUES) { - netdev_err(ena_dev->net_device, - "Invalid queue number %d but the max is %d\n", qid, + netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid, ENA_TOTAL_NUM_QUEUES); return -EINVAL; } @@ -1492,8 +1441,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) spin_lock_irqsave(&admin_queue->q_lock, flags); while (atomic_read(&admin_queue->outstanding_cmds) != 0) { spin_unlock_irqrestore(&admin_queue->q_lock, flags); - ena_delay_exponential_backoff_us(exp++, - ena_dev->ena_min_poll_delay_us); + ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); spin_lock_irqsave(&admin_queue->q_lock, flags); } spin_unlock_irqrestore(&admin_queue->q_lock, flags); @@ -1519,8 +1467,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, sizeof(destroy_resp)); if (unlikely(ret && (ret != -ENODEV))) - netdev_err(ena_dev->net_device, - "Failed to destroy IO CQ. error: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. 
error: %d\n", ret); return ret; } @@ -1588,8 +1535,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) sizeof(resp)); if (unlikely(ret)) - netdev_err(ena_dev->net_device, - "Failed to config AENQ ret: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret); return ret; } @@ -1610,8 +1556,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev) netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width); if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) { - netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", - width); + netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width); return -EINVAL; } @@ -1633,19 +1578,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev) ctrl_ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CONTROLLER_VERSION_OFF); - if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || - (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { + if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) { netdev_err(ena_dev->net_device, "Reg read timeout occurred\n"); return -ETIME; } dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n", - (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> - ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, + (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT, ver & ENA_REGS_VERSION_MINOR_VERSION_MASK); - dev_info(ena_dev->dmadev, - "ENA controller version: %d.%d.%d implementation version %d\n", + dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n", (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT, (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >> @@ -1694,20 +1636,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev) size = ADMIN_SQ_SIZE(admin_queue->q_depth); if (sq->entries) - dma_free_coherent(ena_dev->dmadev, size, sq->entries, - sq->dma_addr); + dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr); sq->entries = NULL; size = ADMIN_CQ_SIZE(admin_queue->q_depth); if (cq->entries) - dma_free_coherent(ena_dev->dmadev, size, cq->entries, - cq->dma_addr); + dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr); cq->entries = NULL; size = ADMIN_AENQ_SIZE(aenq->q_depth); if (ena_dev->aenq.entries) - dma_free_coherent(ena_dev->dmadev, size, aenq->entries, - aenq->dma_addr); + dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr); aenq->entries = NULL; } @@ -1733,10 +1672,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; spin_lock_init(&mmio_read->lock); - mmio_read->read_resp = - dma_alloc_coherent(ena_dev->dmadev, - sizeof(*mmio_read->read_resp), - &mmio_read->read_resp_dma_addr, GFP_KERNEL); + mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), + &mmio_read->read_resp_dma_addr, GFP_KERNEL); if (unlikely(!mmio_read->read_resp)) goto err; @@ -1767,8 +1704,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev) writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF); writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF); - dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), - mmio_read->read_resp, mmio_read->read_resp_dma_addr); + dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp, + mmio_read->read_resp_dma_addr); mmio_read->read_resp = NULL; } @@ -1800,8 +1737,7 @@ 
int ena_com_admin_init(struct ena_com_dev *ena_dev, } if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { - netdev_err(ena_dev->net_device, - "Device isn't ready, abort com init\n"); + netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n"); return -ENODEV; } @@ -1878,8 +1814,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev, int ret; if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) { - netdev_err(ena_dev->net_device, - "Qid (%d) is bigger than max num of queues (%d)\n", + netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n", ctx->qid, ENA_TOTAL_NUM_QUEUES); return -EINVAL; } @@ -1905,8 +1840,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev, if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) /* header length is limited to 8 bits */ - io_sq->tx_max_header_size = - min_t(u32, ena_dev->tx_max_header_size, SZ_256); + io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256); ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); if (ret) @@ -1938,8 +1872,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid) struct ena_com_io_cq *io_cq; if (qid >= ENA_TOTAL_NUM_QUEUES) { - netdev_err(ena_dev->net_device, - "Qid (%d) is bigger than max num of queues (%d)\n", + netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n", qid, ENA_TOTAL_NUM_QUEUES); return; } @@ -1983,8 +1916,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, if (rc) return rc; - if (get_resp.u.max_queue_ext.version != - ENA_FEATURE_MAX_QUEUE_EXT_VER) + if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER) return -EINVAL; memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext, @@ -2025,18 +1957,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0); if (!rc) - memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, - sizeof(get_resp.u.hw_hints)); + memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints)); else if (rc == -EOPNOTSUPP) - memset(&get_feat_ctx->hw_hints, 0x0, - sizeof(get_feat_ctx->hw_hints)); + memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints)); else return rc; rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0); if (!rc) - memcpy(&get_feat_ctx->llq, &get_resp.u.llq, - sizeof(get_resp.u.llq)); + memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq)); else if (rc == -EOPNOTSUPP) memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); else @@ -2084,8 +2013,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) aenq_common = &aenq_e->aenq_common_desc; /* Go over all the events */ - while ((READ_ONCE(aenq_common->flags) & - ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { + while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { /* Make sure the phase bit (ownership) is as expected before * reading the rest of the descriptor. */ @@ -2094,8 +2022,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) timestamp = (u64)aenq_common->timestamp_low | ((u64)aenq_common->timestamp_high << 32); - netdev_dbg(ena_dev->net_device, - "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n", + netdev_dbg(ena_dev->net_device, "AENQ! 
Group[%x] Syndrome[%x] timestamp: [%llus]\n", aenq_common->group, aenq_common->syndrome, timestamp); /* Handle specific event*/ @@ -2124,8 +2051,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) /* write the aenq doorbell after all AENQ descriptors were read */ mb(); - writel_relaxed((u32)aenq->head, - ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); + writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); } int ena_com_dev_reset(struct ena_com_dev *ena_dev, @@ -2137,15 +2063,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF); - if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || - (cap == ENA_MMIO_READ_TIMEOUT))) { + if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) { netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n"); return -ETIME; } if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) { - netdev_err(ena_dev->net_device, - "Device isn't ready, can't reset device\n"); + netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n"); return -EINVAL; } @@ -2168,8 +2092,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, rc = wait_for_reset_state(ena_dev, timeout, ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK); if (rc != 0) { - netdev_err(ena_dev->net_device, - "Reset indication didn't turn on\n"); + netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n"); return rc; } @@ -2177,8 +2100,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev, writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); rc = wait_for_reset_state(ena_dev, timeout, 0); if (rc != 0) { - netdev_err(ena_dev->net_device, - "Reset indication didn't turn off\n"); + netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n"); return rc; } @@ -2215,8 +2137,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev, sizeof(*get_resp)); if (unlikely(ret)) - netdev_err(ena_dev->net_device, - "Failed to get stats. error: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret); return ret; } @@ -2228,8 +2149,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, int ret; if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) { - netdev_err(ena_dev->net_device, - "Capability %d isn't supported\n", + netdev_err(ena_dev->net_device, "Capability %d isn't supported\n", ENA_ADMIN_ENI_STATS); return -EOPNOTSUPP; } @@ -2266,8 +2186,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu) int ret; if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { - netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", - ENA_ADMIN_MTU); + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU); return -EOPNOTSUPP; } @@ -2286,8 +2205,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu) sizeof(resp)); if (unlikely(ret)) - netdev_err(ena_dev->net_device, - "Failed to set mtu %d. error: %d\n", mtu, ret); + netdev_err(ena_dev->net_device, "Failed to set mtu %d. 
error: %d\n", mtu, ret); return ret; } @@ -2301,8 +2219,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev, ret = ena_com_get_feature(ena_dev, &resp, ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); if (unlikely(ret)) { - netdev_err(ena_dev->net_device, - "Failed to get offload capabilities %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret); return ret; } @@ -2320,8 +2237,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) struct ena_admin_get_feat_resp get_resp; int ret; - if (!ena_com_check_supported_feature_id(ena_dev, - ENA_ADMIN_RSS_HASH_FUNCTION)) { + if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) { netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_FUNCTION); return -EOPNOTSUPP; @@ -2334,8 +2250,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) return ret; if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) { - netdev_err(ena_dev->net_device, - "Func hash %d isn't supported by device, abort\n", + netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n", rss->hash_func); return -EOPNOTSUPP; } @@ -2365,8 +2280,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) { - netdev_err(ena_dev->net_device, - "Failed to set hash function %d. error: %d\n", + netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n", rss->hash_func, ret); return -EINVAL; } @@ -2398,16 +2312,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, return rc; if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { - netdev_err(ena_dev->net_device, - "Flow hash function %d isn't supported\n", func); + netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func); return -EOPNOTSUPP; } if ((func == ENA_ADMIN_TOEPLITZ) && key) { if (key_len != sizeof(hash_key->key)) { netdev_err(ena_dev->net_device, - "key len (%u) doesn't equal the supported size (%zu)\n", - key_len, sizeof(hash_key->key)); + "key len (%u) doesn't equal the supported size (%zu)\n", key_len, + sizeof(hash_key->key)); return -EINVAL; } memcpy(hash_key->key, key, key_len); @@ -2495,8 +2408,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) struct ena_admin_set_feat_resp resp; int ret; - if (!ena_com_check_supported_feature_id(ena_dev, - ENA_ADMIN_RSS_HASH_INPUT)) { + if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) { netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT); return -EOPNOTSUPP; @@ -2527,8 +2439,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) (struct ena_admin_acq_entry *)&resp, sizeof(resp)); if (unlikely(ret)) - netdev_err(ena_dev->net_device, - "Failed to set hash input. error: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to set hash input. 
error: %d\n", ret); return ret; } @@ -2605,8 +2516,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, int rc; if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { - netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", - proto); + netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto); return -EINVAL; } @@ -2658,8 +2568,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) struct ena_admin_set_feat_resp resp; int ret; - if (!ena_com_check_supported_feature_id( - ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) { + if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) { netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); return -EOPNOTSUPP; @@ -2699,8 +2608,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - netdev_err(ena_dev->net_device, - "Failed to set indirect table. error: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret); return ret; } @@ -2779,9 +2687,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; - host_attr->host_info = - dma_alloc_coherent(ena_dev->dmadev, SZ_4K, - &host_attr->host_info_dma_addr, GFP_KERNEL); + host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K, + &host_attr->host_info_dma_addr, GFP_KERNEL); if (unlikely(!host_attr->host_info)) return -ENOMEM; @@ -2827,8 +2734,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) if (host_attr->debug_area_virt_addr) { dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size, - host_attr->debug_area_virt_addr, - host_attr->debug_area_dma_addr); + host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr); host_attr->debug_area_virt_addr = NULL; } } @@ -2877,8 +2783,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev) sizeof(resp)); if (unlikely(ret)) - netdev_err(ena_dev->net_device, - "Failed to set host attributes: %d\n", ret); + netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret); return ret; } @@ -2896,8 +2801,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en u32 *intr_moder_interval) { if (!intr_delay_resolution) { - netdev_err(ena_dev->net_device, - "Illegal interrupt delay granularity value\n"); + netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n"); return -EFAULT; } @@ -2935,14 +2839,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) if (rc) { if (rc == -EOPNOTSUPP) { - netdev_dbg(ena_dev->net_device, - "Feature %d isn't supported\n", + netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_INTERRUPT_MODERATION); rc = 0; } else { netdev_err(ena_dev->net_device, - "Failed to get interrupt moderation admin cmd. rc: %d\n", - rc); + "Failed to get interrupt moderation admin cmd. 
rc: %d\n", rc); } /* no moderation supported, disable adaptive support */ @@ -2990,8 +2892,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); if (unlikely(ena_dev->tx_max_header_size == 0)) { - netdev_err(ena_dev->net_device, - "The size of the LLQ entry is smaller than needed\n"); + netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 3c5081d9d25d..fea57eb8e58b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -109,16 +109,13 @@ struct ena_com_io_cq { /* Interrupt unmask register */ u32 __iomem *unmask_reg; - /* The completion queue head doorbell register */ - u32 __iomem *cq_head_db_reg; - /* numa configuration register (for TPH) */ u32 __iomem *numa_node_cfg_reg; /* The value to write to the above register to unmask * the interrupt of this queue */ - u32 msix_vector; + u32 msix_vector ____cacheline_aligned; enum queue_direction direction; @@ -134,7 +131,6 @@ struct ena_com_io_cq { /* Device queue index */ u16 idx; u16 head; - u16 last_head_update; u8 phase; u8 cdesc_entry_size_in_bytes; @@ -158,7 +154,6 @@ struct ena_com_io_sq { struct ena_com_io_desc_addr desc_addr; u32 __iomem *db_addr; - u8 __iomem *header_addr; enum queue_direction direction; enum ena_admin_placement_policy_type mem_queue_type; diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index f9f886289b97..933e619b3a31 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc( cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr + (head_masked * io_cq->cdesc_entry_size_in_bytes)); - desc_phase = (READ_ONCE(cdesc->status) & - ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> + desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT; if (desc_phase != expected_phase) @@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, io_sq->entries_in_tx_burst_left--; netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, - "Decreasing entries_in_tx_burst_left of queue %d to %d\n", - io_sq->qid, io_sq->entries_in_tx_burst_left); + "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid, + io_sq->entries_in_tx_burst_left); } /* Make sure everything was written into the bounce buffer before @@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq, wmb(); /* The line is completed. 
Copy it to dev */ - __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, - bounce_buffer, (llq_info->desc_list_entry_size) / 8); + __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer, + (llq_info->desc_list_entry_size) / 8); io_sq->tail++; @@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq, header_offset = llq_info->descs_num_before_header * io_sq->desc_entry_size; - if (unlikely((header_offset + header_len) > - llq_info->desc_list_entry_size)) { + if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Trying to write header larger than llq entry can accommodate\n"); return -EFAULT; } if (unlikely(!bounce_buffer)) { - netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, - "Bounce buffer is NULL\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n"); return -EFAULT; } @@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq) bounce_buffer = pkt_ctrl->curr_bounce_buf; if (unlikely(!bounce_buffer)) { - netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, - "Bounce buffer is NULL\n"); + netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n"); return NULL; } @@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, ena_com_cq_inc_head(io_cq); count++; - last = (READ_ONCE(cdesc->status) & - ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> + last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >> ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT; } while (!last); @@ -369,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq, netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n", - ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, - ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err, - ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status); + ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err, + ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status); } /*****************************************************************************/ @@ -403,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, if (unlikely(header_len > io_sq->tx_max_header_size)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, - "Header size is too large %d max header: %d\n", - header_len, io_sq->tx_max_header_size); + "Header size is too large %d max header: %d\n", header_len, + io_sq->tx_max_header_size); return -EINVAL; } - if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && - !buffer_to_push)) { + if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) { netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Push header wasn't provided in LLQ mode\n"); return -EINVAL; @@ -556,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, } netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, - "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, - nb_hw_desc); + "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc); if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) { netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, - "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, - ena_rx_ctx->max_bufs); + "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs); return -ENOSPC; } @@ -586,8 +577,8 @@ int 
ena_com_rx_pkt(struct ena_com_io_cq *io_cq, io_sq->next_to_comp += nb_hw_desc; netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, - "[%s][QID#%d] Updating SQ head to: %d\n", __func__, - io_sq->qid, io_sq->next_to_comp); + "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid, + io_sq->next_to_comp); /* Get rx flags from the last pkt */ ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc); @@ -624,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, desc->req_id = req_id; netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, - "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", - __func__, io_sq->qid, req_id); + "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid, + req_id); desc->buff_addr_lo = (u32)ena_buf->paddr; desc->buff_addr_hi = diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 372b259279ec..72b019758caa 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -8,8 +8,6 @@ #include "ena_com.h" -/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */ -#define ENA_COMP_HEAD_THRESH 4 /* we allow 2 DMA descriptors per LLQ entry */ #define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE (2 * sizeof(struct ena_eth_io_tx_desc)) #define ENA_LLQ_HEADER (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE) @@ -145,8 +143,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq, } netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, - "Queue: %d num_descs: %d num_entries_needed: %d\n", - io_sq->qid, num_descs, num_entries_needed); + "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs, + num_entries_needed); return num_entries_needed > io_sq->entries_in_tx_burst_left; } @@ -157,43 +155,20 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq) u16 tail = io_sq->tail; netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, - "Write submission queue doorbell for queue: %d tail: %d\n", - io_sq->qid, tail); + "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail); writel(tail, io_sq->db_addr); if (is_llq_max_tx_burst_exists(io_sq)) { netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device, - "Reset available entries in tx burst for queue %d to %d\n", - io_sq->qid, max_entries_in_tx_burst); + "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid, + max_entries_in_tx_burst); io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst; } return 0; } -static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) -{ - u16 unreported_comp, head; - bool need_update; - - if (unlikely(io_cq->cq_head_db_reg)) { - head = io_cq->head; - unreported_comp = head - io_cq->last_head_update; - need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH); - - if (unlikely(need_update)) { - netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device, - "Write completion queue doorbell for queue %d: head: %d\n", - io_cq->qid, head); - writel(head, io_cq->cq_head_db_reg); - io_cq->last_head_update = head; - } - } - - return 0; -} - static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq, u8 numa_node) { @@ -248,8 +223,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, *req_id = READ_ONCE(cdesc->req_id); if (unlikely(*req_id >= io_cq->q_depth)) { - netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, - "Invalid req id %d\n", cdesc->req_id); + 
netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n", + cdesc->req_id); return -EINVAL; } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 1c0a7828d397..09e7da1a69c9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -32,7 +32,7 @@ MODULE_LICENSE("GPL"); #define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus()) #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \ - NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) + NETIF_MSG_IFDOWN | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) static struct ena_aenq_handlers aenq_handlers; @@ -47,19 +47,44 @@ static int ena_restore_device(struct ena_adapter *adapter); static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue) { + enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_OS_NETDEV_WD; struct ena_adapter *adapter = netdev_priv(dev); + unsigned int time_since_last_napi, threshold; + struct ena_ring *tx_ring; + int napi_scheduled; + + if (txqueue >= adapter->num_io_queues) { + netdev_err(dev, "TX timeout on invalid queue %u\n", txqueue); + goto schedule_reset; + } + + threshold = jiffies_to_usecs(dev->watchdog_timeo); + tx_ring = &adapter->tx_ring[txqueue]; + time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); + napi_scheduled = !!(tx_ring->napi->state & NAPIF_STATE_SCHED); + + netdev_err(dev, + "TX q %d is paused for too long (threshold %u). Time since last napi %u usec. napi scheduled: %d\n", + txqueue, + threshold, + time_since_last_napi, + napi_scheduled); + + if (threshold < time_since_last_napi && napi_scheduled) { + netdev_err(dev, + "napi handler hasn't been called for a long time but is scheduled\n"); + reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION; + } +schedule_reset: /* Change the state of the device to trigger reset * Check that we are not in the middle or a trigger already */ - if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) return; - ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD); + ena_reset_device(adapter, reset_reason); ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp); - - netif_err(adapter, tx_err, dev, "Transmit time out\n"); } static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu) @@ -116,11 +141,9 @@ int ena_xmit_common(struct ena_adapter *adapter, if (unlikely(rc)) { netif_err(adapter, tx_queued, adapter->netdev, "Failed to prepare tx bufs\n"); - ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, - &ring->syncp); + ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp); if (rc != -ENOMEM) - ena_reset_device(adapter, - ENA_REGS_RESET_DRIVER_INVALID_STATE); + ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE); return rc; } @@ -485,8 +508,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring, */ page = dev_alloc_page(); if (!page) { - ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, - &rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp); return ERR_PTR(-ENOSPC); } @@ -523,7 +545,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring, /* We handle DMA here */ page = ena_alloc_map_page(rx_ring, &dma); - if (unlikely(IS_ERR(page))) + if (IS_ERR(page)) return PTR_ERR(page); netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, @@ -545,8 +567,8 @@ static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring, 
struct ena_rx_buffer *rx_info, unsigned long attrs) { - dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, - DMA_BIDIRECTIONAL, attrs); + dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL, + attrs); } static void ena_free_rx_page(struct ena_ring *rx_ring, @@ -819,8 +841,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) &req_id); if (rc) { if (unlikely(rc == -EINVAL)) - handle_invalid_req_id(tx_ring, req_id, NULL, - false); + handle_invalid_req_id(tx_ring, req_id, NULL, false); break; } @@ -856,7 +877,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) tx_ring->next_to_clean = next_to_clean; ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); - ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); netdev_tx_completed_queue(txq, tx_pkts, tx_bytes); @@ -1046,8 +1066,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, DMA_FROM_DEVICE); if (!reuse_rx_buf_page) - ena_unmap_rx_buff_attrs(rx_ring, rx_info, - DMA_ATTR_SKIP_CPU_SYNC); + ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, page_offset + buf_offset, len, buf_len); @@ -1303,10 +1322,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ENA_RX_REFILL_THRESH_PACKET); /* Optimization, try to batch new rx buffers */ - if (refill_required > refill_threshold) { - ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); + if (refill_required > refill_threshold) ena_refill_rx_bufs(rx_ring, refill_required); - } if (xdp_flags & ENA_XDP_REDIRECT) xdp_do_flush(); @@ -1320,8 +1337,7 @@ error: adapter = netdev_priv(rx_ring->netdev); if (rc == -ENOSPC) { - ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, - &rx_ring->syncp); + ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp); ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS); } else { ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, @@ -1811,8 +1827,7 @@ static int ena_rss_configure(struct ena_adapter *adapter) if (!ena_dev->rss.tbl_log_size) { rc = ena_rss_init_default(adapter); if (rc && (rc != -EOPNOTSUPP)) { - netif_err(adapter, ifup, adapter->netdev, - "Failed to init RSS rc: %d\n", rc); + netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc); return rc; } } @@ -2134,6 +2149,12 @@ int ena_up(struct ena_adapter *adapter) */ ena_init_napi_in_range(adapter, 0, io_queue_count); + /* Enabling DIM needs to happen before enabling IRQs since DIM + * is run from napi routine + */ + if (ena_com_interrupt_moderation_supported(adapter->ena_dev)) + ena_com_enable_adaptive_moderation(adapter->ena_dev); + rc = ena_request_io_irq(adapter); if (rc) goto err_req_irq; @@ -2184,7 +2205,7 @@ void ena_down(struct ena_adapter *adapter) { int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; - netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); + netif_dbg(adapter, ifdown, adapter->netdev, "%s\n", __func__); clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); @@ -2197,8 +2218,6 @@ void ena_down(struct ena_adapter *adapter) /* After this point the napi handler won't enable the tx queue */ ena_napi_disable_in_range(adapter, 0, io_queue_count); - /* After destroy the queue there won't be any new interrupts */ - if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { int rc; @@ -2588,8 +2607,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(rc)) goto error_drop_packet; - 
skb_tx_timestamp(skb); - next_to_use = tx_ring->next_to_use; req_id = tx_ring->free_ids[next_to_use]; tx_info = &tx_ring->tx_buffer_info[req_id]; @@ -2653,6 +2670,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) } } + skb_tx_timestamp(skb); + if (netif_xmit_stopped(txq) || !netdev_xmit_more()) /* trigger the dma engine. ena_ring_tx_doorbell() * calls a memory barrier inside it. @@ -2670,22 +2689,6 @@ error_drop_packet: return NETDEV_TX_OK; } -static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev) -{ - u16 qid; - /* we suspect that this is good for in--kernel network services that - * want to loop incoming skb rx to tx in normal user generated traffic, - * most probably we will not get to this - */ - if (skb_rx_queue_recorded(skb)) - qid = skb_get_rx_queue(skb); - else - qid = netdev_pick_tx(dev, skb, NULL); - - return qid; -} - static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { struct device *dev = &pdev->dev; @@ -2764,8 +2767,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter) rc = ena_com_set_host_attributes(adapter->ena_dev); if (rc) { if (rc == -EOPNOTSUPP) - netif_warn(adapter, drv, adapter->netdev, - "Cannot set host attributes\n"); + netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n"); else netif_err(adapter, drv, adapter->netdev, "Cannot set host attributes\n"); @@ -2863,18 +2865,16 @@ static const struct net_device_ops ena_netdev_ops = { .ndo_open = ena_open, .ndo_stop = ena_close, .ndo_start_xmit = ena_start_xmit, - .ndo_select_queue = ena_select_queue, .ndo_get_stats64 = ena_get_stats64, .ndo_tx_timeout = ena_tx_timeout, .ndo_change_mtu = ena_change_mtu, - .ndo_set_mac_address = NULL, .ndo_validate_addr = eth_validate_addr, .ndo_bpf = ena_xdp, .ndo_xdp_xmit = ena_xdp_xmit, }; -static void ena_calc_io_queue_size(struct ena_adapter *adapter, - struct ena_com_dev_get_features_ctx *get_feat_ctx) +static int ena_calc_io_queue_size(struct ena_adapter *adapter, + struct ena_com_dev_get_features_ctx *get_feat_ctx) { struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq; struct ena_com_dev *ena_dev = adapter->ena_dev; @@ -2933,6 +2933,18 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter, max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size); max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size); + if (max_tx_queue_size < ENA_MIN_RING_SIZE) { + netdev_err(adapter->netdev, "Device max TX queue size: %d < minimum: %d\n", + max_tx_queue_size, ENA_MIN_RING_SIZE); + return -EINVAL; + } + + if (max_rx_queue_size < ENA_MIN_RING_SIZE) { + netdev_err(adapter->netdev, "Device max RX queue size: %d < minimum: %d\n", + max_rx_queue_size, ENA_MIN_RING_SIZE); + return -EINVAL; + } + /* When forcing large headers, we multiply the entry size by 2, and therefore divide * the queue size by 2, leaving the amount of memory used by the queues unchanged. 
*/ @@ -2963,6 +2975,8 @@ static void ena_calc_io_queue_size(struct ena_adapter *adapter, adapter->max_rx_ring_size = max_rx_queue_size; adapter->requested_tx_ring_size = tx_queue_size; adapter->requested_rx_ring_size = rx_queue_size; + + return 0; } static int ena_device_validate_params(struct ena_adapter *adapter, @@ -3070,6 +3084,7 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev, bool *wd_state) { struct ena_com_dev *ena_dev = adapter->ena_dev; + struct net_device *netdev = adapter->netdev; struct ena_llq_configurations llq_config; struct device *dev = &pdev->dev; bool readless_supported; @@ -3159,15 +3174,19 @@ static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev, rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, &llq_config); if (rc) { - dev_err(dev, "ENA device init failed\n"); + netdev_err(netdev, "Cannot set queues placement policy rc= %d\n", rc); goto err_admin_init; } - ena_calc_io_queue_size(adapter, get_feat_ctx); + rc = ena_calc_io_queue_size(adapter, get_feat_ctx); + if (unlikely(rc)) + goto err_admin_init; return 0; err_admin_init: + ena_com_abort_admin_commands(ena_dev); + ena_com_wait_for_abort_completion(ena_dev); ena_com_delete_host_info(ena_dev); ena_com_admin_destroy(ena_dev); err_mmio_read_less: @@ -3226,7 +3245,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) if (!graceful) ena_com_set_admin_running_state(ena_dev, false); - if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + if (dev_up) ena_down(adapter); /* Stop the device from sending AENQ events (in case reset flag is set @@ -3372,14 +3391,18 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, struct ena_ring *tx_ring) { struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); + enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL; unsigned int time_since_last_napi; unsigned int missing_tx_comp_to; bool is_tx_comp_time_expired; struct ena_tx_buffer *tx_buf; unsigned long last_jiffies; + int napi_scheduled; u32 missed_tx = 0; int i, rc = 0; + missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to); + for (i = 0; i < tx_ring->ring_size; i++) { tx_buf = &tx_ring->tx_buffer_info[i]; last_jiffies = tx_buf->last_jiffies; @@ -3406,25 +3429,45 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, adapter->missing_tx_completion_to); if (unlikely(is_tx_comp_time_expired)) { - if (!tx_buf->print_once) { - time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); - missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to); - netif_notice(adapter, tx_err, adapter->netdev, - "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n", - tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to); + time_since_last_napi = + jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); + napi_scheduled = !!(ena_napi->napi.state & NAPIF_STATE_SCHED); + + if (missing_tx_comp_to < time_since_last_napi && napi_scheduled) { + /* We suspect napi isn't called because the + * bottom half is not run. 
Require a bigger + * timeout for these cases + */ + if (!time_is_before_jiffies(last_jiffies + + 2 * adapter->missing_tx_completion_to)) + continue; + + reset_reason = ENA_REGS_RESET_SUSPECTED_POLL_STARVATION; } - tx_buf->print_once = 1; missed_tx++; + + if (tx_buf->print_once) + continue; + + netif_notice(adapter, tx_err, adapter->netdev, + "TX hasn't completed, qid %d, index %d. %u usecs from last napi execution, napi scheduled: %d\n", + tx_ring->qid, i, time_since_last_napi, napi_scheduled); + + tx_buf->print_once = 1; } } if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { netif_err(adapter, tx_err, adapter->netdev, - "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", + "Lost TX completions are above the threshold (%d > %d). Completion transmission timeout: %u.\n", missed_tx, - adapter->missing_tx_completion_threshold); - ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL); + adapter->missing_tx_completion_threshold, + missing_tx_comp_to); + netif_err(adapter, tx_err, adapter->netdev, + "Resetting the device\n"); + + ena_reset_device(adapter, reset_reason); rc = -EIO; } @@ -3762,8 +3805,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter) } } - rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, - ENA_HASH_KEY_SIZE, 0xFFFFFFFF); + rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE, + 0xFFFFFFFF); if (unlikely(rc && (rc != -EOPNOTSUPP))) { dev_err(dev, "Cannot fill hash function\n"); goto err_fill_indir; @@ -4040,8 +4083,8 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) free_irq_cpu_rmap(netdev->rx_cpu_rmap); netdev->rx_cpu_rmap = NULL; } -#endif /* CONFIG_RFS_ACCEL */ +#endif /* CONFIG_RFS_ACCEL */ /* Make sure timer and reset routine won't be called after * freeing device resources. */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 1e007a41a525..2c3d6a77ea79 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -21,6 +21,7 @@ enum ena_regs_reset_reason_types { ENA_REGS_RESET_USER_TRIGGER = 12, ENA_REGS_RESET_GENERIC = 13, ENA_REGS_RESET_MISS_INTERRUPT = 14, + ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15, }; /* ena_registers offsets */ diff --git a/drivers/net/ethernet/amazon/ena/ena_xdp.c b/drivers/net/ethernet/amazon/ena/ena_xdp.c index fc1c4ef73ba3..337c435d3ce9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_xdp.c +++ b/drivers/net/ethernet/amazon/ena/ena_xdp.c @@ -412,7 +412,6 @@ static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget) tx_ring->next_to_clean = next_to_clean; ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); - ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, "tx_poll: q %d done. 
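
The reworked check_missing_comp_in_tx_queue() separates two failure modes: a completion the device never produced, and a napi routine that is scheduled but never runs (poll starvation), which is granted twice the timeout before triggering ENA_REGS_RESET_SUSPECTED_POLL_STARVATION. A small sketch of the jiffies arithmetic, with assumed names:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Expired after 1x the timeout normally, or 2x when napi is scheduled but
 * starved, i.e. a suspected poll starvation rather than a lost completion.
 */
static bool my_tx_comp_expired(unsigned long last_jiffies,
			       unsigned long timeout, bool napi_scheduled)
{
	unsigned long limit = napi_scheduled ? 2 * timeout : timeout;

	return time_is_before_jiffies(last_jiffies + limit);
}
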
total pkts: %d\n", diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c index 5beadabc2136..c83a0a80d533 100644 --- a/drivers/net/ethernet/amd/pds_core/adminq.c +++ b/drivers/net/ethernet/amd/pds_core/adminq.c @@ -63,6 +63,15 @@ static int pdsc_process_notifyq(struct pdsc_qcq *qcq) return nq_work; } +static bool pdsc_adminq_inc_if_up(struct pdsc *pdsc) +{ + if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER) || + pdsc->state & BIT_ULL(PDSC_S_FW_DEAD)) + return false; + + return refcount_inc_not_zero(&pdsc->adminq_refcnt); +} + void pdsc_process_adminq(struct pdsc_qcq *qcq) { union pds_core_adminq_comp *comp; @@ -73,11 +82,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq) unsigned long irqflags; int nq_work = 0; int aq_work = 0; - int credits; - /* Don't process AdminQ when shutting down */ - if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) { - dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n", + /* Don't process AdminQ when it's not up */ + if (!pdsc_adminq_inc_if_up(pdsc)) { + dev_err(pdsc->dev, "%s: called while adminq is unavailable\n", __func__); return; } @@ -119,11 +127,10 @@ void pdsc_process_adminq(struct pdsc_qcq *qcq) credits: /* Return the interrupt credits, one for each completion */ - credits = nq_work + aq_work; - if (credits) - pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx], - credits, - PDS_CORE_INTR_CRED_REARM); + pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx], + nq_work + aq_work, + PDS_CORE_INTR_CRED_REARM); + refcount_dec(&pdsc->adminq_refcnt); } void pdsc_work_thread(struct work_struct *work) @@ -135,18 +142,19 @@ void pdsc_work_thread(struct work_struct *work) irqreturn_t pdsc_adminq_isr(int irq, void *data) { - struct pdsc_qcq *qcq = data; - struct pdsc *pdsc = qcq->pdsc; + struct pdsc *pdsc = data; + struct pdsc_qcq *qcq; - /* Don't process AdminQ when shutting down */ - if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) { - dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n", + /* Don't process AdminQ when it's not up */ + if (!pdsc_adminq_inc_if_up(pdsc)) { + dev_err(pdsc->dev, "%s: called while adminq is unavailable\n", __func__); return IRQ_HANDLED; } + qcq = &pdsc->adminqcq; queue_work(pdsc->wq, &qcq->work); - pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR); + refcount_dec(&pdsc->adminq_refcnt); return IRQ_HANDLED; } @@ -179,10 +187,16 @@ static int __pdsc_adminq_post(struct pdsc *pdsc, /* Check that the FW is running */ if (!pdsc_is_fw_running(pdsc)) { - u8 fw_status = ioread8(&pdsc->info_regs->fw_status); - - dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n", - __func__, fw_status); + if (pdsc->info_regs) { + u8 fw_status = + ioread8(&pdsc->info_regs->fw_status); + + dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n", + __func__, fw_status); + } else { + dev_info(pdsc->dev, "%s: post failed - BARs not setup\n", + __func__); + } ret = -ENXIO; goto err_out_unlock; @@ -230,6 +244,12 @@ int pdsc_adminq_post(struct pdsc *pdsc, int err = 0; int index; + if (!pdsc_adminq_inc_if_up(pdsc)) { + dev_dbg(pdsc->dev, "%s: preventing adminq cmd %u\n", + __func__, cmd->opcode); + return -ENXIO; + } + wc.qcq = &pdsc->adminqcq; index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc); if (index < 0) { @@ -248,10 +268,16 @@ int pdsc_adminq_post(struct pdsc *pdsc, break; if (!pdsc_is_fw_running(pdsc)) { - u8 fw_status = ioread8(&pdsc->info_regs->fw_status); - - dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n", - 
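
pdsc_adminq_inc_if_up() introduces a refcount seeded at 1 that gates every short-lived adminq user; teardown (pdsc_adminq_wait_and_dec_once_unused(), later in this diff) spins on refcount_dec_if_one() until only the base reference remains. Roughly the following pattern, with my_* names and the state bit as illustrative assumptions:

#include <linux/bitops.h>
#include <linux/processor.h>
#include <linux/refcount.h>

#define MY_FW_DEAD 0	/* illustrative state bit */

struct my_q {
	unsigned long state;
	refcount_t users;	/* seeded to 1 when the queue is ready */
};

static bool my_q_get_if_up(struct my_q *q)
{
	if (test_bit(MY_FW_DEAD, &q->state))
		return false;

	/* Fails once teardown has dropped the base reference to zero */
	return refcount_inc_not_zero(&q->users);
}

static void my_q_put(struct my_q *q)
{
	refcount_dec(&q->users);
}

static void my_q_wait_unused(struct my_q *q)
{
	/* MY_FW_DEAD goes first, so no new users can appear and the base
	 * reference is eventually the only one left to drop.
	 */
	set_bit(MY_FW_DEAD, &q->state);
	while (!refcount_dec_if_one(&q->users))
		cpu_relax();
}
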
__func__, fw_status); + if (pdsc->info_regs) { + u8 fw_status = + ioread8(&pdsc->info_regs->fw_status); + + dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n", + __func__, fw_status); + } else { + dev_dbg(pdsc->dev, "%s: post wait failed - BARs not setup\n", + __func__); + } err = -ENXIO; break; } @@ -285,6 +311,8 @@ err_out: queue_work(pdsc->wq, &pdsc->health_work); } + refcount_dec(&pdsc->adminq_refcnt); + return err; } EXPORT_SYMBOL_GPL(pdsc_adminq_post); diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c index 11c23a7f3172..a3c79848a69a 100644 --- a/drivers/net/ethernet/amd/pds_core/auxbus.c +++ b/drivers/net/ethernet/amd/pds_core/auxbus.c @@ -184,6 +184,9 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf) struct pds_auxiliary_dev *padev; int err = 0; + if (!cf) + return -ENODEV; + mutex_lock(&pf->config_lock); padev = pf->vfs[cf->vf_id].padev; @@ -202,14 +205,27 @@ int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf) int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf) { struct pds_auxiliary_dev *padev; - enum pds_core_vif_types vt; char devname[PDS_DEVNAME_LEN]; + enum pds_core_vif_types vt; + unsigned long mask; u16 vt_support; int client_id; int err = 0; + if (!cf) + return -ENODEV; + mutex_lock(&pf->config_lock); + mask = BIT_ULL(PDSC_S_FW_DEAD) | + BIT_ULL(PDSC_S_STOPPING_DRIVER); + if (cf->state & mask) { + dev_err(pf->dev, "%s: can't add dev, VF client in bad state %#lx\n", + __func__, cf->state); + err = -ENXIO; + goto out_unlock; + } + /* We only support vDPA so far, so it is the only one to * be verified that it is available in the Core device and * enabled in the devlink param. In the future this might diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c index 0d2091e9eb28..9662ee72814c 100644 --- a/drivers/net/ethernet/amd/pds_core/core.c +++ b/drivers/net/ethernet/amd/pds_core/core.c @@ -125,10 +125,11 @@ static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq) snprintf(name, sizeof(name), "%s-%d-%s", PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name); - index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq); + index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc); if (index < 0) return index; qcq->intx = index; + qcq->cq.bound_intr = &pdsc->intr_info[index]; return 0; } @@ -222,7 +223,6 @@ int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index, goto err_out_free_irq; } - qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx]; qcq->cq.num_descs = num_descs; qcq->cq.desc_size = cq_desc_size; qcq->cq.tail_idx = 0; @@ -300,6 +300,17 @@ err_out: return err; } +static void pdsc_core_uninit(struct pdsc *pdsc) +{ + pdsc_qcq_free(pdsc, &pdsc->notifyqcq); + pdsc_qcq_free(pdsc, &pdsc->adminqcq); + + if (pdsc->kern_dbpage) { + iounmap(pdsc->kern_dbpage); + pdsc->kern_dbpage = NULL; + } +} + static int pdsc_core_init(struct pdsc *pdsc) { union pds_core_dev_comp comp = {}; @@ -310,9 +321,32 @@ static int pdsc_core_init(struct pdsc *pdsc) struct pds_core_dev_init_data_in cidi; u32 dbid_count; u32 dbpage_num; + int numdescs; size_t sz; int err; + /* Scale the descriptor ring length based on number of CPUs and VFs */ + numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus()); + numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev); + numdescs = roundup_pow_of_two(numdescs); + err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq", + PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR, + numdescs, + sizeof(union 
pds_core_adminq_cmd), + sizeof(union pds_core_adminq_comp), + 0, &pdsc->adminqcq); + if (err) + return err; + + err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq", + PDS_CORE_QCQ_F_NOTIFYQ, + PDSC_NOTIFYQ_LENGTH, + sizeof(struct pds_core_notifyq_cmd), + sizeof(union pds_core_notifyq_comp), + 0, &pdsc->notifyqcq); + if (err) + goto err_out_uninit; + cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa); cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa); cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa); @@ -336,7 +370,7 @@ static int pdsc_core_init(struct pdsc *pdsc) if (err) { dev_err(pdsc->dev, "Device init command failed: %pe\n", ERR_PTR(err)); - return err; + goto err_out_uninit; } pdsc->hw_index = le32_to_cpu(cido.core_hw_index); @@ -346,7 +380,8 @@ static int pdsc_core_init(struct pdsc *pdsc) pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num); if (!pdsc->kern_dbpage) { dev_err(pdsc->dev, "Cannot map dbpage, aborting\n"); - return -ENOMEM; + err = -ENOMEM; + goto err_out_uninit; } pdsc->adminqcq.q.hw_type = cido.adminq_hw_type; @@ -359,6 +394,10 @@ static int pdsc_core_init(struct pdsc *pdsc) pdsc->last_eid = 0; + return 0; + +err_out_uninit: + pdsc_core_uninit(pdsc); return err; } @@ -401,41 +440,12 @@ static int pdsc_viftypes_init(struct pdsc *pdsc) int pdsc_setup(struct pdsc *pdsc, bool init) { - int numdescs; int err; - if (init) - err = pdsc_dev_init(pdsc); - else - err = pdsc_dev_reinit(pdsc); + err = pdsc_dev_init(pdsc); if (err) return err; - /* Scale the descriptor ring length based on number of CPUs and VFs */ - numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus()); - numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev); - numdescs = roundup_pow_of_two(numdescs); - err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq", - PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR, - numdescs, - sizeof(union pds_core_adminq_cmd), - sizeof(union pds_core_adminq_comp), - 0, &pdsc->adminqcq); - if (err) - goto err_out_teardown; - - err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq", - PDS_CORE_QCQ_F_NOTIFYQ, - PDSC_NOTIFYQ_LENGTH, - sizeof(struct pds_core_notifyq_cmd), - sizeof(union pds_core_notifyq_comp), - 0, &pdsc->notifyqcq); - if (err) - goto err_out_teardown; - - /* NotifyQ rides on the AdminQ interrupt */ - pdsc->notifyqcq.intx = pdsc->adminqcq.intx; - /* Set up the Core with the AdminQ and NotifyQ info */ err = pdsc_core_init(pdsc); if (err) @@ -450,6 +460,7 @@ int pdsc_setup(struct pdsc *pdsc, bool init) pdsc_debugfs_add_viftype(pdsc); } + refcount_set(&pdsc->adminq_refcnt, 1); clear_bit(PDSC_S_FW_DEAD, &pdsc->state); return 0; @@ -460,32 +471,19 @@ err_out_teardown: void pdsc_teardown(struct pdsc *pdsc, bool removing) { - int i; - if (!pdsc->pdev->is_virtfn) pdsc_devcmd_reset(pdsc); - pdsc_qcq_free(pdsc, &pdsc->notifyqcq); - pdsc_qcq_free(pdsc, &pdsc->adminqcq); + if (pdsc->adminqcq.work.func) + cancel_work_sync(&pdsc->adminqcq.work); + + pdsc_core_uninit(pdsc); if (removing) { kfree(pdsc->viftype_status); pdsc->viftype_status = NULL; } - if (pdsc->intr_info) { - for (i = 0; i < pdsc->nintrs; i++) - pdsc_intr_free(pdsc, i); - - if (removing) { - kfree(pdsc->intr_info); - pdsc->intr_info = NULL; - } - } - - if (pdsc->kern_dbpage) { - iounmap(pdsc->kern_dbpage); - pdsc->kern_dbpage = NULL; - } + pdsc_dev_uninit(pdsc); set_bit(PDSC_S_FW_DEAD, &pdsc->state); } @@ -512,6 +510,24 @@ void pdsc_stop(struct pdsc *pdsc) PDS_CORE_INTR_MASK_SET); } +static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc) +{ + /* 
The driver initializes the adminq_refcnt to 1 when the adminq is + * allocated and ready for use. Other users/requesters will increment + * the refcnt while in use. If the refcnt is down to 1 then the adminq + * is not in use and the refcnt can be cleared and adminq freed. Before + * calling this function the driver will set PDSC_S_FW_DEAD, which + * prevent subsequent attempts to use the adminq and increment the + * refcnt to fail. This guarantees that this function will eventually + * exit. + */ + while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) { + dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n", + __func__); + cpu_relax(); + } +} + void pdsc_fw_down(struct pdsc *pdsc) { union pds_core_notifyq_comp reset_event = { @@ -527,6 +543,8 @@ void pdsc_fw_down(struct pdsc *pdsc) if (pdsc->pdev->is_virtfn) return; + pdsc_adminq_wait_and_dec_once_unused(pdsc); + /* Notify clients of fw_down */ if (pdsc->fw_reporter) devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc); @@ -577,14 +595,19 @@ err_out: static void pdsc_check_pci_health(struct pdsc *pdsc) { - u8 fw_status = ioread8(&pdsc->info_regs->fw_status); + u8 fw_status; + + /* some sort of teardown already in progress */ + if (!pdsc->info_regs) + return; + + fw_status = ioread8(&pdsc->info_regs->fw_status); /* is PCI broken? */ if (fw_status != PDS_RC_BAD_PCI) return; - pdsc_reset_prepare(pdsc->pdev); - pdsc_reset_done(pdsc->pdev); + pci_reset_function(pdsc->pdev); } void pdsc_health_thread(struct work_struct *work) diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h index e35d3e7006bf..92d7657dd614 100644 --- a/drivers/net/ethernet/amd/pds_core/core.h +++ b/drivers/net/ethernet/amd/pds_core/core.h @@ -184,6 +184,7 @@ struct pdsc { struct mutex devcmd_lock; /* lock for dev_cmd operations */ struct mutex config_lock; /* lock for configuration operations */ spinlock_t adminq_lock; /* lock for adminq operations */ + refcount_t adminq_refcnt; struct pds_core_dev_info_regs __iomem *info_regs; struct pds_core_dev_cmd_regs __iomem *cmd_regs; struct pds_core_intr __iomem *intr_ctrl; @@ -280,11 +281,8 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd, union pds_core_dev_comp *comp, int max_seconds); int pdsc_devcmd_init(struct pdsc *pdsc); int pdsc_devcmd_reset(struct pdsc *pdsc); -int pdsc_dev_reinit(struct pdsc *pdsc); int pdsc_dev_init(struct pdsc *pdsc); - -void pdsc_reset_prepare(struct pci_dev *pdev); -void pdsc_reset_done(struct pci_dev *pdev); +void pdsc_dev_uninit(struct pdsc *pdsc); int pdsc_intr_alloc(struct pdsc *pdsc, char *name, irq_handler_t handler, void *data); diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c index 8ec392299b7d..6bdd02b7aa6d 100644 --- a/drivers/net/ethernet/amd/pds_core/debugfs.c +++ b/drivers/net/ethernet/amd/pds_core/debugfs.c @@ -32,8 +32,8 @@ void pdsc_debugfs_del_dev(struct pdsc *pdsc) static int identity_show(struct seq_file *seq, void *v) { - struct pdsc *pdsc = seq->private; struct pds_core_dev_identity *ident; + struct pdsc *pdsc = seq->private; int vt; ident = &pdsc->dev_ident; @@ -64,6 +64,10 @@ DEFINE_SHOW_ATTRIBUTE(identity); void pdsc_debugfs_add_ident(struct pdsc *pdsc) { + /* This file will already exist in the reset flow */ + if (debugfs_lookup("identity", pdsc->dentry)) + return; + debugfs_create_file("identity", 0400, pdsc->dentry, pdsc, &identity_fops); } @@ -102,10 +106,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = { void 
pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq) { - struct dentry *qcq_dentry, *q_dentry, *cq_dentry; - struct dentry *intr_dentry; + struct dentry *qcq_dentry, *q_dentry, *cq_dentry, *intr_dentry; struct debugfs_regset32 *intr_ctrl_regset; - struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx]; struct pdsc_queue *q = &qcq->q; struct pdsc_cq *cq = &qcq->cq; @@ -143,6 +145,8 @@ void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq) debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx); if (qcq->flags & PDS_CORE_QCQ_F_INTR) { + struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx]; + intr_dentry = debugfs_create_dir("intr", qcq->dentry); if (IS_ERR_OR_NULL(intr_dentry)) return; diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c index 31940b857e0e..e494e1298dc9 100644 --- a/drivers/net/ethernet/amd/pds_core/dev.c +++ b/drivers/net/ethernet/amd/pds_core/dev.c @@ -57,6 +57,9 @@ int pdsc_err_to_errno(enum pds_core_status_code code) bool pdsc_is_fw_running(struct pdsc *pdsc) { + if (!pdsc->info_regs) + return false; + pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status); pdsc->last_fw_time = jiffies; pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat); @@ -182,13 +185,17 @@ int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd, { int err; + if (!pdsc->cmd_regs) + return -ENXIO; + memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd)); pdsc_devcmd_dbell(pdsc); err = pdsc_devcmd_wait(pdsc, cmd->opcode, max_seconds); - memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp)); if ((err == -ENXIO || err == -ETIMEDOUT) && pdsc->wq) queue_work(pdsc->wq, &pdsc->health_work); + else + memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp)); return err; } @@ -309,11 +316,20 @@ static int pdsc_identify(struct pdsc *pdsc) return 0; } -int pdsc_dev_reinit(struct pdsc *pdsc) +void pdsc_dev_uninit(struct pdsc *pdsc) { - pdsc_init_devinfo(pdsc); + if (pdsc->intr_info) { + int i; + + for (i = 0; i < pdsc->nintrs; i++) + pdsc_intr_free(pdsc, i); - return pdsc_identify(pdsc); + kfree(pdsc->intr_info); + pdsc->intr_info = NULL; + pdsc->nintrs = 0; + } + + pci_free_irq_vectors(pdsc->pdev); } int pdsc_dev_init(struct pdsc *pdsc) @@ -341,10 +357,8 @@ int pdsc_dev_init(struct pdsc *pdsc) /* Get intr_info struct array for tracking */ pdsc->intr_info = kcalloc(nintrs, sizeof(*pdsc->intr_info), GFP_KERNEL); - if (!pdsc->intr_info) { - err = -ENOMEM; - goto err_out; - } + if (!pdsc->intr_info) + return -ENOMEM; err = pci_alloc_irq_vectors(pdsc->pdev, nintrs, nintrs, PCI_IRQ_MSIX); if (err != nintrs) { diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c index e9948ea5bbcd..54864f27c87a 100644 --- a/drivers/net/ethernet/amd/pds_core/devlink.c +++ b/drivers/net/ethernet/amd/pds_core/devlink.c @@ -111,7 +111,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req, mutex_lock(&pdsc->devcmd_lock); err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2); - memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list)); + if (!err) + memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list)); mutex_unlock(&pdsc->devcmd_lock); if (err && err != -EIO) return err; diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c index 90a811f3878a..fa626719e68d 100644 --- a/drivers/net/ethernet/amd/pds_core/fw.c +++ b/drivers/net/ethernet/amd/pds_core/fw.c @@ -107,6 +107,9 @@ int pdsc_firmware_update(struct pdsc 
*pdsc, const struct firmware *fw, dev_info(pdsc->dev, "Installing firmware\n"); + if (!pdsc->cmd_regs) + return -ENXIO; + dl = priv_to_devlink(pdsc); devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c index 3080898d7b95..ab6133e7db42 100644 --- a/drivers/net/ethernet/amd/pds_core/main.c +++ b/drivers/net/ethernet/amd/pds_core/main.c @@ -37,9 +37,15 @@ static void pdsc_unmap_bars(struct pdsc *pdsc) struct pdsc_dev_bar *bars = pdsc->bars; unsigned int i; + pdsc->info_regs = NULL; + pdsc->cmd_regs = NULL; + pdsc->intr_status = NULL; + pdsc->intr_ctrl = NULL; + for (i = 0; i < PDS_CORE_BARS_MAX; i++) { if (bars[i].vaddr) pci_iounmap(pdsc->pdev, bars[i].vaddr); + bars[i].vaddr = NULL; } } @@ -293,7 +299,7 @@ err_out_stop: err_out_teardown: pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING); err_out_unmap_bars: - del_timer_sync(&pdsc->wdtimer); + timer_shutdown_sync(&pdsc->wdtimer); if (pdsc->wq) destroy_workqueue(pdsc->wq); mutex_destroy(&pdsc->config_lock); @@ -420,7 +426,7 @@ static void pdsc_remove(struct pci_dev *pdev) */ pdsc_sriov_configure(pdev, 0); - del_timer_sync(&pdsc->wdtimer); + timer_shutdown_sync(&pdsc->wdtimer); if (pdsc->wq) destroy_workqueue(pdsc->wq); @@ -433,7 +439,6 @@ static void pdsc_remove(struct pci_dev *pdev) mutex_destroy(&pdsc->config_lock); mutex_destroy(&pdsc->devcmd_lock); - pci_free_irq_vectors(pdev); pdsc_unmap_bars(pdsc); pci_release_regions(pdev); } @@ -445,19 +450,47 @@ static void pdsc_remove(struct pci_dev *pdev) devlink_free(dl); } -void pdsc_reset_prepare(struct pci_dev *pdev) +static void pdsc_stop_health_thread(struct pdsc *pdsc) +{ + if (pdsc->pdev->is_virtfn) + return; + + timer_shutdown_sync(&pdsc->wdtimer); + if (pdsc->health_work.func) + cancel_work_sync(&pdsc->health_work); +} + +static void pdsc_restart_health_thread(struct pdsc *pdsc) +{ + if (pdsc->pdev->is_virtfn) + return; + + timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0); + mod_timer(&pdsc->wdtimer, jiffies + 1); +} + +static void pdsc_reset_prepare(struct pci_dev *pdev) { struct pdsc *pdsc = pci_get_drvdata(pdev); + pdsc_stop_health_thread(pdsc); pdsc_fw_down(pdsc); - pci_free_irq_vectors(pdev); + if (pdev->is_virtfn) { + struct pdsc *pf; + + pf = pdsc_get_pf_struct(pdsc->pdev); + if (!IS_ERR(pf)) + pdsc_auxbus_dev_del(pdsc, pf); + } + pdsc_unmap_bars(pdsc); pci_release_regions(pdev); - pci_disable_device(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); } -void pdsc_reset_done(struct pci_dev *pdev) +static void pdsc_reset_done(struct pci_dev *pdev) { struct pdsc *pdsc = pci_get_drvdata(pdev); struct device *dev = pdsc->dev; @@ -486,12 +519,44 @@ void pdsc_reset_done(struct pci_dev *pdev) } pdsc_fw_up(pdsc); + pdsc_restart_health_thread(pdsc); + + if (pdev->is_virtfn) { + struct pdsc *pf; + + pf = pdsc_get_pf_struct(pdsc->pdev); + if (!IS_ERR(pf)) + pdsc_auxbus_dev_add(pdsc, pf); + } +} + +static pci_ers_result_t pdsc_pci_error_detected(struct pci_dev *pdev, + pci_channel_state_t error) +{ + if (error == pci_channel_io_frozen) { + pdsc_reset_prepare(pdev); + return PCI_ERS_RESULT_NEED_RESET; + } + + return PCI_ERS_RESULT_NONE; +} + +static void pdsc_pci_error_resume(struct pci_dev *pdev) +{ + struct pdsc *pdsc = pci_get_drvdata(pdev); + + if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) + pci_reset_function_locked(pdev); } static const struct pci_error_handlers pdsc_err_handler = { /* FLR handling */ .reset_prepare = pdsc_reset_prepare, .reset_done = pdsc_reset_done, + 
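
The main.c hunks replace del_timer_sync() with timer_shutdown_sync() and have pdsc_restart_health_thread() re-run timer_setup(); the two go together, because a shut-down timer silently refuses re-arming. A minimal sketch of that pairing, with my_* names assumed:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void my_wd_cb(struct timer_list *t)
{
	/* watchdog work would go here */
}

static void my_stop_wd(struct timer_list *t)
{
	/* Waits for a running callback, then marks the timer dead so any
	 * racing mod_timer()/add_timer() becomes a no-op.
	 */
	timer_shutdown_sync(t);
}

static void my_restart_wd(struct timer_list *t)
{
	/* A shut-down timer must be re-initialized before re-arming */
	timer_setup(t, my_wd_cb, 0);
	mod_timer(t, jiffies + 1);
}
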
+ /* AER handling */ + .error_detected = pdsc_pci_error_detected, + .resume = pdsc_pci_error_resume, }; static struct pci_driver pdsc_driver = { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 18a6c8d99fa0..a2606ee3b0a5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -15,6 +15,7 @@ #include "aq_macsec.h" #include "aq_main.h" +#include <linux/linkmode.h> #include <linux/ptp_clock_kernel.h> static void aq_ethtool_get_regs(struct net_device *ndev, @@ -681,23 +682,19 @@ static int aq_ethtool_get_ts_info(struct net_device *ndev, return 0; } -static u32 eee_mask_to_ethtool_mask(u32 speed) +static void eee_mask_to_ethtool_mask(unsigned long *mode, u32 speed) { - u32 rate = 0; - if (speed & AQ_NIC_RATE_EEE_10G) - rate |= SUPPORTED_10000baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode); if (speed & AQ_NIC_RATE_EEE_1G) - rate |= SUPPORTED_1000baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode); if (speed & AQ_NIC_RATE_EEE_100M) - rate |= SUPPORTED_100baseT_Full; - - return rate; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode); } -static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee) +static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_keee *eee) { struct aq_nic_s *aq_nic = netdev_priv(ndev); u32 rate, supported_rates; @@ -713,14 +710,14 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee) if (err < 0) return err; - eee->supported = eee_mask_to_ethtool_mask(supported_rates); + eee_mask_to_ethtool_mask(eee->supported, supported_rates); if (aq_nic->aq_nic_cfg.eee_speeds) - eee->advertised = eee->supported; + linkmode_copy(eee->advertised, eee->supported); - eee->lp_advertised = eee_mask_to_ethtool_mask(rate); + eee_mask_to_ethtool_mask(eee->lp_advertised, rate); - eee->eee_enabled = !!eee->advertised; + eee->eee_enabled = !linkmode_empty(eee->advertised); eee->tx_lpi_enabled = eee->eee_enabled; if ((supported_rates & rate) & AQ_NIC_RATE_EEE_MSK) @@ -729,7 +726,7 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee) return 0; } -static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee) +static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_keee *eee) { struct aq_nic_s *aq_nic = netdev_priv(ndev); u32 rate, supported_rates; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index abd4832e4ed2..5acb3e16b567 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -993,7 +993,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) return 0; err_exit_hwts_rx: - aq_ring_free(&aq_ptp->hwts_rx); + aq_ring_hwts_rx_free(&aq_ptp->hwts_rx); err_exit_ptp_rx: aq_ring_free(&aq_ptp->ptp_rx); err_exit_ptp_tx: @@ -1011,7 +1011,7 @@ void aq_ptp_ring_free(struct aq_nic_s *aq_nic) aq_ring_free(&aq_ptp->ptp_tx); aq_ring_free(&aq_ptp->ptp_rx); - aq_ring_free(&aq_ptp->hwts_rx); + aq_ring_hwts_rx_free(&aq_ptp->hwts_rx); aq_ptp_skb_ring_release(&aq_ptp->skb_ring); } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index cda8597b4e14..f7433abd6591 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -919,6 +919,19 @@ void 
aq_ring_free(struct aq_ring_s *self) } } +void aq_ring_hwts_rx_free(struct aq_ring_s *self) +{ + if (!self) + return; + + if (self->dx_ring) { + dma_free_coherent(aq_nic_get_dev(self->aq_nic), + self->size * self->dx_size + AQ_CFG_RXDS_DEF, + self->dx_ring, self->dx_ring_pa); + self->dx_ring = NULL; + } +} + unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) { unsigned int count; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 52847310740a..d627ace850ff 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -210,6 +210,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self); int aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, unsigned int idx, unsigned int size, unsigned int dx_size); +void aq_ring_hwts_rx_free(struct aq_ring_s *self); void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic); unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data); diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c index 29b04a274d07..a806dadc4196 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c @@ -31,6 +31,20 @@ static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask) priv->irq_mask |= mask; } +void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en) +{ + struct bcmasp_priv *priv = intf->parent; + + /* Only supported with internal phys */ + if (!intf->internal_phy) + return; + + if (en) + _intr2_mask_clear(priv, ASP_INTR2_PHY_EVENT(intf->channel)); + else + _intr2_mask_set(priv, ASP_INTR2_PHY_EVENT(intf->channel)); +} + void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en) { struct bcmasp_priv *priv = intf->parent; @@ -79,6 +93,9 @@ static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status) __napi_schedule_irqoff(&intf->tx_napi); } } + + if (status & ASP_INTR2_PHY_EVENT(intf->channel)) + phy_mac_interrupt(intf->ndev->phydev); } static irqreturn_t bcmasp_isr(int irq, void *data) @@ -535,9 +552,6 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, int j = 0, i; for (i = 0; i < NUM_NET_FILTERS; i++) { - if (j == *rule_cnt) - return -EMSGSIZE; - if (!priv->net_filters[i].claimed || priv->net_filters[i].port != intf->port) continue; @@ -547,6 +561,9 @@ int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, priv->net_filters[i - 1].wake_filter) continue; + if (j == *rule_cnt) + return -EMSGSIZE; + rule_locs[j++] = priv->net_filters[i].fs.location; } @@ -972,7 +989,26 @@ static void bcmasp_core_init(struct bcmasp_priv *priv) ASP_INTR2_CLEAR); } -static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow) +static void bcmasp_core_clock_select_many(struct bcmasp_priv *priv, bool slow) +{ + u32 reg; + + reg = ctrl2_core_rl(priv, ASP_CTRL2_CORE_CLOCK_SELECT); + if (slow) + reg &= ~ASP_CTRL2_CORE_CLOCK_SELECT_MAIN; + else + reg |= ASP_CTRL2_CORE_CLOCK_SELECT_MAIN; + ctrl2_core_wl(priv, reg, ASP_CTRL2_CORE_CLOCK_SELECT); + + reg = ctrl2_core_rl(priv, ASP_CTRL2_CPU_CLOCK_SELECT); + if (slow) + reg &= ~ASP_CTRL2_CPU_CLOCK_SELECT_MAIN; + else + reg |= ASP_CTRL2_CPU_CLOCK_SELECT_MAIN; + ctrl2_core_wl(priv, reg, ASP_CTRL2_CPU_CLOCK_SELECT); +} + +static void bcmasp_core_clock_select_one(struct bcmasp_priv *priv, bool slow) { u32 reg; @@ -1166,6 +1202,24 @@ static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv 
*priv) } } +static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en) +{ + u32 reg, phy_lpi_overwrite; + + reg = rx_edpkt_core_rl(intf->parent, ASP_EDPKT_SPARE_REG); + phy_lpi_overwrite = intf->internal_phy ? ASP_EDPKT_SPARE_REG_EPHY_LPI : + ASP_EDPKT_SPARE_REG_GPHY_LPI; + + if (en) + reg |= phy_lpi_overwrite; + else + reg &= ~phy_lpi_overwrite; + + rx_edpkt_core_wl(intf->parent, reg, ASP_EDPKT_SPARE_REG); + + usleep_range(50, 100); +} + static struct bcmasp_hw_info v20_hw_info = { .rx_ctrl_flush = ASP_RX_CTRL_FLUSH, .umac2fb = UMAC2FB_OFFSET, @@ -1178,6 +1232,7 @@ static const struct bcmasp_plat_data v20_plat_data = { .init_wol = bcmasp_init_wol_per_intf, .enable_wol = bcmasp_enable_wol_per_intf, .destroy_wol = bcmasp_wol_irq_destroy_per_intf, + .core_clock_select = bcmasp_core_clock_select_one, .hw_info = &v20_hw_info, }; @@ -1194,17 +1249,39 @@ static const struct bcmasp_plat_data v21_plat_data = { .init_wol = bcmasp_init_wol_shared, .enable_wol = bcmasp_enable_wol_shared, .destroy_wol = bcmasp_wol_irq_destroy_shared, + .core_clock_select = bcmasp_core_clock_select_one, + .hw_info = &v21_hw_info, +}; + +static const struct bcmasp_plat_data v22_plat_data = { + .init_wol = bcmasp_init_wol_shared, + .enable_wol = bcmasp_enable_wol_shared, + .destroy_wol = bcmasp_wol_irq_destroy_shared, + .core_clock_select = bcmasp_core_clock_select_many, .hw_info = &v21_hw_info, + .eee_fixup = bcmasp_eee_fixup, }; +static void bcmasp_set_pdata(struct bcmasp_priv *priv, const struct bcmasp_plat_data *pdata) +{ + priv->init_wol = pdata->init_wol; + priv->enable_wol = pdata->enable_wol; + priv->destroy_wol = pdata->destroy_wol; + priv->core_clock_select = pdata->core_clock_select; + priv->eee_fixup = pdata->eee_fixup; + priv->hw_info = pdata->hw_info; +} + static const struct of_device_id bcmasp_of_match[] = { { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data }, { .compatible = "brcm,asp-v2.1", .data = &v21_plat_data }, + { .compatible = "brcm,asp-v2.2", .data = &v22_plat_data }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, bcmasp_of_match); static const struct of_device_id bcmasp_mdio_of_match[] = { + { .compatible = "brcm,asp-v2.2-mdio", }, { .compatible = "brcm,asp-v2.1-mdio", }, { .compatible = "brcm,asp-v2.0-mdio", }, { /* sentinel */ }, @@ -1265,16 +1342,13 @@ static int bcmasp_probe(struct platform_device *pdev) if (!pdata) return dev_err_probe(dev, -EINVAL, "unable to find platform data\n"); - priv->init_wol = pdata->init_wol; - priv->enable_wol = pdata->enable_wol; - priv->destroy_wol = pdata->destroy_wol; - priv->hw_info = pdata->hw_info; + bcmasp_set_pdata(priv, pdata); /* Enable all clocks to ensure successful probing */ bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0); /* Switch to the main clock */ - bcmasp_core_clock_select(priv, false); + priv->core_clock_select(priv, false); bcmasp_intr2_mask_set_all(priv); bcmasp_intr2_clear_all(priv); @@ -1381,7 +1455,7 @@ static int __maybe_unused bcmasp_suspend(struct device *d) */ bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE); - bcmasp_core_clock_select(priv, true); + priv->core_clock_select(priv, true); clk_disable_unprepare(priv->clk); @@ -1399,7 +1473,7 @@ static int __maybe_unused bcmasp_resume(struct device *d) return ret; /* Switch to the main clock domain */ - bcmasp_core_clock_select(priv, false); + priv->core_clock_select(priv, false); /* Re-enable all clocks for re-initialization */ bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0); diff --git 
a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h index ec90add6b03e..f93cb3da44b0 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h @@ -19,6 +19,8 @@ #define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14) #define ASP_INTR2_UMC0_WAKE BIT(22) #define ASP_INTR2_UMC1_WAKE BIT(28) +#define ASP_INTR2_PHY_EVENT(intr) ((intr) ? BIT(30) | BIT(31) : \ + BIT(24) | BIT(25)) #define ASP_WAKEUP_INTR2_OFFSET 0x1200 #define ASP_WAKEUP_INTR2_STATUS 0x0 @@ -33,6 +35,12 @@ #define ASP_WAKEUP_INTR2_FILT_1 BIT(3) #define ASP_WAKEUP_INTR2_FW BIT(4) +#define ASP_CTRL2_OFFSET 0x2000 +#define ASP_CTRL2_CORE_CLOCK_SELECT 0x0 +#define ASP_CTRL2_CORE_CLOCK_SELECT_MAIN BIT(0) +#define ASP_CTRL2_CPU_CLOCK_SELECT 0x4 +#define ASP_CTRL2_CPU_CLOCK_SELECT_MAIN BIT(0) + #define ASP_TX_ANALYTICS_OFFSET 0x4c000 #define ASP_TX_ANALYTICS_CTRL 0x0 @@ -134,8 +142,11 @@ enum asp_rx_net_filter_block { #define ASP_EDPKT_RX_PKT_CNT 0x138 #define ASP_EDPKT_HDR_EXTR_CNT 0x13c #define ASP_EDPKT_HDR_OUT_CNT 0x140 +#define ASP_EDPKT_SPARE_REG 0x174 +#define ASP_EDPKT_SPARE_REG_EPHY_LPI BIT(4) +#define ASP_EDPKT_SPARE_REG_GPHY_LPI BIT(3) -#define ASP_CTRL 0x101000 +#define ASP_CTRL_OFFSET 0x101000 #define ASP_CTRL_ASP_SW_INIT 0x04 #define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0) #define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1) @@ -306,6 +317,7 @@ struct bcmasp_intf { struct bcmasp_desc *rx_edpkt_cpu; dma_addr_t rx_edpkt_dma_addr; dma_addr_t rx_edpkt_dma_read; + dma_addr_t rx_edpkt_dma_valid; /* RX buffer prefetcher ring*/ void *rx_ring_cpu; @@ -337,7 +349,7 @@ struct bcmasp_intf { int wol_irq; unsigned int wol_irq_enabled:1; - struct ethtool_eee eee; + struct ethtool_keee eee; }; #define NUM_NET_FILTERS 32 @@ -372,6 +384,8 @@ struct bcmasp_plat_data { void (*init_wol)(struct bcmasp_priv *priv); void (*enable_wol)(struct bcmasp_intf *intf, bool en); void (*destroy_wol)(struct bcmasp_priv *priv); + void (*core_clock_select)(struct bcmasp_priv *priv, bool slow); + void (*eee_fixup)(struct bcmasp_intf *priv, bool en); struct bcmasp_hw_info *hw_info; }; @@ -390,6 +404,8 @@ struct bcmasp_priv { void (*init_wol)(struct bcmasp_priv *priv); void (*enable_wol)(struct bcmasp_intf *intf, bool en); void (*destroy_wol)(struct bcmasp_priv *priv); + void (*core_clock_select)(struct bcmasp_priv *priv, bool slow); + void (*eee_fixup)(struct bcmasp_intf *intf, bool en); void __iomem *base; struct bcmasp_hw_info *hw_info; @@ -530,7 +546,8 @@ BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET); BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET); BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET); BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET); -BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL); +BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL_OFFSET); +BCMASP_CORE_IO_MACRO(ctrl2, ASP_CTRL2_OFFSET); struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, struct device_node *ndev_dn, int i); @@ -541,6 +558,8 @@ void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en); void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en); +void bcmasp_enable_phy_irq(struct bcmasp_intf *intf, int en); + void bcmasp_flush_rx_port(struct bcmasp_intf *intf); extern const struct ethtool_ops bcmasp_ethtool_ops; diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c index ce6a3d56fb23..484fc2b5626f 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ 
-360,29 +360,26 @@ void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable) umac_wl(intf, reg, UMC_EEE_CTRL); intf->eee.eee_enabled = enable; - intf->eee.eee_active = enable; } -static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e) +static int bcmasp_get_eee(struct net_device *dev, struct ethtool_keee *e) { struct bcmasp_intf *intf = netdev_priv(dev); - struct ethtool_eee *p = &intf->eee; + struct ethtool_keee *p = &intf->eee; if (!dev->phydev) return -ENODEV; - e->eee_enabled = p->eee_enabled; - e->eee_active = p->eee_active; e->tx_lpi_enabled = p->tx_lpi_enabled; e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER); return phy_ethtool_get_eee(dev->phydev, e); } -static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e) +static int bcmasp_set_eee(struct net_device *dev, struct ethtool_keee *e) { struct bcmasp_intf *intf = netdev_priv(dev); - struct ethtool_eee *p = &intf->eee; + struct ethtool_keee *p = &intf->eee; int ret; if (!dev->phydev) @@ -399,7 +396,6 @@ static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e) } umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER); - intf->eee.eee_active = ret >= 0; intf->eee.tx_lpi_enabled = e->tx_lpi_enabled; bcmasp_eee_enable_set(intf, true); } diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index 53e542881255..dd06b68b33ed 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -382,6 +382,7 @@ static void bcmasp_netif_start(struct net_device *dev) bcmasp_enable_rx_irq(intf, 1); bcmasp_enable_tx_irq(intf, 1); + bcmasp_enable_phy_irq(intf, 1); phy_start(dev->phydev); } @@ -607,6 +608,7 @@ static void bcmasp_adj_link(struct net_device *dev) struct phy_device *phydev = dev->phydev; u32 cmd_bits = 0, reg; int changed = 0; + bool active; if (intf->old_link != phydev->link) { changed = 1; @@ -658,8 +660,8 @@ static void bcmasp_adj_link(struct net_device *dev) reg |= cmd_bits; umac_wl(intf, reg, UMC_CMD); - intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0; - bcmasp_eee_enable_set(intf, intf->eee.eee_active); + active = phy_init_eee(phydev, 0) >= 0; + bcmasp_eee_enable_set(intf, active); } reg = rgmii_rl(intf, RGMII_OOB_CNTRL); @@ -673,38 +675,78 @@ static void bcmasp_adj_link(struct net_device *dev) phy_print_status(phydev); } -static int bcmasp_init_rx(struct bcmasp_intf *intf) +static int bcmasp_alloc_buffers(struct bcmasp_intf *intf) { struct device *kdev = &intf->parent->pdev->dev; struct page *buffer_pg; - dma_addr_t dma; - void *p; - u32 reg; - int ret; + /* Alloc RX */ intf->rx_buf_order = get_order(RING_BUFFER_SIZE); buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order); - - dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(kdev, dma)) { - __free_pages(buffer_pg, intf->rx_buf_order); + if (!buffer_pg) return -ENOMEM; - } + intf->rx_ring_cpu = page_to_virt(buffer_pg); - intf->rx_ring_dma = dma; - intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1; + intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, intf->rx_ring_dma)) + goto free_rx_buffer; + + intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE, + &intf->rx_edpkt_dma_addr, GFP_KERNEL); + if (!intf->rx_edpkt_cpu) + goto free_rx_buffer_dma; + + /* Alloc TX */ + intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE, + &intf->tx_spb_dma_addr, GFP_KERNEL); + if 
(!intf->tx_spb_cpu) + goto free_rx_edpkt_dma; - p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr, + intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb), GFP_KERNEL); - if (!p) { - ret = -ENOMEM; - goto free_rx_ring; - } - intf->rx_edpkt_cpu = p; + if (!intf->tx_cbs) + goto free_tx_spb_dma; - netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll); + return 0; + +free_tx_spb_dma: + dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, + intf->tx_spb_dma_addr); +free_rx_edpkt_dma: + dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu, + intf->rx_edpkt_dma_addr); +free_rx_buffer_dma: + dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, + DMA_FROM_DEVICE); +free_rx_buffer: + __free_pages(buffer_pg, intf->rx_buf_order); + + return -ENOMEM; +} + +static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + + /* RX buffers */ + dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu, + intf->rx_edpkt_dma_addr); + dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, + DMA_FROM_DEVICE); + __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); + + /* TX buffers */ + dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, + intf->tx_spb_dma_addr); + kfree(intf->tx_cbs); +} +static void bcmasp_init_rx(struct bcmasp_intf *intf) +{ + /* Restart from index 0 */ + intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1; + intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1); intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr; intf->rx_edpkt_index = 0; @@ -730,64 +772,23 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf) rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE); rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ); rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE); - rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1), - RX_EDPKT_DMA_END); - rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1), - RX_EDPKT_DMA_VALID); - - reg = UMAC2FB_CFG_DEFAULT_EN | - ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT); - reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT); - umac2fb_wl(intf, reg, UMAC2FB_CFG); - - return 0; - -free_rx_ring: - dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, - DMA_FROM_DEVICE); - __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID); - return ret; + umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) << + UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT), + UMAC2FB_CFG); } -static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf) -{ - struct device *kdev = &intf->parent->pdev->dev; - - dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu, - intf->rx_edpkt_dma_addr); - dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, - DMA_FROM_DEVICE); - __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); -} -static int bcmasp_init_tx(struct bcmasp_intf *intf) +static void bcmasp_init_tx(struct bcmasp_intf *intf) { - struct device *kdev = &intf->parent->pdev->dev; - void *p; - int ret; - - p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr, - GFP_KERNEL); - if (!p) - return -ENOMEM; - - intf->tx_spb_cpu = p; + /* Restart from index 0 */ intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1; 
intf->tx_spb_dma_read = intf->tx_spb_dma_addr; - - intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb), - GFP_KERNEL); - if (!intf->tx_cbs) { - ret = -ENOMEM; - goto free_tx_spb; - } - intf->tx_spb_index = 0; intf->tx_spb_clean_index = 0; - netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll); - /* Make sure channels are disabled */ tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE); tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC); @@ -803,26 +804,6 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf) tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE); tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END); tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID); - - return 0; - -free_tx_spb: - dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, - intf->tx_spb_dma_addr); - - return ret; -} - -static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf) -{ - struct device *kdev = &intf->parent->pdev->dev; - - /* Free descriptors */ - dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, - intf->tx_spb_dma_addr); - - /* Free cbs */ - kfree(intf->tx_cbs); } static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable) @@ -910,12 +891,10 @@ static void bcmasp_netif_deinit(struct net_device *dev) /* Disable interrupts */ bcmasp_enable_tx_irq(intf, 0); bcmasp_enable_rx_irq(intf, 0); + bcmasp_enable_phy_irq(intf, 0); netif_napi_del(&intf->tx_napi); - bcmasp_reclaim_free_all_tx(intf); - netif_napi_del(&intf->rx_napi); - bcmasp_reclaim_free_all_rx(intf); } static int bcmasp_stop(struct net_device *dev) @@ -929,6 +908,8 @@ static int bcmasp_stop(struct net_device *dev) bcmasp_netif_deinit(dev); + bcmasp_reclaim_free_buffers(intf); + phy_disconnect(dev->phydev); /* Disable internal EPHY or external PHY */ @@ -1048,6 +1029,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) netdev_err(dev, "could not attach to PHY\n"); goto err_phy_disable; } + + if (intf->internal_phy) + dev->phydev->irq = PHY_MAC_INTERRUPT; + + /* Indicate that the MAC is responsible for PHY PM */ + phydev->mac_managed_pm = true; } else if (!intf->wolopts) { ret = phy_resume(dev->phydev); if (ret) @@ -1067,17 +1054,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) intf->old_link = -1; intf->old_pause = -1; - ret = bcmasp_init_tx(intf); - if (ret) - goto err_phy_disconnect; - - /* Turn on asp */ + bcmasp_init_tx(intf); + netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll); bcmasp_enable_tx(intf, 1); - ret = bcmasp_init_rx(intf); - if (ret) - goto err_reclaim_tx; - + bcmasp_init_rx(intf); + netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll); bcmasp_enable_rx(intf, 1); /* Turn on UniMAC TX/RX */ @@ -1091,11 +1073,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) return 0; -err_reclaim_tx: - bcmasp_reclaim_free_all_tx(intf); -err_phy_disconnect: - if (phydev) - phy_disconnect(phydev); err_phy_disable: if (intf->internal_phy) bcmasp_ephy_enable_set(intf, false); @@ -1111,13 +1088,24 @@ static int bcmasp_open(struct net_device *dev) netif_dbg(intf, ifup, dev, "bcmasp open\n"); - ret = clk_prepare_enable(intf->parent->clk); + ret = bcmasp_alloc_buffers(intf); if (ret) return ret; - ret = bcmasp_netif_init(dev, true); + ret = clk_prepare_enable(intf->parent->clk); if (ret) + goto err_free_mem; + + ret = bcmasp_netif_init(dev, true); + if (ret) { clk_disable_unprepare(intf->parent->clk); + goto err_free_mem; + } + + return ret; + +err_free_mem: + bcmasp_reclaim_free_buffers(intf); 
return ret; } @@ -1326,6 +1314,9 @@ static void bcmasp_suspend_to_wol(struct bcmasp_intf *intf) ASP_WAKEUP_INTR2_MASK_CLEAR); } + if (intf->eee.eee_enabled && intf->parent->eee_fixup) + intf->parent->eee_fixup(intf, true); + netif_dbg(intf, wol, ndev, "entered WOL mode\n"); } @@ -1374,6 +1365,9 @@ static void bcmasp_resume_from_wol(struct bcmasp_intf *intf) { u32 reg; + if (intf->eee.eee_enabled && intf->parent->eee_fixup) + intf->parent->eee_fixup(intf, false); + reg = umac_rl(intf, UMC_MPD_CTRL); reg &= ~UMC_MPD_CTRL_MPD_EN; umac_wl(intf, reg, UMC_MPD_CTRL); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 0bc7690cdee1..58956ed8f531 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2081,34 +2081,31 @@ static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { "Storage only interface" }; -static u32 bnx2x_eee_to_adv(u32 eee_adv) +static void bnx2x_eee_to_linkmode(unsigned long *mode, u32 eee_adv) { - u32 modes = 0; - if (eee_adv & SHMEM_EEE_100M_ADV) - modes |= ADVERTISED_100baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode); if (eee_adv & SHMEM_EEE_1G_ADV) - modes |= ADVERTISED_1000baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode); if (eee_adv & SHMEM_EEE_10G_ADV) - modes |= ADVERTISED_10000baseT_Full; - - return modes; + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode); } -static u32 bnx2x_adv_to_eee(u32 modes, u32 shift) +static u32 bnx2x_linkmode_to_eee(const unsigned long *mode, u32 shift) { u32 eee_adv = 0; - if (modes & ADVERTISED_100baseT_Full) + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode)) eee_adv |= SHMEM_EEE_100M_ADV; - if (modes & ADVERTISED_1000baseT_Full) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode)) eee_adv |= SHMEM_EEE_1G_ADV; - if (modes & ADVERTISED_10000baseT_Full) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode)) eee_adv |= SHMEM_EEE_10G_ADV; return eee_adv << shift; } -static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata) +static int bnx2x_get_eee(struct net_device *dev, struct ethtool_keee *edata) { struct bnx2x *bp = netdev_priv(dev); u32 eee_cfg; @@ -2120,16 +2117,17 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata) eee_cfg = bp->link_vars.eee_status; - edata->supported = - bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >> - SHMEM_EEE_SUPPORTED_SHIFT); + bnx2x_eee_to_linkmode(edata->supported, + (eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >> + SHMEM_EEE_SUPPORTED_SHIFT); + + bnx2x_eee_to_linkmode(edata->advertised, + (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >> + SHMEM_EEE_ADV_STATUS_SHIFT); - edata->advertised = - bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >> - SHMEM_EEE_ADV_STATUS_SHIFT); - edata->lp_advertised = - bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >> - SHMEM_EEE_LP_ADV_STATUS_SHIFT); + bnx2x_eee_to_linkmode(edata->lp_advertised, + (eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >> + SHMEM_EEE_LP_ADV_STATUS_SHIFT); /* SHMEM value is in 16u units --> Convert to 1u units. 
*/ edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4; @@ -2141,7 +2139,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } -static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) +static int bnx2x_set_eee(struct net_device *dev, struct ethtool_keee *edata) { struct bnx2x *bp = netdev_priv(dev); u32 eee_cfg; @@ -2162,8 +2160,8 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) return -EOPNOTSUPP; } - advertised = bnx2x_adv_to_eee(edata->advertised, - SHMEM_EEE_ADV_STATUS_SHIFT); + advertised = bnx2x_linkmode_to_eee(edata->advertised, + SHMEM_EEE_ADV_STATUS_SHIFT); if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) { DP(BNX2X_MSG_ETHTOOL, "Direct manipulation of EEE advertisement is not supported\n"); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 39845d556baf..a15e6d31fc22 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -246,6 +246,49 @@ static const u16 bnxt_async_events_arr[] = { static struct workqueue_struct *bnxt_pf_wq; +#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} +#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} + +const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = { + .ports = { + .src = 0, + .dst = 0, + }, + .addrs = { + .v6addrs = { + .src = BNXT_IPV6_MASK_NONE, + .dst = BNXT_IPV6_MASK_NONE, + }, + }, +}; + +const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = { + .ports = { + .src = cpu_to_be16(0xffff), + .dst = cpu_to_be16(0xffff), + }, + .addrs = { + .v6addrs = { + .src = BNXT_IPV6_MASK_ALL, + .dst = BNXT_IPV6_MASK_ALL, + }, + }, +}; + +const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = { + .ports = { + .src = cpu_to_be16(0xffff), + .dst = cpu_to_be16(0xffff), + }, + .addrs = { + .v4addrs = { + .src = cpu_to_be32(0xffffffff), + .dst = cpu_to_be32(0xffffffff), + }, + }, +}; + static bool bnxt_vf_pciid(enum board_idx idx) { return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || @@ -4168,8 +4211,12 @@ static int bnxt_alloc_vnics(struct bnxt *bp) int num_vnics = 1; #ifdef CONFIG_RFS_ACCEL - if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == BNXT_FLAG_RFS) - num_vnics += bp->rx_nr_rings; + if (bp->flags & BNXT_FLAG_RFS) { + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + num_vnics++; + else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + num_vnics += bp->rx_nr_rings; + } #endif if (BNXT_CHIP_TYPE_NITRO_A0(bp)) @@ -4186,6 +4233,7 @@ static int bnxt_alloc_vnics(struct bnxt *bp) static void bnxt_init_vnics(struct bnxt *bp) { + struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; int i; for (i = 0; i < bp->nr_vnics; i++) { @@ -4199,20 +4247,33 @@ static void bnxt_init_vnics(struct bnxt *bp) vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; if (bp->vnic_info[i].rss_hash_key) { - if (!i) { + if (i == BNXT_VNIC_DEFAULT) { u8 *key = (void *)vnic->rss_hash_key; int k; + if (!bp->rss_hash_key_valid && + !bp->rss_hash_key_updated) { + get_random_bytes(bp->rss_hash_key, + HW_HASH_KEY_SIZE); + bp->rss_hash_key_updated = true; + } + + memcpy(vnic->rss_hash_key, bp->rss_hash_key, + HW_HASH_KEY_SIZE); + + if (!bp->rss_hash_key_updated) + continue; + + bp->rss_hash_key_updated = false; + bp->rss_hash_key_valid = true; + bp->toeplitz_prefix = 0; - get_random_bytes(vnic->rss_hash_key, - HW_HASH_KEY_SIZE); for (k = 0; k < 8; k++) { bp->toeplitz_prefix 
<<= 8; bp->toeplitz_prefix |= key[k]; } } else { - memcpy(vnic->rss_hash_key, - bp->vnic_info[0].rss_hash_key, + memcpy(vnic->rss_hash_key, vnic0->rss_hash_key, HW_HASH_KEY_SIZE); } } @@ -4798,6 +4859,44 @@ static void bnxt_clear_ring_indices(struct bnxt *bp) } } +void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) +{ + u8 type = fltr->type, flags = fltr->flags; + + INIT_LIST_HEAD(&fltr->list); + if ((type == BNXT_FLTR_TYPE_L2 && flags & BNXT_ACT_RING_DST) || + (type == BNXT_FLTR_TYPE_NTUPLE && flags & BNXT_ACT_NO_AGING)) + list_add_tail(&fltr->list, &bp->usr_fltr_list); +} + +void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) +{ + if (!list_empty(&fltr->list)) + list_del_init(&fltr->list); +} + +void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all) +{ + struct bnxt_filter_base *usr_fltr, *tmp; + + list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { + if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2) + continue; + bnxt_del_one_usr_fltr(bp, usr_fltr); + } +} + +static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) +{ + hlist_del(&fltr->hash); + bnxt_del_one_usr_fltr(bp, fltr); + if (fltr->flags) { + clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); + bp->ntp_fltr_count--; + } + kfree(fltr); +} + static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) { int i; @@ -4813,12 +4912,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) head = &bp->ntp_fltr_hash_tbl[i]; hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { bnxt_del_l2_filter(bp, fltr->l2_fltr); - if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST)) + if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || + !list_empty(&fltr->base.list))) continue; - hlist_del(&fltr->base.hash); - clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); - bp->ntp_fltr_count--; - kfree(fltr); + bnxt_del_fltr(bp, &fltr->base); } } if (!all) @@ -4840,7 +4937,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); bp->ntp_fltr_count = 0; - bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_MAX_FLTR, GFP_KERNEL); + bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL); if (!bp->ntp_fltr_bmap) rc = -ENOMEM; @@ -4859,14 +4956,10 @@ static void bnxt_free_l2_filters(struct bnxt *bp, bool all) head = &bp->l2_fltr_hash_tbl[i]; hlist_for_each_entry_safe(fltr, tmp, head, base.hash) { - if (!all && (fltr->base.flags & BNXT_ACT_FUNC_DST)) + if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || + !list_empty(&fltr->base.list))) continue; - hlist_del(&fltr->base.hash); - if (fltr->base.flags) { - clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); - bp->ntp_fltr_count--; - } - kfree(fltr); + bnxt_del_fltr(bp, &fltr->base); } } } @@ -5039,8 +5132,13 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) if (rc) goto alloc_mem_err; - bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | - BNXT_VNIC_UCAST_FLAG; + bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG | + BNXT_VNIC_MCAST_FLAG | + BNXT_VNIC_UCAST_FLAG; + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) + bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= + BNXT_VNIC_RSS_FLAG | BNXT_VNIC_NTUPLE_FLAG; + rc = bnxt_alloc_vnic_attributes(bp); if (rc) goto alloc_mem_err; @@ -5342,6 +5440,7 @@ void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) return; } hlist_del_rcu(&fltr->base.hash); + bnxt_del_one_usr_fltr(bp, &fltr->base); if (fltr->base.flags) { clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); bp->ntp_fltr_count--; @@ -5480,13 +5579,15 @@ 
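The new usr_fltr_list bookkeeping in the hunks above leans on the kernel's list_del_init()/list_empty() idiom: a node set up with INIT_LIST_HEAD() links to itself, so list_empty() on the node doubles as an "is this filter currently tracked?" test, which is exactly how bnxt_free_ntp_fltrs() and bnxt_free_l2_filters() now decide to skip user-created entries. A minimal sketch of that idiom follows, using hypothetical demo_* names rather than driver code:

#include <linux/list.h>

struct demo_fltr {
	struct list_head list;	/* self-linked while untracked */
};

static void demo_track(struct list_head *head, struct demo_fltr *f)
{
	INIT_LIST_HEAD(&f->list);	/* list_empty(&f->list) is now true */
	list_add_tail(&f->list, head);	/* tracked: list_empty() turns false */
}

static void demo_untrack(struct demo_fltr *f)
{
	if (!list_empty(&f->list))	/* safe even if never tracked */
		list_del_init(&f->list);	/* re-self-links, so the test stays valid */
}

Because list_del_init() restores the self-link, demo_untrack() — like bnxt_del_one_usr_fltr() above — is idempotent and needs no separate "on list" flag in the filter.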
static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, int bit_id; bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, - BNXT_MAX_FLTR, 0); + bp->max_fltr, 0); if (bit_id < 0) return -ENOMEM; fltr->base.sw_id = (u16)bit_id; + bp->ntp_fltr_count++; } head = &bp->l2_fltr_hash_tbl[idx]; hlist_add_head_rcu(&fltr->base.hash, head); + bnxt_insert_usr_fltr(bp, &fltr->base); set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); atomic_set(&fltr->refcnt, 1); return 0; @@ -5519,6 +5620,40 @@ static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, return fltr; } +struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u16 flags) +{ + struct bnxt_l2_filter *fltr; + u32 idx; + int rc; + + idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & + BNXT_L2_FLTR_HASH_MASK; + spin_lock_bh(&bp->ntp_fltr_lock); + fltr = __bnxt_lookup_l2_filter(bp, key, idx); + if (fltr) { + fltr = ERR_PTR(-EEXIST); + goto l2_filter_exit; + } + fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC); + if (!fltr) { + fltr = ERR_PTR(-ENOMEM); + goto l2_filter_exit; + } + fltr->base.flags = flags; + rc = bnxt_init_l2_filter(bp, fltr, key, idx); + if (rc) { + spin_unlock_bh(&bp->ntp_fltr_lock); + bnxt_del_l2_filter(bp, fltr); + return ERR_PTR(rc); + } + +l2_filter_exit: + spin_unlock_bh(&bp->ntp_fltr_lock); + return fltr; +} + static u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) { #ifdef CONFIG_BNXT_SRIOV @@ -5650,15 +5785,38 @@ void bnxt_fill_ipv6_mask(__be32 mask[4]) mask[i] = cpu_to_be32(~0); } +static void +bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, + struct hwrm_cfa_ntuple_filter_alloc_input *req, + u16 rxq) +{ + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { + struct bnxt_vnic_info *vnic; + u32 enables; + + vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); + enables = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; + req->enables |= cpu_to_le32(enables); + req->rfs_ring_tbl_idx = cpu_to_le16(rxq); + } else { + u32 flags; + + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; + req->flags |= cpu_to_le32(flags); + req->dst_id = cpu_to_le16(rxq); + } +} + int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { struct hwrm_cfa_ntuple_filter_alloc_output *resp; struct hwrm_cfa_ntuple_filter_alloc_input *req; + struct bnxt_flow_masks *masks = &fltr->fmasks; struct flow_keys *keys = &fltr->fkeys; struct bnxt_l2_filter *l2_fltr; struct bnxt_vnic_info *vnic; - u32 flags = 0; int rc; rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); @@ -5668,16 +5826,16 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, l2_fltr = fltr->l2_fltr; req->l2_filter_id = l2_fltr->base.filter_id; - - if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { - flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; - req->dst_id = cpu_to_le16(fltr->base.rxq); + if (fltr->base.flags & BNXT_ACT_DROP) { + req->flags = + cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP); + } else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { + bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq); } else { vnic = &bp->vnic_info[fltr->base.rxq + 1]; req->dst_id = cpu_to_le16(vnic->fw_vnic_id); } - req->flags = cpu_to_le32(flags); - req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); + req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); req->ethertype = htons(ETH_P_IP); req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; @@ -5687,25 +5845,15 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt 
*bp, req->ethertype = htons(ETH_P_IPV6); req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { - *(struct in6_addr *)&req->src_ipaddr[0] = - keys->addrs.v6addrs.src; - bnxt_fill_ipv6_mask(req->src_ipaddr_mask); - } - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { - *(struct in6_addr *)&req->dst_ipaddr[0] = - keys->addrs.v6addrs.dst; - bnxt_fill_ipv6_mask(req->dst_ipaddr_mask); - } + *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; + *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; + *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; + *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; } else { - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { - req->src_ipaddr[0] = keys->addrs.v4addrs.src; - req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); - } - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { - req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; - req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); - } + req->src_ipaddr[0] = keys->addrs.v4addrs.src; + req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; + req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; } if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); @@ -5713,14 +5861,10 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; } - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { - req->src_port = keys->ports.src; - req->src_port_mask = cpu_to_be16(0xffff); - } - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) { - req->dst_port = keys->ports.dst; - req->dst_port_mask = cpu_to_be16(0xffff); - } + req->src_port = keys->ports.src; + req->src_port_mask = masks->ports.src; + req->dst_port = keys->ports.dst; + req->dst_port_mask = masks->ports.dst; resp = hwrm_req_hold(bp, req); rc = hwrm_req_send(bp, req); @@ -5971,7 +6115,10 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, for (i = 0; i < tbl_size; i++) { u16 ring_id, j; - j = bp->rss_indir_tbl[i]; + if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) + j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); + else + j = bp->rss_indir_tbl[i]; rxr = &bp->rx_ring[j]; ring_id = rxr->rx_ring_struct.fw_ring_id; @@ -5985,10 +6132,13 @@ static void __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, struct bnxt_vnic_info *vnic) { - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { bnxt_fill_hw_rss_tbl_p5(bp, vnic); - else + if (bp->flags & BNXT_FLAG_CHIP_P7) + req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT; + } else { bnxt_fill_hw_rss_tbl(bp, vnic); + } if (bp->rss_hash_delta) { req->hash_type = cpu_to_le32(bp->rss_hash_delta); @@ -6061,7 +6211,7 @@ exit: static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct hwrm_vnic_rss_qcfg_output *resp; struct hwrm_vnic_rss_qcfg_input *req; @@ -6165,6 +6315,7 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { + struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_cfg_input *req; unsigned int ring = 0, grp_idx; @@ -6194,8 +6345,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) req->enables 
|= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { - req->rss_rule = - cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); + req->rss_rule = cpu_to_le16(vnic0->fw_rss_cos_lb_ctx[0]); req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); @@ -6292,7 +6442,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, vnic_no_ring_grps: for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; - if (vnic_id == 0) + if (vnic_id == BNXT_VNIC_DEFAULT) req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); resp = hwrm_req_hold(bp, req); @@ -6351,6 +6501,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) } if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP) + bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP) + bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP) + bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) + bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; } hwrm_req_drop(bp, req); return rc; @@ -6918,6 +7076,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_hw_ring_grps = le32_to_cpu(resp->alloc_hw_ring_grps); hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); + hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); cp = le16_to_cpu(resp->alloc_cmpl_rings); stats = le16_to_cpu(resp->alloc_stat_ctx); hw_resc->resv_irqs = cp; @@ -6973,8 +7132,7 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) static bool bnxt_rfs_supported(struct bnxt *bp); static struct hwrm_func_cfg_input * -__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, int vnics) +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_cfg_input *req; u32 enables = 0; @@ -6983,52 +7141,42 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return NULL; req->fid = cpu_to_le16(0xffff); - enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - req->num_tx_rings = cpu_to_le16(tx_rings); + enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + req->num_tx_rings = cpu_to_le16(hwr->tx); if (BNXT_NEW_RM(bp)) { - enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; - enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; - enables |= tx_rings + ring_grps ? + enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; + enables |= hwr->cp_p5 ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; - enables |= rx_rings ? - FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; } else { - enables |= cp_rings ? + enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; - enables |= ring_grps ? - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | - FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; - } - enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; - - req->num_rx_rings = cpu_to_le16(rx_rings); + enables |= hwr->grp ? 
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + } + enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= hwr->rss_ctx ? FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : + 0; + req->num_rx_rings = cpu_to_le16(hwr->rx); + req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); - - req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); - req->num_msix = cpu_to_le16(cp_rings); - req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); + req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); + req->num_msix = cpu_to_le16(hwr->cp); } else { - req->num_cmpl_rings = cpu_to_le16(cp_rings); - req->num_hw_ring_grps = cpu_to_le16(ring_grps); - req->num_rsscos_ctxs = cpu_to_le16(1); - if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && - bnxt_rfs_supported(bp)) - req->num_rsscos_ctxs = - cpu_to_le16(ring_grps + 1); + req->num_cmpl_rings = cpu_to_le16(hwr->cp); + req->num_hw_ring_grps = cpu_to_le16(hwr->grp); } - req->num_stat_ctxs = cpu_to_le16(stats); - req->num_vnics = cpu_to_le16(vnics); + req->num_stat_ctxs = cpu_to_le16(hwr->stat); + req->num_vnics = cpu_to_le16(hwr->vnic); } req->enables = cpu_to_le32(enables); return req; } static struct hwrm_func_vf_cfg_input * -__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, int vnics) +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_vf_cfg_input *req; u32 enables = 0; @@ -7036,51 +7184,46 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) return NULL; - enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; - enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - enables |= tx_rings + ring_grps ? + enables |= hwr->cp_p5 ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; } else { - enables |= cp_rings ? - FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; - enables |= ring_grps ? + enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + enables |= hwr->grp ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; } - enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= hwr->vnic ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req->num_tx_rings = cpu_to_le16(tx_rings); - req->num_rx_rings = cpu_to_le16(rx_rings); + req->num_tx_rings = cpu_to_le16(hwr->tx); + req->num_rx_rings = cpu_to_le16(hwr->rx); + req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps); - - req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps); - req->num_rsscos_ctxs = cpu_to_le16(rss_ctx); + req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); } else { - req->num_cmpl_rings = cpu_to_le16(cp_rings); - req->num_hw_ring_grps = cpu_to_le16(ring_grps); - req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); + req->num_cmpl_rings = cpu_to_le16(hwr->cp); + req->num_hw_ring_grps = cpu_to_le16(hwr->grp); } - req->num_stat_ctxs = cpu_to_le16(stats); - req->num_vnics = cpu_to_le16(vnics); + req->num_stat_ctxs = cpu_to_le16(hwr->stat); + req->num_vnics = cpu_to_le16(hwr->vnic); req->enables = cpu_to_le32(enables); return req; } static int -bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, int vnics) +bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_cfg_input *req; int rc; - req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); if (!req) return -ENOMEM; @@ -7094,25 +7237,23 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return rc; if (bp->hwrm_spec_code < 0x10601) - bp->hw_resc.resv_tx_rings = tx_rings; + bp->hw_resc.resv_tx_rings = hwr->tx; return bnxt_hwrm_get_rings(bp); } static int -bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, int vnics) +bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_vf_cfg_input *req; int rc; if (!BNXT_NEW_RM(bp)) { - bp->hw_resc.resv_tx_rings = tx_rings; + bp->hw_resc.resv_tx_rings = hwr->tx; return 0; } - req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); if (!req) return -ENOMEM; @@ -7123,15 +7264,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return bnxt_hwrm_get_rings(bp); } -static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, - int cp, int stat, int vnic) +static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { if (BNXT_PF(bp)) - return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, - vnic); + return bnxt_hwrm_reserve_pf_rings(bp, hwr); else - return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, - vnic); + return bnxt_hwrm_reserve_vf_rings(bp, hwr); } int bnxt_nq_rings_in_use(struct bnxt *bp) @@ -7174,6 +7312,24 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp) return cp + ulp_stat; } +static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + if (!hwr->grp) + return 0; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + int rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); + + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + rss_ctx *= hwr->vnic; + return rss_ctx; + } + if (BNXT_VF(bp)) + return BNXT_VF_MAX_RSS_CTX; + if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) + return hwr->grp + 1; + return 1; +} + /* Check if a default RSS map needs to be setup. 
This function is only * used on older firmware that does not require reserving RX rings. */ @@ -7189,13 +7345,24 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) } } +static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) +{ + if (bp->flags & BNXT_FLAG_RFS) { + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + return 2; + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return rx_rings + 1; + } + return 1; +} + static bool bnxt_need_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int cp = bnxt_cp_rings_in_use(bp); int nq = bnxt_nq_rings_in_use(bp); int rx = bp->rx_nr_rings, stat; - int vnic = 1, grp = rx; + int vnic, grp = rx; if (hw_resc->resv_tx_rings != bp->tx_nr_rings && bp->hwrm_spec_code >= 0x10601) @@ -7210,9 +7377,9 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) bnxt_check_rss_tbl_no_rmgr(bp); return false; } - if ((bp->flags & BNXT_FLAG_RFS) && - !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) - vnic = rx + 1; + + vnic = bnxt_get_total_vnics(bp, rx); + if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; stat = bnxt_get_func_stat_ctxs(bp); @@ -7227,47 +7394,65 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) return false; } -static int __bnxt_reserve_rings(struct bnxt *bp) +static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - int cp = bnxt_nq_rings_in_use(bp); - int tx = bp->tx_nr_rings; - int rx = bp->rx_nr_rings; - int grp, rx_rings, rc; - int vnic = 1, stat; + + hwr->tx = hw_resc->resv_tx_rings; + if (BNXT_NEW_RM(bp)) { + hwr->rx = hw_resc->resv_rx_rings; + hwr->cp = hw_resc->resv_irqs; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + hwr->cp_p5 = hw_resc->resv_cp_rings; + hwr->grp = hw_resc->resv_hw_ring_grps; + hwr->vnic = hw_resc->resv_vnics; + hwr->stat = hw_resc->resv_stat_ctxs; + hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; + } +} + +static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic && + hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); +} + +static int __bnxt_reserve_rings(struct bnxt *bp) +{ + struct bnxt_hw_rings hwr = {0}; + int rx_rings, rc; bool sh = false; int tx_cp; if (!bnxt_need_reserve_rings(bp)) return 0; + hwr.cp = bnxt_nq_rings_in_use(bp); + hwr.tx = bp->tx_nr_rings; + hwr.rx = bp->rx_nr_rings; if (bp->flags & BNXT_FLAG_SHARED_RINGS) sh = true; - if ((bp->flags & BNXT_FLAG_RFS) && - !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) - vnic = rx + 1; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + hwr.cp_p5 = hwr.rx + hwr.tx; + + hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); + if (bp->flags & BNXT_FLAG_AGG_RINGS) - rx <<= 1; - grp = bp->rx_nr_rings; - stat = bnxt_get_func_stat_ctxs(bp); + hwr.rx <<= 1; + hwr.grp = bp->rx_nr_rings; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); + hwr.stat = bnxt_get_func_stat_ctxs(bp); - rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); + rc = bnxt_hwrm_reserve_rings(bp, &hwr); if (rc) return rc; - tx = hw_resc->resv_tx_rings; - if (BNXT_NEW_RM(bp)) { - rx = hw_resc->resv_rx_rings; - cp = hw_resc->resv_irqs; - grp = hw_resc->resv_hw_ring_grps; - vnic = hw_resc->resv_vnics; - stat = hw_resc->resv_stat_ctxs; - } + bnxt_copy_reserved_rings(bp, &hwr); - rx_rings = rx; + rx_rings = hwr.rx; if (bp->flags & BNXT_FLAG_AGG_RINGS) { - if (rx >= 2) { - rx_rings = rx >> 1; + if (hwr.rx >= 2) { + rx_rings = hwr.rx >> 1; } else { if (netif_running(bp->dev)) return -ENOMEM; @@ -7279,17 +7464,17 @@ static int __bnxt_reserve_rings(struct 
bnxt *bp) bnxt_set_ring_params(bp); } } - rx_rings = min_t(int, rx_rings, grp); - cp = min_t(int, cp, bp->cp_nr_rings); - if (stat > bnxt_get_ulp_stat_ctxs(bp)) - stat -= bnxt_get_ulp_stat_ctxs(bp); - cp = min_t(int, cp, stat); - rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); + rx_rings = min_t(int, rx_rings, hwr.grp); + hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); + if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) + hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); + hwr.cp = min_t(int, hwr.cp, hwr.stat); + rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); if (bp->flags & BNXT_FLAG_AGG_RINGS) - rx = rx_rings << 1; - tx_cp = bnxt_num_tx_to_cp(bp, tx); - cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; - bp->tx_nr_rings = tx; + hwr.rx = rx_rings << 1; + tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); + hwr.cp = sh ? max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; + bp->tx_nr_rings = hwr.tx; /* If we cannot reserve all the RX rings, reset the RSS map only * if absolutely necessary @@ -7306,9 +7491,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp) } } bp->rx_nr_rings = rx_rings; - bp->cp_nr_rings = cp; + bp->cp_nr_rings = hwr.cp; - if (!tx || !rx || !cp || !grp || !vnic || !stat) + if (!bnxt_rings_ok(bp, &hwr)) return -ENOMEM; if (!netif_is_rxfh_configured(bp->dev)) @@ -7317,9 +7502,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) return rc; } -static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, - int vnics) +static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_vf_cfg_input *req; u32 flags; @@ -7327,8 +7510,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, if (!BNXT_NEW_RM(bp)) return 0; - req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | @@ -7342,15 +7524,12 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return hwrm_req_send_silent(bp, req); } -static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, - int vnics) +static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { struct hwrm_func_cfg_input *req; u32 flags; - req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | @@ -7368,20 +7547,15 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return hwrm_req_send_silent(bp, req); } -static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int stats, - int vnics) +static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) { if (bp->hwrm_spec_code < 0x10801) return 0; if (BNXT_PF(bp)) - return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, - ring_grps, cp_rings, stats, - vnics); + return bnxt_hwrm_check_pf_rings(bp, hwr); - return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + return bnxt_hwrm_check_vf_rings(bp, hwr); } static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) @@ -8709,6 +8883,13 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) 
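The ring-reservation refactor in the hunks above replaces six positional int parameters (tx, rx, grp, cp, stat, vnic) with a single struct bnxt_hw_rings, so the new resource types (cp_p5 for P5+ hardware completion rings, rss_ctx for RSS contexts) ride along without yet another signature change, and a zero field simply leaves the corresponding FUNC_CFG/FUNC_VF_CFG enables bit clear. A condensed, illustrative sketch of the new calling convention as __bnxt_reserve_rings() uses it — demo_reserve() is a hypothetical wrapper, not driver code:

static int demo_reserve(struct bnxt *bp)
{
	struct bnxt_hw_rings hwr = {0};

	hwr.cp = bnxt_nq_rings_in_use(bp);	/* NQs/MSIX on P5+, cmpl rings otherwise */
	hwr.tx = bp->tx_nr_rings;
	hwr.rx = bp->rx_nr_rings;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
		hwr.cp_p5 = hwr.rx + hwr.tx;	/* hw cmpl rings, no longer recomputed at each call site */
	hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx);	/* 2 when the ntuple VNIC is supported */
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		hwr.rx <<= 1;			/* one agg ring per rx ring */
	hwr.grp = bp->rx_nr_rings;
	hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);	/* needs grp and vnic filled in first */
	hwr.stat = bnxt_get_func_stat_ctxs(bp);

	return bnxt_hwrm_reserve_rings(bp, &hwr);
}

The same struct now also feeds bnxt_hwrm_check_rings() and bnxt_rfs_capable(), which previously had to pass zeros for every argument except vnics.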
hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records); + hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records); + hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); + hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); + hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); + hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); + if (BNXT_PF(bp)) { struct bnxt_pf_info *pf = &bp->pf; @@ -8717,12 +8898,6 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); pf->first_vf_id = le16_to_cpu(resp->first_vf_id); pf->max_vfs = le16_to_cpu(resp->max_vfs); - pf->max_encap_records = le32_to_cpu(resp->max_encap_records); - pf->max_decap_records = le32_to_cpu(resp->max_decap_records); - pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); - pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); - pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); - pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); bp->flags &= ~BNXT_FLAG_WOL_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) bp->flags |= BNXT_FLAG_WOL_CAP; @@ -8825,6 +9000,14 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; + if (flags & + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; + + if (flags & + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; + hwrm_cfa_adv_qcaps_exit: hwrm_req_drop(bp, req); return rc; @@ -9689,10 +9872,28 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) return __bnxt_setup_vnic(bp, vnic_id); } +static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id, + u16 start_rx_ring_idx, int rx_rings) +{ + int rc; + + rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", + vnic_id, rc); + return rc; + } + return bnxt_setup_vnic(bp, vnic_id); +} + static int bnxt_alloc_rfs_vnics(struct bnxt *bp) { int i, rc = 0; + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0, + bp->rx_nr_rings); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) return 0; @@ -9708,14 +9909,7 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) vnic->flags |= BNXT_VNIC_RFS_FLAG; if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; - rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1); - if (rc) { - netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", - vnic_id, rc); - break; - } - rc = bnxt_setup_vnic(bp, vnic_id); - if (rc) + if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1)) break; } return rc; @@ -9756,7 +9950,7 @@ static bool bnxt_mc_list_updated(struct bnxt *, u32 *); static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; int rc = 0; unsigned int rx_nr_rings = bp->rx_nr_rings; @@ -9785,7 +9979,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) rx_nr_rings--; /* default vnic 0 */ - rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); + rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings); if (rc) 
{ netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); goto err_out; @@ -9794,7 +9988,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) if (BNXT_VF(bp)) bnxt_hwrm_func_qcfg(bp); - rc = bnxt_setup_vnic(bp, 0); + rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT); if (rc) goto err_out; if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) @@ -10621,10 +10815,10 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8); if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { - struct ethtool_eee *eee = &bp->eee; + struct ethtool_keee *eee = &bp->eee; u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); - eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + _bnxt_fw_to_linkmode(eee->supported, fw_speeds); bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & @@ -10766,7 +10960,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->module_status = resp->module_status; if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { - struct ethtool_eee *eee = &bp->eee; + struct ethtool_keee *eee = &bp->eee; u16 fw_speeds; eee->eee_active = 0; @@ -10775,8 +10969,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) eee->eee_active = 1; fw_speeds = le16_to_cpu( resp->link_partner_adv_eee_link_speed_mask); - eee->lp_advertised = - _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds); } /* Pull initial EEE config */ @@ -10786,8 +10979,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) eee->eee_enabled = 1; fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); - eee->advertised = - _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + _bnxt_fw_to_linkmode(eee->advertised, fw_speeds); if (resp->eee_config_phy_addr & PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { @@ -10957,7 +11149,7 @@ int bnxt_hwrm_set_pause(struct bnxt *bp) static void bnxt_hwrm_set_eee(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) { - struct ethtool_eee *eee = &bp->eee; + struct ethtool_keee *eee = &bp->eee; if (eee->eee_enabled) { u16 eee_speeds; @@ -11087,6 +11279,7 @@ static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) hw_resc->resv_rx_rings = 0; hw_resc->resv_hw_ring_grps = 0; hw_resc->resv_vnics = 0; + hw_resc->resv_rsscos_ctxs = 0; if (!fw_reset) { bp->tx_nr_rings = 0; bp->rx_nr_rings = 0; @@ -11322,22 +11515,25 @@ static void bnxt_get_wol_settings(struct bnxt *bp) static bool bnxt_eee_config_ok(struct bnxt *bp) { - struct ethtool_eee *eee = &bp->eee; + struct ethtool_keee *eee = &bp->eee; struct bnxt_link_info *link_info = &bp->link_info; if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) return true; if (eee->eee_enabled) { - u32 advertising = - _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); + + _bnxt_fw_to_linkmode(advertising, link_info->advertising); if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { eee->eee_enabled = 0; return false; } - if (eee->advertised & ~advertising) { - eee->advertised = advertising & eee->supported; + if (linkmode_andnot(tmp, eee->advertised, advertising)) { + linkmode_and(eee->advertised, advertising, + eee->supported); return false; } } @@ -11442,6 +11638,42 @@ static int bnxt_reinit_after_abort(struct bnxt *bp) return rc; } +static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) +{ + struct 
bnxt_ntuple_filter *ntp_fltr; + struct bnxt_l2_filter *l2_fltr; + + if (list_empty(&fltr->list)) + return; + + if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { + ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; + atomic_inc(&l2_fltr->refcnt); + ntp_fltr->l2_fltr = l2_fltr; + if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { + bnxt_del_ntp_filter(bp, ntp_fltr); + netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n", + fltr->sw_id); + } + } else if (fltr->type == BNXT_FLTR_TYPE_L2) { + l2_fltr = container_of(fltr, struct bnxt_l2_filter, base); + if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) { + bnxt_del_l2_filter(bp, l2_fltr); + netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n", + fltr->sw_id); + } + } +} + +static void bnxt_cfg_usr_fltrs(struct bnxt *bp) +{ + struct bnxt_filter_base *usr_fltr, *tmp; + + list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) + bnxt_cfg_one_usr_fltr(bp, usr_fltr); +} + static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -11528,6 +11760,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_vf_reps_open(bp); bnxt_ptp_init_rtc(bp, true); bnxt_ptp_cfg_tstamp_filters(bp); + bnxt_cfg_usr_fltrs(bp); return 0; open_err_irq: @@ -11969,8 +12202,8 @@ void bnxt_get_ring_err_stats(struct bnxt *bp, static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct net_device *dev = bp->dev; - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; struct netdev_hw_addr *ha; u8 *haddr; int mc_count = 0; @@ -12004,7 +12237,7 @@ static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) static bool bnxt_uc_list_updated(struct bnxt *bp) { struct net_device *dev = bp->dev; - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct netdev_hw_addr *ha; int off = 0; @@ -12031,7 +12264,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) if (!test_bit(BNXT_STATE_OPEN, &bp->state)) return; - vnic = &bp->vnic_info[0]; + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; mask = vnic->rx_mask; mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | @@ -12062,7 +12295,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) static int bnxt_cfg_rx_mode(struct bnxt *bp) { struct net_device *dev = bp->dev; - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; struct netdev_hw_addr *ha; int i, off = 0, rc; bool uc_update; @@ -12174,21 +12407,32 @@ static bool bnxt_rfs_supported(struct bnxt *bp) /* If runtime conditions support RFS */ static bool bnxt_rfs_capable(struct bnxt *bp) { - int vnics, max_vnics, max_rss_ctxs; + struct bnxt_hw_rings hwr = {0}; + int max_vnics, max_rss_ctxs; + hwr.rss_ctx = 1; + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { + /* 2 VNICS: default + Ntuple */ + hwr.vnic = 2; + hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * + hwr.vnic; + goto check_reserve_vnic; + } if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) return bnxt_rfs_supported(bp); if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) return false; - vnics = 1 + bp->rx_nr_rings; + hwr.vnic = 1 + bp->rx_nr_rings; +check_reserve_vnic: max_vnics = bnxt_get_max_func_vnics(bp); max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); - /* RSS 
contexts not a limiting factor */ - if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) - max_rss_ctxs = max_vnics; - if (vnics > max_vnics || vnics > max_rss_ctxs) { + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)) + hwr.rss_ctx = hwr.vnic; + + if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { if (bp->rx_nr_rings > 1) netdev_warn(bp->dev, "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", @@ -12199,15 +12443,19 @@ static bool bnxt_rfs_capable(struct bnxt *bp) if (!BNXT_NEW_RM(bp)) return true; - if (vnics == bp->hw_resc.resv_vnics) + if (hwr.vnic == bp->hw_resc.resv_vnics && + hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) return true; - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); - if (vnics <= bp->hw_resc.resv_vnics) + bnxt_hwrm_reserve_rings(bp, &hwr); + if (hwr.vnic <= bp->hw_resc.resv_vnics && + hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) return true; netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); + hwr.vnic = 1; + hwr.rss_ctx = 0; + bnxt_hwrm_reserve_rings(bp, &hwr); return false; } @@ -12246,14 +12494,24 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, return features; } +static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, + bool link_re_init, u32 flags, bool update_tpa) +{ + bnxt_close_nic(bp, irq_re_init, link_re_init); + bp->flags = flags; + if (update_tpa) + bnxt_set_ring_params(bp); + return bnxt_open_nic(bp, irq_re_init, link_re_init); +} + static int bnxt_set_features(struct net_device *dev, netdev_features_t features) { + bool update_tpa = false, update_ntuple = false; struct bnxt *bp = netdev_priv(dev); u32 flags = bp->flags; u32 changes; int rc = 0; bool re_init = false; - bool update_tpa = false; flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; if (features & NETIF_F_GRO_HW) @@ -12269,6 +12527,8 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (features & NETIF_F_NTUPLE) flags |= BNXT_FLAG_RFS; + else + bnxt_clear_usr_fltrs(bp, true); changes = flags ^ bp->flags; if (changes & BNXT_FLAG_TPA) { @@ -12282,6 +12542,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (changes & ~BNXT_FLAG_TPA) re_init = true; + if (changes & BNXT_FLAG_RFS) + update_ntuple = true; + if (flags != bp->flags) { u32 old_flags = bp->flags; @@ -12292,14 +12555,12 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) return rc; } - if (re_init) { - bnxt_close_nic(bp, false, false); - bp->flags = flags; - if (update_tpa) - bnxt_set_ring_params(bp); + if (update_ntuple) + return bnxt_reinit_features(bp, true, false, flags, update_tpa); + + if (re_init) + return bnxt_reinit_features(bp, false, false, flags, update_tpa); - return bnxt_open_nic(bp, false, false); - } if (update_tpa) { bp->flags = flags; rc = bnxt_set_tpa(bp, @@ -13129,9 +13390,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int tx_xdp) { int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; - int tx_rings_needed, stats; + struct bnxt_hw_rings hwr = {0}; int rx_rings = rx; - int cp, vnics; if (tcs) tx_sets = tcs; @@ -13144,26 +13404,27 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (bp->flags & BNXT_FLAG_AGG_RINGS) rx_rings <<= 1; - tx_rings_needed = tx * tx_sets + tx_xdp; - if (max_tx < tx_rings_needed) + hwr.rx = rx_rings; + hwr.tx = tx * tx_sets + tx_xdp; + if (max_tx < hwr.tx) return 
-ENOMEM; - vnics = 1; - if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5_PLUS)) == - BNXT_FLAG_RFS) - vnics += rx; + hwr.vnic = bnxt_get_total_vnics(bp, rx); - tx_cp = __bnxt_num_tx_to_cp(bp, tx_rings_needed, tx_sets, tx_xdp); - cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; - if (max_cp < cp) + tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); + hwr.cp = sh ? max_t(int, tx_cp, rx) : tx_cp + rx; + if (max_cp < hwr.cp) return -ENOMEM; - stats = cp; + hwr.stat = hwr.cp; if (BNXT_NEW_RM(bp)) { - cp += bnxt_get_ulp_msix_num(bp); - stats += bnxt_get_ulp_stat_ctxs(bp); + hwr.cp += bnxt_get_ulp_msix_num(bp); + hwr.stat += bnxt_get_ulp_stat_ctxs(bp); + hwr.grp = rx; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); } - return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, - stats, vnics); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + hwr.cp_p5 = hwr.tx + rx; + return bnxt_hwrm_check_rings(bp, &hwr); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -13766,6 +14027,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) return rc; eth_hw_addr_set(dev, addr->sa_data); + bnxt_clear_usr_fltrs(bp, true); if (netif_running(dev)) { bnxt_close_nic(bp, false, false); rc = bnxt_open_nic(bp, false, false); @@ -13888,7 +14150,7 @@ u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, if (skb) return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; - vnic = &bp->vnic_info[0]; + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); } @@ -13899,7 +14161,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, int bit_id; spin_lock_bh(&bp->ntp_fltr_lock); - bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, BNXT_MAX_FLTR, 0); + bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); if (bit_id < 0) { spin_unlock_bh(&bp->ntp_fltr_lock); return -ENOMEM; @@ -13911,6 +14173,7 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, head = &bp->ntp_fltr_hash_tbl[idx]; hlist_add_head_rcu(&fltr->base.hash, head); set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); + bnxt_insert_usr_fltr(bp, &fltr->base); bp->ntp_fltr_count++; spin_unlock_bh(&bp->ntp_fltr_lock); return 0; @@ -13919,45 +14182,39 @@ int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, struct bnxt_ntuple_filter *f2) { + struct bnxt_flow_masks *masks1 = &f1->fmasks; + struct bnxt_flow_masks *masks2 = &f2->fmasks; struct flow_keys *keys1 = &f1->fkeys; struct flow_keys *keys2 = &f2->fkeys; - if (f1->ntuple_flags != f2->ntuple_flags) - return false; - if (keys1->basic.n_proto != keys2->basic.n_proto || keys1->basic.ip_proto != keys2->basic.ip_proto) return false; if (keys1->basic.n_proto == htons(ETH_P_IP)) { - if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && - keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src) || - ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && - keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)) + if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || + masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src || + keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst || + masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst) return false; } else { - if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) && - memcmp(&keys1->addrs.v6addrs.src, - &keys2->addrs.v6addrs.src, - sizeof(keys1->addrs.v6addrs.src))) || - ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) && - 
memcmp(&keys1->addrs.v6addrs.dst, - &keys2->addrs.v6addrs.dst, - sizeof(keys1->addrs.v6addrs.dst)))) + if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src, + &keys2->addrs.v6addrs.src) || + !ipv6_addr_equal(&masks1->addrs.v6addrs.src, + &masks2->addrs.v6addrs.src) || + !ipv6_addr_equal(&keys1->addrs.v6addrs.dst, + &keys2->addrs.v6addrs.dst) || + !ipv6_addr_equal(&masks1->addrs.v6addrs.dst, + &masks2->addrs.v6addrs.dst)) return false; } - if (((f1->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) && - keys1->ports.src != keys2->ports.src) || - ((f1->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) && - keys1->ports.dst != keys2->ports.dst)) - return false; - - if (keys1->control.flags == keys2->control.flags && - f1->l2_fltr == f2->l2_fltr) - return true; - - return false; + return keys1->ports.src == keys2->ports.src && + masks1->ports.src == masks2->ports.src && + keys1->ports.dst == keys2->ports.dst && + masks1->ports.dst == masks2->ports.dst && + keys1->control.flags == keys2->control.flags && + f1->l2_fltr == f2->l2_fltr; } struct bnxt_ntuple_filter * @@ -13988,7 +14245,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u32 flags; if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { - l2_fltr = bp->vnic_info[0].l2_filters[0]; + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; atomic_inc(&l2_fltr->refcnt); } else { struct bnxt_l2_key key; @@ -14022,10 +14279,13 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, rc = -EPROTONOSUPPORT; goto err_free; } - if (fkeys->basic.n_proto == htons(ETH_P_IPV6) && - bp->hwrm_spec_code < 0x10601) { - rc = -EPROTONOSUPPORT; - goto err_free; + new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL; + if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { + if (bp->hwrm_spec_code < 0x10601) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL; } flags = fkeys->control.flags; if (((flags & FLOW_DIS_ENCAPSULATION) && @@ -14033,9 +14293,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, rc = -EPROTONOSUPPORT; goto err_free; } - new_fltr->l2_fltr = l2_fltr; - new_fltr->ntuple_flags = BNXT_NTUPLE_MATCH_ALL; idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); rcu_read_lock(); @@ -14070,6 +14328,7 @@ void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) return; } hlist_del_rcu(&fltr->base.hash); + bnxt_del_one_usr_fltr(bp, &fltr->base); bp->ntp_fltr_count--; spin_unlock_bh(&bp->ntp_fltr_lock); bnxt_del_l2_filter(bp, fltr->l2_fltr); @@ -14669,6 +14928,7 @@ void bnxt_print_device_info(struct bnxt *bp) static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct bnxt_hw_resc *hw_resc; struct net_device *dev; struct bnxt *bp; int rc, max_irqs; @@ -14827,6 +15087,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; + hw_resc = &bp->hw_resc; + bp->max_fltr = hw_resc->max_rx_em_flows + hw_resc->max_rx_wm_flows + + BNXT_L2_FLTR_MAX_FLTR; + /* Older firmware may not report these filters properly */ + if (bp->max_fltr < BNXT_MAX_FLTR) + bp->max_fltr = BNXT_MAX_FLTR; bnxt_init_l2_fltr_tbl(bp); bnxt_set_rx_skb_mode(bp, false); bnxt_set_tpa_flags(bp); @@ -14879,6 +15145,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_dl; + INIT_LIST_HEAD(&bp->usr_fltr_list); + rc = register_netdev(dev); if (rc) goto init_err_cleanup; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h 
b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 47338b48ca20..dd849e715c9b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1213,6 +1213,9 @@ struct bnxt_ring_grp_info { u16 cp_fw_ring_id; }; +#define BNXT_VNIC_DEFAULT 0 +#define BNXT_VNIC_NTUPLE 1 + struct bnxt_vnic_info { u16 fw_vnic_id; /* returned by Chimp during alloc */ #define BNXT_MAX_CTX_PER_VNIC 8 @@ -1252,11 +1255,24 @@ struct bnxt_vnic_info { #define BNXT_VNIC_MCAST_FLAG 4 #define BNXT_VNIC_UCAST_FLAG 8 #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 +#define BNXT_VNIC_NTUPLE_FLAG 0x20 +}; + +struct bnxt_hw_rings { + int tx; + int rx; + int grp; + int cp; + int cp_p5; + int stat; + int vnic; + int rss_ctx; }; struct bnxt_hw_resc { u16 min_rsscos_ctxs; u16 max_rsscos_ctxs; + u16 resv_rsscos_ctxs; u16 min_cp_rings; u16 max_cp_rings; u16 resv_cp_rings; @@ -1281,6 +1297,12 @@ struct bnxt_hw_resc { u16 max_nqs; u16 max_irqs; u16 resv_irqs; + u32 max_encap_records; + u32 max_decap_records; + u32 max_tx_em_flows; + u32 max_tx_wm_flows; + u32 max_rx_em_flows; + u32 max_rx_wm_flows; }; #if defined(CONFIG_BNXT_SRIOV) @@ -1315,12 +1337,6 @@ struct bnxt_pf_info { u16 active_vfs; u16 registered_vfs; u16 max_vfs; - u32 max_encap_records; - u32 max_decap_records; - u32 max_tx_em_flows; - u32 max_tx_wm_flows; - u32 max_rx_em_flows; - u32 max_rx_wm_flows; unsigned long *vf_event_bmap; u16 hwrm_cmd_req_pages; u8 vf_resv_strategy; @@ -1334,6 +1350,7 @@ struct bnxt_pf_info { struct bnxt_filter_base { struct hlist_node hash; + struct list_head list; __le64 filter_id; u8 type; #define BNXT_FLTR_TYPE_NTUPLE 1 @@ -1355,19 +1372,21 @@ struct bnxt_filter_base { struct rcu_head rcu; }; +struct bnxt_flow_masks { + struct flow_dissector_key_ports ports; + struct flow_dissector_key_addrs addrs; +}; + +extern const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE; +extern const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL; +extern const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL; + struct bnxt_ntuple_filter { + /* base filter must be the first member */ struct bnxt_filter_base base; struct flow_keys fkeys; + struct bnxt_flow_masks fmasks; struct bnxt_l2_filter *l2_fltr; - u32 ntuple_flags; -#define BNXT_NTUPLE_MATCH_SRC_IP 1 -#define BNXT_NTUPLE_MATCH_DST_IP 2 -#define BNXT_NTUPLE_MATCH_SRC_PORT 4 -#define BNXT_NTUPLE_MATCH_DST_PORT 8 -#define BNXT_NTUPLE_MATCH_ALL (BNXT_NTUPLE_MATCH_SRC_IP | \ - BNXT_NTUPLE_MATCH_DST_IP | \ - BNXT_NTUPLE_MATCH_SRC_PORT | \ - BNXT_NTUPLE_MATCH_DST_PORT) u32 flow_id; }; @@ -1394,6 +1413,7 @@ struct bnxt_ipv6_tuple { #define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4) struct bnxt_l2_filter { + /* base filter must be the first member */ struct bnxt_filter_base base; struct bnxt_l2_key l2_key; atomic_t refcnt; @@ -2217,6 +2237,14 @@ struct bnxt { #define BNXT_RSS_CAP_UDP_RSS_CAP BIT(1) #define BNXT_RSS_CAP_NEW_RSS_CAP BIT(2) #define BNXT_RSS_CAP_RSS_TCAM BIT(3) +#define BNXT_RSS_CAP_AH_V4_RSS_CAP BIT(4) +#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5) +#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6) +#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7) + + u8 rss_hash_key[HW_HASH_KEY_SIZE]; + u8 rss_hash_key_valid:1; + u8 rss_hash_key_updated:1; u16 max_mtu; u8 max_tc; @@ -2301,12 +2329,17 @@ struct bnxt { #define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35) #define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(36) #define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(37) + #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(38) + #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39) u32 fw_dbg_cap; #define 
BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) #define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \ ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC)) +#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \ + (BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3)) + u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; @@ -2428,6 +2461,7 @@ struct bnxt { unsigned long *ntp_fltr_bmap; int ntp_fltr_count; + int max_fltr; #define BNXT_L2_FLTR_MAX_FLTR 1024 #define BNXT_L2_FLTR_HASH_SIZE 32 @@ -2437,12 +2471,14 @@ struct bnxt { u32 hash_seed; u64 toeplitz_prefix; + struct list_head usr_fltr_list; + /* To protect link related settings during link changes and * ethtool settings changes. */ struct mutex link_lock; struct bnxt_link_info link_info; - struct ethtool_eee eee; + struct ethtool_keee eee; u32 lpi_tmr_lo; u32 lpi_tmr_hi; @@ -2641,10 +2677,16 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx); void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); +void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr); +void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr); +void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all); int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, bool async_only); int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp); void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr); +struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u16 flags); int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr); int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr); int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index dc4ca706b0e2..1d240a27455a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -968,6 +968,7 @@ static int bnxt_set_channels(struct net_device *dev, return -EINVAL; } + bnxt_clear_usr_fltrs(bp, true); if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -1058,11 +1059,17 @@ static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp, static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd, u32 *rule_locs) { + u32 count; + cmd->data = bp->ntp_fltr_count; rcu_read_lock(); + count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl, + BNXT_L2_FLTR_HASH_SIZE, rule_locs, 0, + cmd->rule_cnt); cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl, BNXT_NTP_FLTR_HASH_SIZE, - rule_locs, 0, cmd->rule_cnt); + rule_locs, count, + cmd->rule_cnt); rcu_read_unlock(); return 0; @@ -1074,13 +1081,44 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) (struct ethtool_rx_flow_spec *)&cmd->fs; struct bnxt_filter_base *fltr_base; struct bnxt_ntuple_filter *fltr; + struct bnxt_flow_masks *fmasks; struct flow_keys *fkeys; int rc = -EINVAL; - if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR) + if (fs->location >= bp->max_fltr) return rc; rcu_read_lock(); + fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, + BNXT_L2_FLTR_HASH_SIZE, + fs->location); + if (fltr_base) { + struct ethhdr *h_ether = &fs->h_u.ether_spec; + struct ethhdr *m_ether = &fs->m_u.ether_spec; + struct bnxt_l2_filter *l2_fltr; + struct bnxt_l2_key *l2_key; + + l2_fltr = container_of(fltr_base, struct 
bnxt_l2_filter, base); + l2_key = &l2_fltr->l2_key; + fs->flow_type = ETHER_FLOW; + ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr); + eth_broadcast_addr(m_ether->h_dest); + if (l2_key->vlan) { + struct ethtool_flow_ext *m_ext = &fs->m_ext; + struct ethtool_flow_ext *h_ext = &fs->h_ext; + + fs->flow_type |= FLOW_EXT; + m_ext->vlan_tci = htons(0xfff); + h_ext->vlan_tci = htons(l2_key->vlan); + } + if (fltr_base->flags & BNXT_ACT_RING_DST) + fs->ring_cookie = fltr_base->rxq; + if (fltr_base->flags & BNXT_ACT_FUNC_DST) + fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) << + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + rcu_read_unlock(); + return 0; + } fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, BNXT_NTP_FLTR_HASH_SIZE, fs->location); @@ -1091,59 +1129,74 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); fkeys = &fltr->fkeys; + fmasks = &fltr->fmasks; if (fkeys->basic.n_proto == htons(ETH_P_IP)) { - if (fkeys->basic.ip_proto == IPPROTO_TCP) + if (fkeys->basic.ip_proto == IPPROTO_ICMP || + fkeys->basic.ip_proto == IPPROTO_RAW) { + fs->flow_type = IP_USER_FLOW; + fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + if (fkeys->basic.ip_proto == IPPROTO_ICMP) + fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP; + else + fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW; + fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK; + } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { fs->flow_type = TCP_V4_FLOW; - else if (fkeys->basic.ip_proto == IPPROTO_UDP) + } else if (fkeys->basic.ip_proto == IPPROTO_UDP) { fs->flow_type = UDP_V4_FLOW; - else + } else { goto fltr_err; - - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { - fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; - fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0); - } - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { - fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; - fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0); } - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { + + fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; + fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src; + fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; + fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst; + if (fs->flow_type == TCP_V4_FLOW || + fs->flow_type == UDP_V4_FLOW) { fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; - fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0); - } - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) { + fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src; fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; - fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0); + fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst; } } else { - if (fkeys->basic.ip_proto == IPPROTO_TCP) + if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 || + fkeys->basic.ip_proto == IPPROTO_RAW) { + fs->flow_type = IPV6_USER_FLOW; + if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) + fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6; + else + fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW; + fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK; + } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { fs->flow_type = TCP_V6_FLOW; - else if (fkeys->basic.ip_proto == IPPROTO_UDP) + } else if (fkeys->basic.ip_proto == IPPROTO_UDP) { fs->flow_type = UDP_V6_FLOW; - else + } else { goto fltr_err; - - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_IP) { - *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = - fkeys->addrs.v6addrs.src; - bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6src); - } - if 
(fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_IP) { - *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = - fkeys->addrs.v6addrs.dst; - bnxt_fill_ipv6_mask(fs->m_u.tcp_ip6_spec.ip6dst); } - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_SRC_PORT) { + + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = + fkeys->addrs.v6addrs.src; + *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] = + fmasks->addrs.v6addrs.src; + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = + fkeys->addrs.v6addrs.dst; + *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] = + fmasks->addrs.v6addrs.dst; + if (fs->flow_type == TCP_V6_FLOW || + fs->flow_type == UDP_V6_FLOW) { fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src; - fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0); - } - if (fltr->ntuple_flags & BNXT_NTUPLE_MATCH_DST_PORT) { + fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src; fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; - fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0); + fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst; } } - fs->ring_cookie = fltr->base.rxq; + if (fltr->base.flags & BNXT_ACT_DROP) + fs->ring_cookie = RX_CLS_FLOW_DISC; + else + fs->ring_cookie = fltr->base.rxq; rc = 0; fltr_err: @@ -1152,17 +1205,78 @@ fltr_err: return rc; } -#define IPV4_ALL_MASK ((__force __be32)~0) -#define L4_PORT_ALL_MASK ((__force __be16)~0) +static int bnxt_add_l2_cls_rule(struct bnxt *bp, + struct ethtool_rx_flow_spec *fs) +{ + u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + struct ethhdr *h_ether = &fs->h_u.ether_spec; + struct ethhdr *m_ether = &fs->m_u.ether_spec; + struct bnxt_l2_filter *fltr; + struct bnxt_l2_key key; + u16 vnic_id; + u8 flags; + int rc; + + if (BNXT_CHIP_P5_PLUS(bp)) + return -EOPNOTSUPP; -static bool ipv6_mask_is_full(__be32 mask[4]) + if (!is_broadcast_ether_addr(m_ether->h_dest)) + return -EINVAL; + ether_addr_copy(key.dst_mac_addr, h_ether->h_dest); + key.vlan = 0; + if (fs->flow_type & FLOW_EXT) { + struct ethtool_flow_ext *m_ext = &fs->m_ext; + struct ethtool_flow_ext *h_ext = &fs->h_ext; + + if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci) + return -EINVAL; + key.vlan = ntohs(h_ext->vlan_tci); + } + + if (vf) { + flags = BNXT_ACT_FUNC_DST; + vnic_id = 0xffff; + vf--; + } else { + flags = BNXT_ACT_RING_DST; + vnic_id = bp->vnic_info[ring + 1].fw_vnic_id; + } + fltr = bnxt_alloc_new_l2_filter(bp, &key, flags); + if (IS_ERR(fltr)) + return PTR_ERR(fltr); + + fltr->base.fw_vnic_id = vnic_id; + fltr->base.rxq = ring; + fltr->base.vf_idx = vf; + rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); + if (rc) + bnxt_del_l2_filter(bp, fltr); + else + fs->location = fltr->base.sw_id; + return rc; +} + +static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec, + struct ethtool_usrip4_spec *ip_mask) { - return (mask[0] & mask[1] & mask[2] & mask[3]) == IPV4_ALL_MASK; + if (ip_mask->l4_4_bytes || ip_mask->tos || + ip_spec->ip_ver != ETH_RX_NFC_IP4 || + ip_mask->proto != BNXT_IP_PROTO_FULL_MASK || + (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP)) + return false; + return true; } -static bool ipv6_mask_is_zero(__be32 mask[4]) +static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec, + struct ethtool_usrip6_spec *ip_mask) { - return !(mask[0] | mask[1] | mask[2] | mask[3]); + if (ip_mask->l4_4_bytes || ip_mask->tclass || + ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK || + (ip_spec->l4_proto != IPPROTO_RAW && + ip_spec->l4_proto != IPPROTO_ICMPV6)) + return false; + return true; } static int 
bnxt_add_ntuple_cls_rule(struct bnxt *bp, @@ -1172,6 +1286,7 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); struct bnxt_ntuple_filter *new_fltr, *fltr; struct bnxt_l2_filter *l2_fltr; + struct bnxt_flow_masks *fmasks; u32 flow_type = fs->flow_type; struct flow_keys *fkeys; u32 idx; @@ -1183,17 +1298,42 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf) return -EOPNOTSUPP; + if (flow_type == IP_USER_FLOW) { + if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec, + &fs->m_u.usr_ip4_spec)) + return -EOPNOTSUPP; + } + + if (flow_type == IPV6_USER_FLOW) { + if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec, + &fs->m_u.usr_ip6_spec)) + return -EOPNOTSUPP; + } + new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL); if (!new_fltr) return -ENOMEM; - l2_fltr = bp->vnic_info[0].l2_filters[0]; + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; atomic_inc(&l2_fltr->refcnt); new_fltr->l2_fltr = l2_fltr; + fmasks = &new_fltr->fmasks; fkeys = &new_fltr->fkeys; rc = -EOPNOTSUPP; switch (flow_type) { + case IP_USER_FLOW: { + struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec; + + fkeys->basic.ip_proto = ip_spec->proto; + fkeys->basic.n_proto = htons(ETH_P_IP); + fkeys->addrs.v4addrs.src = ip_spec->ip4src; + fmasks->addrs.v4addrs.src = ip_mask->ip4src; + fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; + fmasks->addrs.v4addrs.dst = ip_mask->ip4dst; + break; + } case TCP_V4_FLOW: case UDP_V4_FLOW: { struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec; @@ -1203,32 +1343,26 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, if (flow_type == UDP_V4_FLOW) fkeys->basic.ip_proto = IPPROTO_UDP; fkeys->basic.n_proto = htons(ETH_P_IP); + fkeys->addrs.v4addrs.src = ip_spec->ip4src; + fmasks->addrs.v4addrs.src = ip_mask->ip4src; + fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; + fmasks->addrs.v4addrs.dst = ip_mask->ip4dst; + fkeys->ports.src = ip_spec->psrc; + fmasks->ports.src = ip_mask->psrc; + fkeys->ports.dst = ip_spec->pdst; + fmasks->ports.dst = ip_mask->pdst; + break; + } + case IPV6_USER_FLOW: { + struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec; - if (ip_mask->ip4src == IPV4_ALL_MASK) { - fkeys->addrs.v4addrs.src = ip_spec->ip4src; - new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP; - } else if (ip_mask->ip4src) { - goto ntuple_err; - } - if (ip_mask->ip4dst == IPV4_ALL_MASK) { - fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; - new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP; - } else if (ip_mask->ip4dst) { - goto ntuple_err; - } - - if (ip_mask->psrc == L4_PORT_ALL_MASK) { - fkeys->ports.src = ip_spec->psrc; - new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT; - } else if (ip_mask->psrc) { - goto ntuple_err; - } - if (ip_mask->pdst == L4_PORT_ALL_MASK) { - fkeys->ports.dst = ip_spec->pdst; - new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT; - } else if (ip_mask->pdst) { - goto ntuple_err; - } + fkeys->basic.ip_proto = ip_spec->l4_proto; + fkeys->basic.n_proto = htons(ETH_P_IPV6); + fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src; + fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src; + fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst; + fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst; break; } case TCP_V6_FLOW: @@ -1241,40 +1375,21 @@ static int 
bnxt_add_ntuple_cls_rule(struct bnxt *bp, fkeys->basic.ip_proto = IPPROTO_UDP; fkeys->basic.n_proto = htons(ETH_P_IPV6); - if (ipv6_mask_is_full(ip_mask->ip6src)) { - fkeys->addrs.v6addrs.src = - *(struct in6_addr *)&ip_spec->ip6src; - new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_IP; - } else if (!ipv6_mask_is_zero(ip_mask->ip6src)) { - goto ntuple_err; - } - if (ipv6_mask_is_full(ip_mask->ip6dst)) { - fkeys->addrs.v6addrs.dst = - *(struct in6_addr *)&ip_spec->ip6dst; - new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_IP; - } else if (!ipv6_mask_is_zero(ip_mask->ip6dst)) { - goto ntuple_err; - } - - if (ip_mask->psrc == L4_PORT_ALL_MASK) { - fkeys->ports.src = ip_spec->psrc; - new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_SRC_PORT; - } else if (ip_mask->psrc) { - goto ntuple_err; - } - if (ip_mask->pdst == L4_PORT_ALL_MASK) { - fkeys->ports.dst = ip_spec->pdst; - new_fltr->ntuple_flags |= BNXT_NTUPLE_MATCH_DST_PORT; - } else if (ip_mask->pdst) { - goto ntuple_err; - } + fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src; + fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src; + fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst; + fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst; + fkeys->ports.src = ip_spec->psrc; + fmasks->ports.src = ip_mask->psrc; + fkeys->ports.dst = ip_spec->pdst; + fmasks->ports.dst = ip_mask->pdst; break; } default: rc = -EOPNOTSUPP; goto ntuple_err; } - if (!new_fltr->ntuple_flags) + if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks))) goto ntuple_err; idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL); @@ -1287,8 +1402,11 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, } rcu_read_unlock(); - new_fltr->base.rxq = ring; new_fltr->base.flags = BNXT_ACT_NO_AGING; + if (fs->ring_cookie == RX_CLS_FLOW_DISC) + new_fltr->base.flags |= BNXT_ACT_DROP; + else + new_fltr->base.rxq = ring; __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state); rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); if (!rc) { @@ -1321,6 +1439,18 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (fs->location != RX_CLS_LOC_ANY) return -EINVAL; + flow_type = fs->flow_type; + if ((flow_type == IP_USER_FLOW || + flow_type == IPV6_USER_FLOW) && + !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO)) + return -EOPNOTSUPP; + if (flow_type & (FLOW_MAC_EXT | FLOW_RSS)) + return -EINVAL; + flow_type &= ~FLOW_EXT; + + if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW) + return bnxt_add_ntuple_cls_rule(bp, fs); + ring = ethtool_get_flow_spec_ring(fs->ring_cookie); vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); if (BNXT_VF(bp) && vf) @@ -1330,12 +1460,8 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (!vf && ring >= bp->rx_nr_rings) return -EINVAL; - flow_type = fs->flow_type; - if (flow_type & (FLOW_MAC_EXT | FLOW_RSS)) - return -EINVAL; - flow_type &= ~FLOW_EXT; if (flow_type == ETHER_FLOW) - rc = -EOPNOTSUPP; + rc = bnxt_add_l2_cls_rule(bp, fs); else rc = bnxt_add_ntuple_cls_rule(bp, fs); return rc; @@ -1346,11 +1472,22 @@ static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd) struct ethtool_rx_flow_spec *fs = &cmd->fs; struct bnxt_filter_base *fltr_base; struct bnxt_ntuple_filter *fltr; + u32 id = fs->location; rcu_read_lock(); + fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, + BNXT_L2_FLTR_HASH_SIZE, id); + if (fltr_base) { + struct bnxt_l2_filter *l2_fltr; + + l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base); + 
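/* Editor's note: the get and delete paths in this hunk share one rule-ID
 * space, so bnxt_grxclsrule() above and bnxt_srxclsrldel() below both probe
 * the L2 filter hash table first and only then fall back to the n-tuple
 * table. A condensed sketch of that lookup order, run under rcu_read_lock();
 * bnxt_resolve_fltr_id() is a hypothetical name for what the driver
 * open-codes in both callers.
 */
static struct bnxt_filter_base *
bnxt_resolve_fltr_id(struct bnxt *bp, u32 id, bool *is_l2)
{
	struct bnxt_filter_base *fltr_base;

	/* IDs are unique across both tables; try the smaller L2 table first. */
	fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl,
					  BNXT_L2_FLTR_HASH_SIZE, id);
	if (fltr_base) {
		*is_l2 = true;
		return fltr_base;
	}
	*is_l2 = false;
	return bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl,
				     BNXT_NTP_FLTR_HASH_SIZE, id);
}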
rcu_read_unlock(); + bnxt_hwrm_l2_filter_free(bp, l2_fltr); + bnxt_del_l2_filter(bp, l2_fltr); + return 0; + } fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, - BNXT_NTP_FLTR_HASH_SIZE, - fs->location); + BNXT_NTP_FLTR_HASH_SIZE, id); if (!fltr_base) { rcu_read_unlock(); return -ENOENT; @@ -1396,8 +1533,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) cmd->data |= RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; fallthrough; - case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: + if (bp->rss_hash_cfg & + (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4)) + cmd->data |= RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: case AH_V4_FLOW: case ESP_V4_FLOW: case IPV4_FLOW: @@ -1415,8 +1558,14 @@ static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) cmd->data |= RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3; fallthrough; - case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: + if (bp->rss_hash_cfg & + (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6)) + cmd->data |= RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: case AH_V6_FLOW: case ESP_V6_FLOW: case IPV6_FLOW: @@ -1463,6 +1612,24 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; if (tuple == 4) rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; + } else if (cmd->flow_type == AH_ESP_V4_FLOW) { + if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) || + !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP))) + return -EINVAL; + rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4); + if (tuple == 4) + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4; + } else if (cmd->flow_type == AH_ESP_V6_FLOW) { + if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) || + !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP))) + return -EINVAL; + rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6); + if (tuple == 4) + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6; } else if (tuple == 4) { return -EINVAL; } @@ -1521,7 +1688,7 @@ static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLCNT: cmd->rule_cnt = bp->ntp_fltr_count; - cmd->data = BNXT_NTP_FLTR_MAX_FLTR | RX_CLS_LOC_SPECIAL; + cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL; break; case ETHTOOL_GRXCLSRLALL: @@ -1596,7 +1763,7 @@ static int bnxt_get_rxfh(struct net_device *dev, if (!bp->vnic_info) return 0; - vnic = &bp->vnic_info[0]; + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; if (rxfh->indir && bp->rss_indir_tbl) { tbl_size = bnxt_get_rxfh_indir_size(dev); for (i = 0; i < tbl_size; i++) @@ -1619,8 +1786,10 @@ static int bnxt_set_rxfh(struct net_device *dev, if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->key) - return -EOPNOTSUPP; + if (rxfh->key) { + memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); + bp->rss_hash_key_updated = true; + } if (rxfh->indir) { u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev); @@ -1631,7 +1800,7 @@ static int bnxt_set_rxfh(struct net_device *dev, if (pad) memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); } - + bnxt_clear_usr_fltrs(bp, false); if (netif_running(bp->dev)) { bnxt_close_nic(bp, false, false); rc = 
bnxt_open_nic(bp, false, false); @@ -1751,31 +1920,21 @@ static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) return 0; } -u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) +/* TODO: support 25GB, 40GB, 50GB with different cable type */ +void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds) { - u32 speed_mask = 0; + linkmode_zero(mode); - /* TODO: support 25GB, 40GB, 50GB with different cable type */ - /* set the advertised speeds */ if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) - speed_mask |= ADVERTISED_100baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode); if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) - speed_mask |= ADVERTISED_1000baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode); if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) - speed_mask |= ADVERTISED_2500baseX_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode); if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) - speed_mask |= ADVERTISED_10000baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode); if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) - speed_mask |= ADVERTISED_40000baseCR4_Full; - - if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH) - speed_mask |= ADVERTISED_Pause; - else if (fw_pause & BNXT_LINK_PAUSE_TX) - speed_mask |= ADVERTISED_Asym_Pause; - else if (fw_pause & BNXT_LINK_PAUSE_RX) - speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - - return speed_mask; + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode); } enum bnxt_media_type { @@ -2643,23 +2802,22 @@ bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes) return 0; } -u16 bnxt_get_fw_auto_link_speeds(u32 advertising) +u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode) { u16 fw_speed_mask = 0; - /* only support autoneg at speed 100, 1000, and 10000 */ - if (advertising & (ADVERTISED_100baseT_Full | - ADVERTISED_100baseT_Half)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode)) fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB; - } - if (advertising & (ADVERTISED_1000baseT_Full | - ADVERTISED_1000baseT_Half)) { + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode)) fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB; - } - if (advertising & ADVERTISED_10000baseT_Full) + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode)) fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; - if (advertising & ADVERTISED_40000baseCR4_Full) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode)) fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB; return fw_speed_mask; @@ -3884,12 +4042,13 @@ static int bnxt_set_eeprom(struct net_device *dev, eeprom->len); } -static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) +static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); struct bnxt *bp = netdev_priv(dev); - struct ethtool_eee *eee = &bp->eee; + struct ethtool_keee *eee = &bp->eee; struct bnxt_link_info *link_info = &bp->link_info; - u32 advertising; int rc = 0; if (!BNXT_PHY_CFG_ABLE(bp)) @@ -3899,7 +4058,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) return -EOPNOTSUPP; mutex_lock(&bp->link_lock); - advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + 
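/* Editor's note: the EEE hunks that follow are part of the conversion from
 * the legacy u32 ADVERTISED_* words of struct ethtool_eee to the linkmode
 * bitmaps of struct ethtool_keee. A self-contained sketch of the subset
 * check used by bnxt_set_eee() below; check_eee_subset() is an invented
 * name, the helpers are the stock ones from <linux/linkmode.h>.
 */
#include <linux/ethtool.h>
#include <linux/linkmode.h>

static int check_eee_subset(struct ethtool_keee *edata,
			    const unsigned long *advertising,
			    const unsigned long *supported)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp);

	/* Empty request: advertise what autoneg and EEE support share. */
	if (linkmode_empty(edata->advertised)) {
		linkmode_and(edata->advertised, advertising, supported);
		return 0;
	}
	/* Otherwise every requested bit must be in the autoneg set. */
	if (linkmode_andnot(tmp, edata->advertised, advertising))
		return -EINVAL;
	return 0;
}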
_bnxt_fw_to_linkmode(advertising, link_info->advertising); if (!edata->eee_enabled) goto eee_ok; @@ -3919,16 +4078,15 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata) edata->tx_lpi_timer = eee->tx_lpi_timer; } } - if (!edata->advertised) { - edata->advertised = advertising & eee->supported; - } else if (edata->advertised & ~advertising) { - netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", - edata->advertised, advertising); + if (linkmode_empty(edata->advertised)) { + linkmode_and(edata->advertised, advertising, eee->supported); + } else if (linkmode_andnot(tmp, edata->advertised, advertising)) { + netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n"); rc = -EINVAL; goto eee_exit; } - eee->advertised = edata->advertised; + linkmode_copy(eee->advertised, edata->advertised); eee->tx_lpi_enabled = edata->tx_lpi_enabled; eee->tx_lpi_timer = edata->tx_lpi_timer; eee_ok: @@ -3942,7 +4100,7 @@ eee_exit: return rc; } -static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) +static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata) { struct bnxt *bp = netdev_priv(dev); @@ -3954,12 +4112,12 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata) /* Preserve tx_lpi_timer so that the last value will be used * by default when it is re-enabled. */ - edata->advertised = 0; + linkmode_zero(edata->advertised); edata->tx_lpi_enabled = 0; } if (!bp->eee.eee_active) - edata->lp_advertised = 0; + linkmode_zero(edata->lp_advertised); return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index a8ecef8ab82c..e2ee030237d4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -43,12 +43,14 @@ struct bnxt_led_cfg { #define BNXT_PXP_REG_LEN 0x3110 +#define BNXT_IP_PROTO_FULL_MASK 0xFF + extern const struct ethtool_ops bnxt_ethtool_ops; u32 bnxt_get_rxfh_indir_size(struct net_device *dev); -u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); +void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds); u32 bnxt_fw_to_ethtool_speed(u16); -u16 bnxt_get_fw_auto_link_speeds(u32); +u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode); int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, struct hwrm_nvm_get_dev_info_output *nvm_dev_info); int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index adad188e38b8..cc07660330f5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -684,7 +684,7 @@ static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) timestamp.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(ptp->tx_skb, &timestamp); } else { - netdev_WARN_ONCE(bp->dev, + netdev_warn_once(bp->dev, "TS query for TX timer failed rc = %x\n", rc); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 2d7ae71287b1..7396e2823e32 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1313,14 +1313,13 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, } priv->eee.eee_enabled = enable; - priv->eee.eee_active = enable; priv->eee.tx_lpi_enabled = tx_lpi_enabled; } -static int bcmgenet_get_eee(struct net_device *dev, struct
ethtool_eee *e) +static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct ethtool_eee *p = &priv->eee; + struct ethtool_keee *p = &priv->eee; if (GENET_IS_V1(priv)) return -EOPNOTSUPP; @@ -1328,18 +1327,17 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) if (!dev->phydev) return -ENODEV; - e->eee_enabled = p->eee_enabled; - e->eee_active = p->eee_active; e->tx_lpi_enabled = p->tx_lpi_enabled; e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); return phy_ethtool_get_eee(dev->phydev, e); } -static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) +static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct ethtool_eee *p = &priv->eee; + struct ethtool_keee *p = &priv->eee; + bool active; if (GENET_IS_V1(priv)) return -EOPNOTSUPP; @@ -1352,9 +1350,9 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) if (!p->eee_enabled) { bcmgenet_eee_enable_set(dev, false, false); } else { - p->eee_active = phy_init_eee(dev->phydev, false) >= 0; + active = phy_init_eee(dev->phydev, false) >= 0; bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER); - bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled); + bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled); } return phy_ethtool_set_eee(dev->phydev, e); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 1985c0ec4da2..7523b60b3c1c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -645,7 +645,7 @@ struct bcmgenet_priv { struct bcmgenet_mib_counters mib; - struct ethtool_eee eee; + struct ethtool_keee eee; }; #define GENET_IO_MACRO(name, offset) \ diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 97ea76d443ab..9ada89355747 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -30,6 +30,7 @@ static void bcmgenet_mac_config(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; u32 reg, cmd_bits = 0; + bool active; /* speed */ if (phydev->speed == SPEED_1000) @@ -88,9 +89,9 @@ static void bcmgenet_mac_config(struct net_device *dev) } bcmgenet_umac_writel(priv, reg, UMAC_CMD); - priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0; + active = phy_init_eee(phydev, 0) >= 0; bcmgenet_eee_enable_set(dev, - priv->eee.eee_enabled && priv->eee.eee_active, + priv->eee.eee_enabled && active, priv->eee.tx_lpi_enabled); } @@ -475,6 +476,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv) ppd.wait_func = bcmgenet_mii_wait; ppd.wait_func_data = priv; ppd.bus_name = "bcmgenet MII bus"; + /* Pass a reference to our "main" clock which is used for MDIO + * transfers + */ + ppd.clk = priv->clk; /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD * and is 2 * 32-bits word long, 8 bytes total. 
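Editor's note: passing the clock through the platform data (ppd.clk above) lets the mdio-bcm-unimac driver gate the "main" clock around MDIO transfers itself, which is why the manual clk_prepare_enable()/clk_disable_unprepare() pair disappears from bcmgenet_mii_exit() in the next hunk. A rough registration-side sketch, assuming the companion mdio-bcm-unimac change adds a struct clk * field to unimac_mdio_pdata as the assignment above implies:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/platform_data/mdio-bcm-unimac.h>

static int register_mdio_with_clk(struct platform_device *parent,
				  struct clk *clk)
{
	struct unimac_mdio_pdata ppd = {
		.bus_name = "bcmgenet MII bus",
		/* The MDIO driver prepares/enables this clock for the
		 * duration of each transfer.
		 */
		.clk = clk,
	};
	struct platform_device *mii_pdev;

	mii_pdev = platform_device_register_data(&parent->dev,
						 UNIMAC_MDIO_DRV_NAME,
						 PLATFORM_DEVID_AUTO,
						 &ppd, sizeof(ppd));
	return PTR_ERR_OR_ZERO(mii_pdev);
}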
@@ -673,7 +678,5 @@ void bcmgenet_mii_exit(struct net_device *dev) if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); - clk_prepare_enable(priv->clk); platform_device_unregister(priv->mii_pdev); - clk_disable_unprepare(priv->clk); } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 04964bbe08cf..eee759054aad 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -2338,10 +2338,10 @@ static void tg3_phy_apply_otp(struct tg3 *tp) tg3_phy_toggle_auxctl_smdsp(tp, false); } -static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) +static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee) { u32 val; - struct ethtool_eee *dest = &tp->eee; + struct ethtool_keee *dest = &tp->eee; if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) return; @@ -2362,13 +2362,13 @@ static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee) /* Pull lp advertised settings */ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val)) return; - dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); + mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val); /* Pull advertised and eee_enabled settings */ if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val)) return; dest->eee_enabled = !!val; - dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val); + mii_eee_cap1_mod_linkmode_t(dest->advertised, val); /* Pull tx_lpi_enabled */ val = tr32(TG3_CPMU_EEE_MODE); @@ -4354,23 +4354,12 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) if (!err) { u32 err2; - val = 0; - /* Advertise 100-BaseTX EEE ability */ - if (advertise & ADVERTISED_100baseT_Full) - val |= MDIO_AN_EEE_ADV_100TX; - /* Advertise 1000-BaseT EEE ability */ - if (advertise & ADVERTISED_1000baseT_Full) - val |= MDIO_AN_EEE_ADV_1000T; - - if (!tp->eee.eee_enabled) { + if (!tp->eee.eee_enabled) val = 0; - tp->eee.advertised = 0; - } else { - tp->eee.advertised = advertise & - (ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full); - } + else + val = ethtool_adv_to_mmd_eee_adv_t(advertise); + mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val); err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); if (err) val = 0; @@ -4618,7 +4607,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp) static bool tg3_phy_eee_config_ok(struct tg3 *tp) { - struct ethtool_eee eee; + struct ethtool_keee eee = {}; if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) return true; @@ -4626,13 +4615,13 @@ static bool tg3_phy_eee_config_ok(struct tg3 *tp) tg3_eee_pull_config(tp, &eee); if (tp->eee.eee_enabled) { - if (tp->eee.advertised != eee.advertised || + if (!linkmode_equal(tp->eee.advertised, eee.advertised) || tp->eee.tx_lpi_timer != eee.tx_lpi_timer || tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled) return false; } else { /* EEE is disabled but we're advertising */ - if (eee.advertised) + if (!linkmode_empty(eee.advertised)) return false; } @@ -14180,7 +14169,7 @@ static int tg3_set_coalesce(struct net_device *dev, return 0; } -static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) +static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata) { struct tg3 *tp = netdev_priv(dev); @@ -14189,7 +14178,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) return -EOPNOTSUPP; } - if (edata->advertised != tp->eee.advertised) { + if (!linkmode_equal(edata->advertised, tp->eee.advertised)) { netdev_warn(tp->dev, "Direct manipulation of EEE 
advertisement is not supported\n"); return -EINVAL; @@ -14202,7 +14191,9 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) return -EINVAL; } - tp->eee = *edata; + tp->eee.eee_enabled = edata->eee_enabled; + tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled; + tp->eee.tx_lpi_timer = edata->tx_lpi_timer; tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED; tg3_warn_mgmt_link_flap(tp); @@ -14217,7 +14208,7 @@ static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } -static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata) +static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata) { struct tg3 *tp = netdev_priv(dev); @@ -15655,10 +15646,13 @@ static int tg3_phy_probe(struct tg3 *tp) tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) { tp->phy_flags |= TG3_PHYFLG_EEE_CAP; - tp->eee.supported = SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full; - tp->eee.advertised = ADVERTISED_100baseT_Full | - ADVERTISED_1000baseT_Full; + linkmode_zero(tp->eee.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + tp->eee.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + tp->eee.supported); + linkmode_copy(tp->eee.advertised, tp->eee.supported); + tp->eee.eee_enabled = 1; tp->eee.tx_lpi_enabled = 1; tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US; diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 5016475e5005..cf1b2b123c7e 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -3419,7 +3419,7 @@ struct tg3 { unsigned int irq_cnt; struct ethtool_coalesce coal; - struct ethtool_eee eee; + struct ethtool_keee eee; /* firmware info */ const char *fw_needed; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 31191b520b58..c32174484a96 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1091,10 +1091,10 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. */ static void -bnad_tx_cleanup(struct delayed_work *work) +bnad_tx_cleanup(struct work_struct *work) { struct bnad_tx_info *tx_info = - container_of(work, struct bnad_tx_info, tx_cleanup_work); + container_of(work, struct bnad_tx_info, tx_cleanup_work.work); struct bnad *bnad = NULL; struct bna_tcb *tcb; unsigned long flags; @@ -1170,7 +1170,7 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. 
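 *
 * (A note on the signature fixes in this file: a workqueue callback must
 * take a struct work_struct *, and a delayed work item recovers its
 * container through the embedded .work member. Roughly, with foo_info and
 * foo_cleanup as placeholder names:
 *
 *	static void foo_cleanup(struct work_struct *work)
 *	{
 *		struct foo_info *info =
 *			container_of(work, struct foo_info,
 *				     cleanup_work.work);
 *	}
 *
 *	INIT_DELAYED_WORK(&info->cleanup_work, foo_cleanup);
 *
 * Passing correctly typed functions is what lets the (work_func_t) casts
 * below be dropped.)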
*/ static void -bnad_rx_cleanup(void *work) +bnad_rx_cleanup(struct work_struct *work) { struct bnad_rx_info *rx_info = container_of(work, struct bnad_rx_info, rx_cleanup_work); @@ -1991,8 +1991,7 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id) } tx_info->tx = tx; - INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, - (work_func_t)bnad_tx_cleanup); + INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, bnad_tx_cleanup); /* Register ISR for the Tx object */ if (intr_info->intr_type == BNA_INTR_T_MSIX) { @@ -2248,8 +2247,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id) rx_info->rx = rx; spin_unlock_irqrestore(&bnad->bna_lock, flags); - INIT_WORK(&rx_info->rx_cleanup_work, - (work_func_t)(bnad_rx_cleanup)); + INIT_WORK(&rx_info->rx_cleanup_work, bnad_rx_cleanup); /* * Init NAPI, so that state is set to NAPI_STATE_SCHED, diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 37bd38d772e8..d266a87297a5 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -872,7 +872,7 @@ error: return NETDEV_TX_OK; } -/* dev_base_lock rwlock held, nominally process context */ +/* rcu_read_lock potentially held, nominally process context */ static void enic_get_stats(struct net_device *netdev, struct rtnl_link_stats64 *net_stats) { diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c index 20fcb20b42ed..66b577835338 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_vic.c +++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c @@ -49,7 +49,8 @@ int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length, tlv->type = htons(type); tlv->length = htons(length); - memcpy(tlv->value, value, length); + unsafe_memcpy(tlv->value, value, length, + /* Flexible array of flexible arrays */); vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1); vp->length = htonl(ntohl(vp->length) + diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index ae0b8b37b9bf..4b15af6b7122 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -240,7 +240,7 @@ static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable) static int tsnep_phy_open(struct tsnep_adapter *adapter) { struct phy_device *phydev; - struct ethtool_eee ethtool_eee; + struct ethtool_keee ethtool_keee; int retval; retval = phy_connect_direct(adapter->netdev, adapter->phydev, @@ -259,8 +259,8 @@ static int tsnep_phy_open(struct tsnep_adapter *adapter) phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); /* disable EEE autoneg, EEE not supported by TSNEP */ - memset(&ethtool_eee, 0, sizeof(ethtool_eee)); - phy_ethtool_set_eee(adapter->phydev, &ethtool_eee); + memset(&ethtool_keee, 0, sizeof(ethtool_keee)); + phy_ethtool_set_eee(adapter->phydev, &ethtool_keee); adapter->phydev->irq = PHY_MAC_INTERRUPT; phy_start(adapter->phydev); @@ -721,17 +721,25 @@ static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter, struct xdp_buff *xdp, - struct netdev_queue *tx_nq, struct tsnep_tx *tx) + struct netdev_queue *tx_nq, struct tsnep_tx *tx, + bool zc) { struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); bool xmit; + u32 type; if (unlikely(!xdpf)) return false; + /* no page pool for zero copy */ + if (zc) + type = TSNEP_TX_TYPE_XDP_NDO; + else + type = TSNEP_TX_TYPE_XDP_TX; + __netif_tx_lock(tx_nq, smp_processor_id()); - xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx,
TSNEP_TX_TYPE_XDP_TX); + xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); /* Avoid transmit queue timeout since we share it with the slow path */ if (xmit) @@ -1260,6 +1268,14 @@ static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) return desc_refilled; } +static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available) +{ + if (desc_available) + xsk_set_rx_need_wakeup(rx->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx->xsk_pool); +} + static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, struct xdp_buff *xdp, int *status, struct netdev_queue *tx_nq, struct tsnep_tx *tx) @@ -1275,7 +1291,7 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, case XDP_PASS: return false; case XDP_TX: - if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) goto out_failure; *status |= TSNEP_XDP_TX; return true; @@ -1325,7 +1341,7 @@ static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, case XDP_PASS: return false; case XDP_TX: - if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) goto out_failure; *status |= TSNEP_XDP_TX; return true; @@ -1621,10 +1637,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); if (xsk_uses_need_wakeup(rx->xsk_pool)) { - if (desc_available) - xsk_set_rx_need_wakeup(rx->xsk_pool); - else - xsk_clear_rx_need_wakeup(rx->xsk_pool); + tsnep_xsk_rx_need_wakeup(rx, desc_available); return done; } @@ -1769,14 +1782,8 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx) * first polling would be too late as need wakeup signalisation would * be delayed for an indefinite time */ - if (xsk_uses_need_wakeup(rx->xsk_pool)) { - int desc_available = tsnep_rx_desc_available(rx); - - if (desc_available) - xsk_set_rx_need_wakeup(rx->xsk_pool); - else - xsk_clear_rx_need_wakeup(rx->xsk_pool); - } + if (xsk_uses_need_wakeup(rx->xsk_pool)) + tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx)); } static bool tsnep_pending(struct tsnep_queue *queue) @@ -2564,8 +2571,7 @@ static int tsnep_probe(struct platform_device *pdev) mutex_init(&adapter->rxnfc_lock); INIT_LIST_HEAD(&adapter->rxnfc_rules); - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); - adapter->addr = devm_ioremap_resource(&pdev->dev, io); + adapter->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &io); if (IS_ERR(adapter->addr)) return PTR_ERR(adapter->addr); netdev->mem_start = io->start; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index bfdbdab443ae..9f07f4947b63 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2402,7 +2402,7 @@ static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) static int enetc_phylink_connect(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - struct ethtool_eee edata; + struct ethtool_keee edata; int err; if (!priv->phylink) { @@ -2418,7 +2418,7 @@ static int enetc_phylink_connect(struct net_device *ndev) } /* disable EEE autoneg, until ENETC driver supports it */ - memset(&edata, 0, sizeof(struct ethtool_eee)); + memset(&edata, 0, sizeof(struct ethtool_keee)); phylink_ethtool_set_eee(priv->phylink, &edata); phylink_start(priv->phylink); diff --git a/drivers/net/ethernet/freescale/fec.h 
b/drivers/net/ethernet/freescale/fec.h index a8fbcada6b01..a19cb2a786fd 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -672,7 +672,7 @@ struct fec_enet_private { unsigned int itr_clk_rate; /* tx lpi eee mode */ - struct ethtool_eee eee; + struct ethtool_keee eee; unsigned int clk_ref_rate; /* ptp clock period in ns*/ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 432523b2c789..207f1f66c117 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -85,8 +85,6 @@ static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep, static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2}; -/* Pause frame feild and FIFO threshold */ -#define FEC_ENET_FCE (1 << 5) #define FEC_ENET_RSEM_V 0x84 #define FEC_ENET_RSFL_V 16 #define FEC_ENET_RAEM_V 0x8 @@ -240,8 +238,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define PKT_MINBUF_SIZE 64 /* FEC receive acceleration */ -#define FEC_RACC_IPDIS (1 << 1) -#define FEC_RACC_PRODIS (1 << 2) +#define FEC_RACC_IPDIS BIT(1) +#define FEC_RACC_PRODIS BIT(2) #define FEC_RACC_SHIFT16 BIT(7) #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) @@ -273,8 +271,23 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define FEC_MMFR_TA (2 << 16) #define FEC_MMFR_DATA(v) (v & 0xffff) /* FEC ECR bits definition */ -#define FEC_ECR_MAGICEN (1 << 2) -#define FEC_ECR_SLEEP (1 << 3) +#define FEC_ECR_RESET BIT(0) +#define FEC_ECR_ETHEREN BIT(1) +#define FEC_ECR_MAGICEN BIT(2) +#define FEC_ECR_SLEEP BIT(3) +#define FEC_ECR_EN1588 BIT(4) +#define FEC_ECR_BYTESWP BIT(8) +/* FEC RCR bits definition */ +#define FEC_RCR_LOOP BIT(0) +#define FEC_RCR_HALFDPX BIT(1) +#define FEC_RCR_MII BIT(2) +#define FEC_RCR_PROMISC BIT(3) +#define FEC_RCR_BC_REJ BIT(4) +#define FEC_RCR_FLOWCTL BIT(5) +#define FEC_RCR_RMII BIT(8) +#define FEC_RCR_10BASET BIT(9) +/* TX WMARK bits */ +#define FEC_TXWMRK_STRFWD BIT(8) #define FEC_MII_TIMEOUT 30000 /* us */ @@ -1062,7 +1075,7 @@ fec_restart(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; - u32 ecntl = 0x2; /* ETHEREN */ + u32 ecntl = FEC_ECR_ETHEREN; /* Whack a reset. We should wait for this. 
* For i.MX6SX SOC, enet use AXI bus, we use disable MAC @@ -1137,18 +1150,18 @@ fec_restart(struct net_device *ndev) fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) rcntl |= (1 << 6); else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) - rcntl |= (1 << 8); + rcntl |= FEC_RCR_RMII; else - rcntl &= ~(1 << 8); + rcntl &= ~FEC_RCR_RMII; /* 1G, 100M or 10M */ if (ndev->phydev) { if (ndev->phydev->speed == SPEED_1000) ecntl |= (1 << 5); else if (ndev->phydev->speed == SPEED_100) - rcntl &= ~(1 << 9); + rcntl &= ~FEC_RCR_10BASET; else - rcntl |= (1 << 9); + rcntl |= FEC_RCR_10BASET; } } else { #ifdef FEC_MIIGSK_ENR @@ -1181,7 +1194,7 @@ fec_restart(struct net_device *ndev) if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && ndev->phydev && ndev->phydev->pause)) { - rcntl |= FEC_ENET_FCE; + rcntl |= FEC_RCR_FLOWCTL; /* set FIFO threshold parameter to reduce overrun */ writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); @@ -1192,7 +1205,7 @@ fec_restart(struct net_device *ndev) /* OPD */ writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); } else { - rcntl &= ~FEC_ENET_FCE; + rcntl &= ~FEC_RCR_FLOWCTL; } #endif /* !defined(CONFIG_M5272) */ @@ -1207,13 +1220,13 @@ fec_restart(struct net_device *ndev) if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* enable ENET endian swap */ - ecntl |= (1 << 8); + ecntl |= FEC_ECR_BYTESWP; /* enable ENET store and forward mode */ - writel(1 << 8, fep->hwp + FEC_X_WMRK); + writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK); } if (fep->bufdesc_ex) - ecntl |= (1 << 4); + ecntl |= FEC_ECR_EN1588; if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && fep->rgmii_txc_dly) @@ -1312,7 +1325,7 @@ static void fec_stop(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); + u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII; u32 val; /* We cannot expect a graceful transmit stop without link !!! 
*/ @@ -1331,7 +1344,7 @@ fec_stop(struct net_device *ndev) if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { writel(0, fep->hwp + FEC_ECNTRL); } else { - writel(1, fep->hwp + FEC_ECNTRL); + writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); udelay(10); } } else { @@ -1345,12 +1358,11 @@ fec_stop(struct net_device *ndev) /* We have to keep ENET enabled to have MII interrupt stay working */ if (fep->quirks & FEC_QUIRK_ENET_MAC && !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { - writel(2, fep->hwp + FEC_ECNTRL); + writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } } - static void fec_timeout(struct net_device *ndev, unsigned int txqueue) { @@ -3122,7 +3134,7 @@ static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_eee *p = &fep->eee; + struct ethtool_keee *p = &fep->eee; unsigned int sleep_cycle, wake_cycle; int ret = 0; @@ -3139,8 +3151,6 @@ static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) } p->tx_lpi_enabled = enable; - p->eee_enabled = enable; - p->eee_active = enable; writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); @@ -3149,10 +3159,10 @@ static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) } static int -fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_eee *p = &fep->eee; + struct ethtool_keee *p = &fep->eee; if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) return -EOPNOTSUPP; @@ -3160,8 +3170,6 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata) if (!netif_running(ndev)) return -ENETDOWN; - edata->eee_enabled = p->eee_enabled; - edata->eee_active = p->eee_active; edata->tx_lpi_timer = p->tx_lpi_timer; edata->tx_lpi_enabled = p->tx_lpi_enabled; @@ -3169,10 +3177,10 @@ fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata) } static int -fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct fec_enet_private *fep = netdev_priv(ndev); - struct ethtool_eee *p = &fep->eee; + struct ethtool_keee *p = &fep->eee; int ret = 0; if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 9ba15d3183d7..758535adc9ff 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -1073,6 +1073,14 @@ int memac_initialization(struct mac_device *mac_dev, unsigned long capabilities; unsigned long *supported; + /* The internal connection to the serdes is XGMII, but this isn't + * really correct for the phy mode (which is the external connection). + * However, this is how all older device trees say that they want + * 10GBASE-R (aka XFI), so just convert it for them. + */ + if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) + mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER; + mac_dev->phylink_ops = &memac_mac_ops; mac_dev->set_promisc = memac_set_promiscuous; mac_dev->change_addr = memac_modify_mac_address; @@ -1139,7 +1147,7 @@ int memac_initialization(struct mac_device *mac_dev, * (and therefore that xfi_pcs cannot be set). If we are defaulting to * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII. 
*/ - if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) + if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_10GBASER) memac->xfi_pcs = pcs; else memac->sgmii_pcs = pcs; @@ -1153,14 +1161,6 @@ int memac_initialization(struct mac_device *mac_dev, goto _return_fm_mac_free; } - /* The internal connection to the serdes is XGMII, but this isn't - * really correct for the phy mode (which is the external connection). - * However, this is how all older device trees say that they want - * 10GBASE-R (aka XFI), so just convert it for them. - */ - if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) - mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER; - /* TODO: The following interface modes are supported by (some) hardware * but not by this driver: * - 1000BASE-KX diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index e3dfbd7a4236..a811238c018d 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1649,7 +1649,7 @@ static int init_phy(struct net_device *dev) struct gfar_private *priv = netdev_priv(dev); phy_interface_t interface = priv->interface; struct phy_device *phydev; - struct ethtool_eee edata; + struct ethtool_keee edata; linkmode_set_bit_array(phy_10_100_features_array, ARRAY_SIZE(phy_10_100_features_array), @@ -1681,7 +1681,7 @@ static int init_phy(struct net_device *dev) phy_support_asym_pause(phydev); /* disable EEE autoneg, EEE not supported by eTSEC */ - memset(&edata, 0, sizeof(struct ethtool_eee)); + memset(&edata, 0, sizeof(struct ethtool_keee)); phy_ethtool_set_eee(phydev, &edata); return 0; diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index c32b3d40beb0..20f5a9e7fae9 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -402,7 +402,7 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags) static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, struct gve_rx_slot_page_info *page_info, - u16 packet_buffer_size, u16 len, + unsigned int truesize, u16 len, struct gve_rx_ctx *ctx) { u32 offset = page_info->page_offset + page_info->pad; @@ -435,10 +435,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi, if (skb != ctx->skb_head) { ctx->skb_head->len += len; ctx->skb_head->data_len += len; - ctx->skb_head->truesize += packet_buffer_size; + ctx->skb_head->truesize += truesize; } skb_add_rx_frag(skb, num_frags, page_info->page, - offset, len, packet_buffer_size); + offset, len, truesize); return ctx->skb_head; } @@ -532,7 +532,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx, memcpy(alloc_page_info.page_address, src, page_info->pad + len); skb = gve_rx_add_frags(napi, &alloc_page_info, - rx->packet_buffer_size, + PAGE_SIZE, len, ctx); u64_stats_update_begin(&rx->statss); diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 5e27470c6b1e..f2d4669c81cf 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -987,7 +987,7 @@ static void sun3_82586_timeout(struct net_device *dev, unsigned int txqueue) { #ifdef DEBUG printk("%s: xmitter timed out, try to restart! 
stat: %02x\n",dev->name,p->scb->cus); - printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status)); + printk("%s: command-stats: %04x\n", dev->name, swab16(p->xmit_cmds[0]->cmd_status)); printk("%s: check, whether you set the right interrupt number!\n",dev->name); #endif sun3_82586_close(dev); diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index d55638ad8704..767358b60507 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -368,6 +368,14 @@ config IGC To compile this driver as a module, choose M here. The module will be called igc. + +config IGC_LEDS + def_bool LEDS_TRIGGER_NETDEV + depends on IGC && LEDS_CLASS + help + Optional support for controlling the NIC LEDs with the netdev + LED trigger. + config IDPF tristate "Intel(R) Infrastructure Data Path Function Support" depends on PCI_MSI diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index a187582d2299..ba9c19e6994c 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -360,23 +360,43 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n) * bits to count nanoseconds leaving the rest for fractional nanoseconds. + * + * Any given INCVALUE also has an associated maximum adjustment value. This + * maximum adjustment value is the largest increase (or decrease) which can be + * safely applied without overflowing the INCVALUE. Since INCVALUE has + * a maximum range of 24 bits, its largest value is 0xFFFFFF. + * + * To understand where the maximum value comes from, consider the following + * equation: + * + * new_incval = base_incval + (base_incval * adjustment) / 1billion + * + * To avoid overflow that means: + * max_incval = base_incval + (base_incval * max_adj) / 1billion + * + * Re-arranging: + * max_adj = floor(((max_incval - base_incval) * 1billion) / base_incval) */ #define INCVALUE_96MHZ 125 #define INCVALUE_SHIFT_96MHZ 17 #define INCPERIOD_SHIFT_96MHZ 2 #define INCPERIOD_96MHZ (12 >> INCPERIOD_SHIFT_96MHZ) +#define MAX_PPB_96MHZ 23999900 /* 23,999,900 ppb */ #define INCVALUE_25MHZ 40 #define INCVALUE_SHIFT_25MHZ 18 #define INCPERIOD_25MHZ 1 +#define MAX_PPB_25MHZ 599999900 /* 599,999,900 ppb */ #define INCVALUE_24MHZ 125 #define INCVALUE_SHIFT_24MHZ 14 #define INCPERIOD_24MHZ 3 +#define MAX_PPB_24MHZ 999999999 /* 999,999,999 ppb */ #define INCVALUE_38400KHZ 26 #define INCVALUE_SHIFT_38400KHZ 19 #define INCPERIOD_38400KHZ 1 +#define MAX_PPB_38400KHZ 230769100 /* 230,769,100 ppb */ /* Another drawback of scaling the incvalue by a large factor is the * 64-bit SYSTIM register overflows more quickly.
This is dealt with diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index fc0f98ea6133..dc553c51d79a 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -2186,7 +2186,7 @@ static int e1000_get_rxnfc(struct net_device *netdev, } } -static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -2223,16 +2223,16 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); if (ret_val) goto release; - edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data); + mii_eee_cap1_mod_linkmode_t(edata->supported, phy_data); /* EEE Advertised */ - edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + mii_eee_cap1_mod_linkmode_t(edata->advertised, adapter->eee_advert); /* EEE Link Partner Advertised */ ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data); if (ret_val) goto release; - edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data); /* EEE PCS Status */ ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data); @@ -2262,11 +2262,13 @@ release: return ret_val; } -static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata) { struct e1000_adapter *adapter = netdev_priv(netdev); + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {}; + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {}; struct e1000_hw *hw = &adapter->hw; - struct ethtool_eee eee_curr; + struct ethtool_keee eee_curr; s32 ret_val; ret_val = e1000e_get_eee(netdev, &eee_curr); @@ -2283,12 +2285,17 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) return -EINVAL; } - if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + supported); + + if (linkmode_andnot(tmp, edata->advertised, supported)) { e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n"); return -EINVAL; } - adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised); hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 02d871bc112a..bbcfd529399b 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -280,8 +280,17 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) switch (hw->mac.type) { case e1000_pch2lan: + adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ; + break; case e1000_pch_lpt: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) + adapter->ptp_clock_info.max_adj = MAX_PPB_96MHZ; + else + adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ; + break; case e1000_pch_spt: + adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ; + break; case e1000_pch_cnp: case e1000_pch_tgp: case e1000_pch_adp: @@ -289,15 +298,14 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch_lnp: case e1000_pch_ptp: case e1000_pch_nvp: - if ((hw->mac.type < e1000_pch_lpt) || - (er32(TSYNCRXCTL) 
& E1000_TSYNCRXCTL_SYSCFI)) { - adapter->ptp_clock_info.max_adj = 24000000 - 1; - break; - } - fallthrough; + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) + adapter->ptp_clock_info.max_adj = MAX_PPB_24MHZ; + else + adapter->ptp_clock_info.max_adj = MAX_PPB_38400KHZ; + break; case e1000_82574: case e1000_82583: - adapter->ptp_clock_info.max_adj = 600000000 - 1; + adapter->ptp_clock_info.max_adj = MAX_PPB_25MHZ; break; default: break; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 9b701615c7c6..ba24f3fa92c3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -687,6 +687,54 @@ struct i40e_pf { }; /** + * __i40e_pf_next_vsi - get next valid VSI + * @pf: pointer to the PF struct + * @idx: pointer to start position number + * + * Find and return next non-NULL VSI pointer in pf->vsi array and + * updates idx position. Returns NULL if no VSI is found. + **/ +static __always_inline struct i40e_vsi * +__i40e_pf_next_vsi(struct i40e_pf *pf, int *idx) +{ + while (*idx < pf->num_alloc_vsi) { + if (pf->vsi[*idx]) + return pf->vsi[*idx]; + (*idx)++; + } + return NULL; +} + +#define i40e_pf_for_each_vsi(_pf, _i, _vsi) \ + for (_i = 0, _vsi = __i40e_pf_next_vsi(_pf, &_i); \ + _vsi; \ + _i++, _vsi = __i40e_pf_next_vsi(_pf, &_i)) + +/** + * __i40e_pf_next_veb - get next valid VEB + * @pf: pointer to the PF struct + * @idx: pointer to start position number + * + * Find and return next non-NULL VEB pointer in pf->veb array and + * updates idx position. Returns NULL if no VEB is found. + **/ +static __always_inline struct i40e_veb * +__i40e_pf_next_veb(struct i40e_pf *pf, int *idx) +{ + while (*idx < I40E_MAX_VEB) { + if (pf->veb[*idx]) + return pf->veb[*idx]; + (*idx)++; + } + return NULL; +} + +#define i40e_pf_for_each_veb(_pf, _i, _veb) \ + for (_i = 0, _veb = __i40e_pf_next_veb(_pf, &_i); \ + _veb; \ + _i++, _veb = __i40e_pf_next_veb(_pf, &_i)) + +/** * i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key * @macaddr: the MAC Address as the base key * @@ -735,7 +783,6 @@ struct i40e_new_mac_filter { struct i40e_veb { struct i40e_pf *pf; u16 idx; - u16 veb_idx; /* index of VEB parent */ u16 seid; u16 uplink_seid; u16 stats_idx; /* index of VEB parent */ @@ -1120,14 +1167,12 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id); static inline struct i40e_vsi * i40e_find_vsi_by_type(struct i40e_pf *pf, u16 type) { + struct i40e_vsi *vsi; int i; - for (i = 0; i < pf->num_alloc_vsi; i++) { - struct i40e_vsi *vsi = pf->vsi[i]; - - if (vsi && vsi->type == type) + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->type == type) return vsi; - } return NULL; } @@ -1309,4 +1354,40 @@ static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw) struct device *i40e_hw_to_dev(struct i40e_hw *hw); +/** + * i40e_pf_get_vsi_by_seid - find VSI by SEID + * @pf: pointer to a PF + * @seid: SEID of the VSI + **/ +static inline struct i40e_vsi * +i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid) +{ + struct i40e_vsi *vsi; + int i; + + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->seid == seid) + return vsi; + + return NULL; +} + +/** + * i40e_pf_get_veb_by_seid - find VEB by SEID + * @pf: pointer to a PF + * @seid: SEID of the VSI + **/ +static inline struct i40e_veb * +i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid) +{ + struct i40e_veb *veb; + int i; + + i40e_pf_for_each_veb(pf, i, veb) + if (veb->seid == seid) + return veb; + + return NULL; +} + #endif /* _I40E_H_ */ diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 9d88ed6105fd..8db1eb0c1768 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -1523,7 +1523,7 @@ void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share, reg = rd32(hw, I40E_PRTDCB_RETSTCC(i)); reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK | I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK | - I40E_PRTDCB_RETSTCC_ETSTC_SHIFT); + I40E_PRTDCB_RETSTCC_ETSTC_MASK); reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_BWSHARE_MASK, bw_share[i]); reg |= FIELD_PREP(I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK, diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index 6b60dc9b7736..d76497566e40 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -43,7 +43,7 @@ #define I40E_LLDP_TLV_SUBTYPE_SHIFT 0 #define I40E_LLDP_TLV_SUBTYPE_MASK (0xFF << I40E_LLDP_TLV_SUBTYPE_SHIFT) #define I40E_LLDP_TLV_OUI_SHIFT 8 -#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFF << I40E_LLDP_TLV_OUI_SHIFT) +#define I40E_LLDP_TLV_OUI_MASK (0xFFFFFFU << I40E_LLDP_TLV_OUI_SHIFT) /* Defines for IEEE ETS TLV */ #define I40E_IEEE_ETS_MAXTC_SHIFT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index b96a92187ab3..8aa43aefe84c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -947,16 +947,16 @@ static int i40e_dcbnl_vsi_del_app(struct i40e_vsi *vsi, static void i40e_dcbnl_del_app(struct i40e_pf *pf, struct i40e_dcb_app_priority_table *app) { + struct i40e_vsi *vsi; int v, err; - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v] && pf->vsi[v]->netdev) { - err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); + i40e_pf_for_each_vsi(pf, v, vsi) + if (vsi->netdev) { + err = i40e_dcbnl_vsi_del_app(vsi, app); dev_dbg(&pf->pdev->dev, "Deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", - pf->vsi[v]->seid, err, app->selector, + vsi->seid, err, app->selector, app->protocolid, app->priority); } - } } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index ef70ddbe9c2f..f9ba45f596c9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -24,31 +24,13 @@ enum ring_type { **/ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) { - int i; - - if (seid < 0) + if (seid < 0) { dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); - else - for (i = 0; i < pf->num_alloc_vsi; i++) - if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) - return pf->vsi[i]; - - return NULL; -} -/** - * i40e_dbg_find_veb - searches for the veb with the given seid - * @pf: the PF structure to search for the veb - * @seid: seid of the veb it is searching for - **/ -static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) -{ - int i; + return NULL; + } - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i] && pf->veb[i]->seid == seid) - return pf->veb[i]; - return NULL; + return i40e_pf_get_vsi_by_seid(pf, seid); } /************************************************************** @@ -653,12 +635,11 @@ out: **/ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int i; - for (i = 0; i < pf->num_alloc_vsi; i++) - if (pf->vsi[i]) - dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", - i, pf->vsi[i]->seid); + i40e_pf_for_each_vsi(pf, i, vsi) + 
dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, vsi->seid); } /** @@ -696,15 +677,14 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) { struct i40e_veb *veb; - veb = i40e_dbg_find_veb(pf, seid); + veb = i40e_pf_get_veb_by_seid(pf, seid); if (!veb) { dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); return; } dev_info(&pf->pdev->dev, - "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d mode=%s\n", - veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, - veb->uplink_seid, + "veb idx=%d stats_ic=%d seid=%d uplink=%d mode=%s\n", + veb->idx, veb->stats_idx, veb->seid, veb->uplink_seid, veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); i40e_dbg_dump_eth_stats(pf, &veb->stats); } @@ -718,11 +698,8 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) struct i40e_veb *veb; int i; - for (i = 0; i < I40E_MAX_VEB; i++) { - veb = pf->veb[i]; - if (veb) - i40e_dbg_dump_veb_seid(pf, veb->seid); - } + i40e_pf_for_each_veb(pf, i, veb) + i40e_dbg_dump_veb_seid(pf, veb->seid); } /** @@ -851,10 +828,14 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } else if (strncmp(cmd_buf, "add relay", 9) == 0) { struct i40e_veb *veb; - int uplink_seid, i; + u8 enabled_tc = 0x1; + int uplink_seid; cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid); - if (cnt != 2) { + if (cnt == 0) { + uplink_seid = 0; + vsi_seid = 0; + } else if (cnt != 2) { dev_info(&pf->pdev->dev, "add relay: bad command string, cnt=%d\n", cnt); @@ -866,33 +847,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } - vsi = i40e_dbg_find_vsi(pf, vsi_seid); - if (!vsi) { - dev_info(&pf->pdev->dev, - "add relay: VSI %d not found\n", vsi_seid); - goto command_write_done; - } - - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) - break; - if (i >= I40E_MAX_VEB && uplink_seid != 0 && - uplink_seid != pf->mac_seid) { + if (uplink_seid != 0 && uplink_seid != pf->mac_seid) { dev_info(&pf->pdev->dev, "add relay: relay uplink %d not found\n", uplink_seid); goto command_write_done; + } else if (uplink_seid) { + vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid); + if (!vsi) { + dev_info(&pf->pdev->dev, + "add relay: VSI %d not found\n", + vsi_seid); + goto command_write_done; + } + enabled_tc = vsi->tc_config.enabled_tc; + } else if (vsi_seid) { + dev_info(&pf->pdev->dev, + "add relay: VSI must be 0 for floating relay\n"); + goto command_write_done; } - veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, - vsi->tc_config.enabled_tc); + veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc); if (veb) dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid); else dev_info(&pf->pdev->dev, "add relay failed\n"); } else if (strncmp(cmd_buf, "del relay", 9) == 0) { + struct i40e_veb *veb; int i; + cnt = sscanf(&cmd_buf[9], "%i", &veb_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, @@ -906,9 +890,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } /* find the veb */ - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i] && pf->veb[i]->seid == veb_seid) + i40e_pf_for_each_veb(pf, i, veb) + if (veb->seid == veb_seid) break; + if (i >= I40E_MAX_VEB) { dev_info(&pf->pdev->dev, "del relay: relay %d not found\n", veb_seid); @@ -916,7 +901,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, } dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid); - i40e_veb_release(pf->veb[i]); + i40e_veb_release(veb); } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { unsigned int v; int ret; @@ -1251,8 +1236,8 @@ static ssize_t 
i40e_dbg_command_write(struct file *filp, if (cnt == 0) { int i; - for (i = 0; i < pf->num_alloc_vsi; i++) - i40e_vsi_reset_stats(pf->vsi[i]); + i40e_pf_for_each_vsi(pf, i, vsi) + i40e_vsi_reset_stats(vsi); dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); } else if (cnt == 1) { vsi = i40e_dbg_find_vsi(pf, vsi_seid); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c841779713f6..42e7e6cdaa6d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -5644,7 +5644,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev, return 0; } -static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +static int i40e_get_eee(struct net_device *netdev, struct ethtool_keee *edata) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp phy_cfg; @@ -5664,16 +5664,12 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) if (phy_cfg.eee_capability == 0) return -EOPNOTSUPP; - edata->supported = SUPPORTED_Autoneg; - edata->lp_advertised = edata->supported; - /* Get current configuration */ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL); if (status) return -EAGAIN; - edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U; - edata->eee_enabled = !!edata->advertised; + edata->eee_enabled = !!phy_cfg.eee_capability; edata->tx_lpi_enabled = pf->stats.tx_lpi_status; edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status; @@ -5682,7 +5678,7 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) } static int i40e_is_eee_param_supported(struct net_device *netdev, - struct ethtool_eee *edata) + struct ethtool_keee *edata) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5691,7 +5687,6 @@ static int i40e_is_eee_param_supported(struct net_device *netdev, u32 value; const char *name; } param[] = { - {edata->advertised & ~SUPPORTED_Autoneg, "advertise"}, {edata->tx_lpi_timer, "tx-timer"}, {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"} }; @@ -5709,7 +5704,7 @@ static int i40e_is_eee_param_supported(struct net_device *netdev, return 0; } -static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +static int i40e_set_eee(struct net_device *netdev, struct ethtool_keee *edata) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp abilities; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6e7fd473abfd..f12092cdb1f0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -310,11 +310,12 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id) **/ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) { + struct i40e_vsi *vsi; int i; - for (i = 0; i < pf->num_alloc_vsi; i++) - if (pf->vsi[i] && (pf->vsi[i]->id == id)) - return pf->vsi[i]; + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->id == id) + return vsi; return NULL; } @@ -552,24 +553,19 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi) **/ void i40e_pf_reset_stats(struct i40e_pf *pf) { + struct i40e_veb *veb; int i; memset(&pf->stats, 0, sizeof(pf->stats)); memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); pf->stat_offsets_loaded = false; - for (i = 0; i < I40E_MAX_VEB; i++) { - if (pf->veb[i]) { - 
memset(&pf->veb[i]->stats, 0, - sizeof(pf->veb[i]->stats)); - memset(&pf->veb[i]->stats_offsets, 0, - sizeof(pf->veb[i]->stats_offsets)); - memset(&pf->veb[i]->tc_stats, 0, - sizeof(pf->veb[i]->tc_stats)); - memset(&pf->veb[i]->tc_stats_offsets, 0, - sizeof(pf->veb[i]->tc_stats_offsets)); - pf->veb[i]->stat_offsets_loaded = false; - } + i40e_pf_for_each_veb(pf, i, veb) { + memset(&veb->stats, 0, sizeof(veb->stats)); + memset(&veb->stats_offsets, 0, sizeof(veb->stats_offsets)); + memset(&veb->tc_stats, 0, sizeof(veb->tc_stats)); + memset(&veb->tc_stats_offsets, 0, sizeof(veb->tc_stats_offsets)); + veb->stat_offsets_loaded = false; } pf->hw_csum_rx_error = 0; } @@ -2879,6 +2875,7 @@ err_no_memory_locked: **/ static void i40e_sync_filters_subtask(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int v; if (!pf) @@ -2890,11 +2887,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf) return; } - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v] && - (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && - !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { - int ret = i40e_sync_vsi_filters(pf->vsi[v]); + i40e_pf_for_each_vsi(pf, v, vsi) { + if ((vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) && + !test_bit(__I40E_VSI_RELEASING, vsi->state)) { + int ret = i40e_sync_vsi_filters(vsi); if (ret) { /* come back and try again later */ @@ -4926,27 +4922,23 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi) void i40e_vsi_stop_rings(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - int pf_q, err, q_end; + u32 pf_q, tx_q_end, rx_q_end; /* When port TX is suspended, don't wait */ if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) return i40e_vsi_stop_rings_no_wait(vsi); - q_end = vsi->base_queue + vsi->num_queue_pairs; - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) - i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false); + tx_q_end = vsi->base_queue + + vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 
2 : 1); + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) + i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false); - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) { - err = i40e_control_wait_rx_q(pf, pf_q, false); - if (err) - dev_info(&pf->pdev->dev, - "VSI seid %d Rx ring %d disable timeout\n", - vsi->seid, pf_q); - } + rx_q_end = vsi->base_queue + vsi->num_queue_pairs; + for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++) + i40e_control_rx_q(pf, pf_q, false); msleep(I40E_DISABLE_TX_GAP_MSEC); - pf_q = vsi->base_queue; - for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) + for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); i40e_vsi_wait_queues_disabled(vsi); @@ -5170,6 +5162,7 @@ static void i40e_reset_interrupt_capability(struct i40e_pf *pf) **/ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int i; if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) @@ -5179,9 +5172,10 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) I40E_IWARP_IRQ_PILE_ID); i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); - for (i = 0; i < pf->num_alloc_vsi; i++) - if (pf->vsi[i]) - i40e_vsi_free_q_vectors(pf->vsi[i]); + + i40e_pf_for_each_vsi(pf, i, vsi) + i40e_vsi_free_q_vectors(vsi); + i40e_reset_interrupt_capability(pf); } @@ -5278,12 +5272,11 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi) **/ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int v; - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v]) - i40e_quiesce_vsi(pf->vsi[v]); - } + i40e_pf_for_each_vsi(pf, v, vsi) + i40e_quiesce_vsi(vsi); } /** @@ -5292,12 +5285,11 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) **/ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int v; - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v]) - i40e_unquiesce_vsi(pf->vsi[v]); - } + i40e_pf_for_each_vsi(pf, v, vsi) + i40e_unquiesce_vsi(vsi); } /** @@ -5358,14 +5350,13 @@ wait_rx: **/ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int v, ret = 0; - for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { - if (pf->vsi[v]) { - ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); - if (ret) - break; - } + i40e_pf_for_each_vsi(pf, v, vsi) { + ret = i40e_vsi_wait_queues_disabled(vsi); + if (ret) + break; } return ret; @@ -6782,32 +6773,29 @@ out: **/ static void i40e_dcb_reconfigure(struct i40e_pf *pf) { + struct i40e_vsi *vsi; + struct i40e_veb *veb; u8 tc_map = 0; int ret; - u8 v; + int v; /* Enable the TCs available on PF to all VEBs */ tc_map = i40e_pf_get_tc_map(pf); if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS) return; - for (v = 0; v < I40E_MAX_VEB; v++) { - if (!pf->veb[v]) - continue; - ret = i40e_veb_config_tc(pf->veb[v], tc_map); + i40e_pf_for_each_veb(pf, v, veb) { + ret = i40e_veb_config_tc(veb, tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC for VEB seid=%d\n", - pf->veb[v]->seid); + veb->seid); /* Will try to configure as many components */ } } /* Update each VSI */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (!pf->vsi[v]) - continue; - + i40e_pf_for_each_vsi(pf, v, vsi) { /* - Enable all TCs for the LAN VSI * - For all others keep them at TC0 for now */ @@ -6816,17 +6804,17 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) else tc_map = I40E_DEFAULT_TRAFFIC_CLASS; - ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); + ret = i40e_vsi_config_tc(vsi, tc_map); if (ret) { dev_info(&pf->pdev->dev, "Failed configuring TC 
for VSI seid=%d\n", - pf->vsi[v]->seid); + vsi->seid); /* Will try to configure as many components */ } else { /* Re-configure VSI vectors based on updated TC map */ - i40e_vsi_map_rings_to_vectors(pf->vsi[v]); - if (pf->vsi[v]->netdev) - i40e_dcbnl_set_all(pf->vsi[v]); + i40e_vsi_map_rings_to_vectors(vsi); + if (vsi->netdev) + i40e_dcbnl_set_all(vsi); } } } @@ -9261,7 +9249,9 @@ int i40e_close(struct net_device *netdev) **/ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) { + struct i40e_vsi *vsi; u32 val; + int i; /* do the biggest reset indicated */ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { @@ -9317,29 +9307,20 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) "FW LLDP is enabled\n"); } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) { - int v; - /* Find the VSI(s) that requested a re-init */ - dev_info(&pf->pdev->dev, - "VSI reinit requested\n"); - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; + dev_info(&pf->pdev->dev, "VSI reinit requested\n"); - if (vsi != NULL && - test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED, + i40e_pf_for_each_vsi(pf, i, vsi) { + if (test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED, vsi->state)) - i40e_vsi_reinit_locked(pf->vsi[v]); + i40e_vsi_reinit_locked(vsi); } } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) { - int v; - /* Find the VSI(s) that needs to be brought down */ dev_info(&pf->pdev->dev, "VSI down requested\n"); - for (v = 0; v < pf->num_alloc_vsi; v++) { - struct i40e_vsi *vsi = pf->vsi[v]; - if (vsi != NULL && - test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED, + i40e_pf_for_each_vsi(pf, i, vsi) { + if (test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state)) { set_bit(__I40E_VSI_DOWN, vsi->state); i40e_down(vsi); @@ -9892,6 +9873,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up) **/ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) { + struct i40e_vsi *vsi; struct i40e_pf *pf; int i; @@ -9899,15 +9881,10 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) return; pf = veb->pf; - /* depth first... */ - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) - i40e_veb_link_event(pf->veb[i], link_up); - - /* ... 
now the local VSIs */ - for (i = 0; i < pf->num_alloc_vsi; i++) - if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) - i40e_vsi_link_event(pf->vsi[i], link_up); + /* Send link event to contained VSIs */ + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->uplink_seid == veb->seid) + i40e_vsi_link_event(vsi, link_up); } /** @@ -9999,6 +9976,8 @@ static void i40e_link_event(struct i40e_pf *pf) **/ static void i40e_watchdog_subtask(struct i40e_pf *pf) { + struct i40e_vsi *vsi; + struct i40e_veb *veb; int i; /* if interface is down do nothing */ @@ -10019,15 +9998,14 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) /* Update the stats for active netdevs so the network stack * can look at updated numbers whenever it cares to */ - for (i = 0; i < pf->num_alloc_vsi; i++) - if (pf->vsi[i] && pf->vsi[i]->netdev) - i40e_update_stats(pf->vsi[i]); + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->netdev) + i40e_update_stats(vsi); if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) { /* Update the stats for the active switching components */ - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i]) - i40e_update_veb_stats(pf->veb[i]); + i40e_pf_for_each_veb(pf, i, veb) + i40e_update_veb_stats(veb); } i40e_ptp_rx_hang(pf); @@ -10372,89 +10350,84 @@ static void i40e_config_bridge_mode(struct i40e_veb *veb) } /** - * i40e_reconstitute_veb - rebuild the VEB and anything connected to it + * i40e_reconstitute_veb - rebuild the VEB and VSIs connected to it * @veb: pointer to the VEB instance * - * This is a recursive function that first builds the attached VSIs then - * recurses in to build the next layer of VEB. We track the connections - * through our own index numbers because the seid's from the HW could - * change across the reset. + * This function builds the VSIs attached to the VEB. We track the + * connections through our own index numbers because the SEIDs from the HW + * could change across the reset. **/ static int i40e_reconstitute_veb(struct i40e_veb *veb) { struct i40e_vsi *ctl_vsi = NULL; struct i40e_pf *pf = veb->pf; - int v, veb_idx; - int ret; + struct i40e_vsi *vsi; + int v, ret; - /* build VSI that owns this VEB, temporarily attached to base VEB */ - for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { - if (pf->vsi[v] && - pf->vsi[v]->veb_idx == veb->idx && - pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { - ctl_vsi = pf->vsi[v]; - break; - } - } - if (!ctl_vsi) { - dev_info(&pf->pdev->dev, - "missing owner VSI for veb_idx %d\n", veb->idx); - ret = -ENOENT; - goto end_reconstitute; + /* As we do not maintain a PV (port virtualizer) switch element, there + * can be only one non-floating VEB that has an uplink to the MAC SEID, + * and its control VSI is the main one.
+ */ + if (WARN_ON(veb->uplink_seid && veb->uplink_seid != pf->mac_seid)) { + dev_err(&pf->pdev->dev, + "Invalid uplink SEID for VEB %d\n", veb->idx); + return -ENOENT; } - if (ctl_vsi != pf->vsi[pf->lan_vsi]) - ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; - ret = i40e_add_vsi(ctl_vsi); - if (ret) { - dev_info(&pf->pdev->dev, - "rebuild of veb_idx %d owner VSI failed: %d\n", - veb->idx, ret); - goto end_reconstitute; + + if (veb->uplink_seid == pf->mac_seid) { + /* Check that the LAN VSI has VEB owning flag set */ + ctl_vsi = pf->vsi[pf->lan_vsi]; + + if (WARN_ON(ctl_vsi->veb_idx != veb->idx || + !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) { + dev_err(&pf->pdev->dev, + "Invalid control VSI for VEB %d\n", veb->idx); + return -ENOENT; + } + + /* Add the control VSI to switch */ + ret = i40e_add_vsi(ctl_vsi); + if (ret) { + dev_err(&pf->pdev->dev, + "Rebuild of owner VSI for VEB %d failed: %d\n", + veb->idx, ret); + return ret; + } + + i40e_vsi_reset_stats(ctl_vsi); } - i40e_vsi_reset_stats(ctl_vsi); /* create the VEB in the switch and move the VSI onto the VEB */ ret = i40e_add_veb(veb, ctl_vsi); if (ret) - goto end_reconstitute; + return ret; - if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) - veb->bridge_mode = BRIDGE_MODE_VEB; - else - veb->bridge_mode = BRIDGE_MODE_VEPA; - i40e_config_bridge_mode(veb); + if (veb->uplink_seid) { + if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) + veb->bridge_mode = BRIDGE_MODE_VEB; + else + veb->bridge_mode = BRIDGE_MODE_VEPA; + i40e_config_bridge_mode(veb); + } /* create the remaining VSIs attached to this VEB */ - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) + i40e_pf_for_each_vsi(pf, v, vsi) { + if (vsi == ctl_vsi) continue; - if (pf->vsi[v]->veb_idx == veb->idx) { - struct i40e_vsi *vsi = pf->vsi[v]; - + if (vsi->veb_idx == veb->idx) { vsi->uplink_seid = veb->seid; ret = i40e_add_vsi(vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of vsi_idx %d failed: %d\n", v, ret); - goto end_reconstitute; + return ret; } i40e_vsi_reset_stats(vsi); } } - /* create any VEBs attached to this VEB - RECURSION */ - for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { - if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { - pf->veb[veb_idx]->uplink_seid = veb->seid; - ret = i40e_reconstitute_veb(pf->veb[veb_idx]); - if (ret) - break; - } - } - -end_reconstitute: return ret; } @@ -10722,6 +10695,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi) static void i40e_prep_for_reset(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; + struct i40e_vsi *vsi; int ret = 0; u32 v; @@ -10736,11 +10710,9 @@ static void i40e_prep_for_reset(struct i40e_pf *pf) /* quiesce the VSIs and their queues that are not already DOWN */ i40e_pf_quiesce_all_vsi(pf); - for (v = 0; v < pf->num_alloc_vsi; v++) { - if (pf->vsi[v]) { - i40e_clean_xps_state(pf->vsi[v]); - pf->vsi[v]->seid = 0; - } + i40e_pf_for_each_vsi(pf, v, vsi) { + i40e_clean_xps_state(vsi); + vsi->seid = 0; } i40e_shutdown_adminq(&pf->hw); @@ -10854,6 +10826,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; + struct i40e_veb *veb; int ret; u32 val; int v; @@ -10995,35 +10968,29 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) */ if (vsi->uplink_seid != pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); - /* find the one 
VEB connected to the MAC, and find orphans */ - for (v = 0; v < I40E_MAX_VEB; v++) { - if (!pf->veb[v]) - continue; - - if (pf->veb[v]->uplink_seid == pf->mac_seid || - pf->veb[v]->uplink_seid == 0) { - ret = i40e_reconstitute_veb(pf->veb[v]); - if (!ret) - continue; + /* Rebuild VEBs */ + i40e_pf_for_each_veb(pf, v, veb) { + ret = i40e_reconstitute_veb(veb); + if (!ret) + continue; - /* If Main VEB failed, we're in deep doodoo, - * so give up rebuilding the switch and set up - * for minimal rebuild of PF VSI. - * If orphan failed, we'll report the error - * but try to keep going. - */ - if (pf->veb[v]->uplink_seid == pf->mac_seid) { - dev_info(&pf->pdev->dev, - "rebuild of switch failed: %d, will try to set up simple PF connection\n", - ret); - vsi->uplink_seid = pf->mac_seid; - break; - } else if (pf->veb[v]->uplink_seid == 0) { - dev_info(&pf->pdev->dev, - "rebuild of orphan VEB failed: %d\n", - ret); - } + /* If Main VEB failed, we're in deep doodoo, + * so give up rebuilding the switch and set up + * for minimal rebuild of PF VSI. + * If orphan failed, we'll report the error + * but try to keep going. + */ + if (veb->uplink_seid == pf->mac_seid) { + dev_info(&pf->pdev->dev, + "rebuild of switch failed: %d, will try to set up simple PF connection\n", + ret); + vsi->uplink_seid = pf->mac_seid; + break; + } else if (veb->uplink_seid == 0) { + dev_info(&pf->pdev->dev, + "rebuild of orphan VEB failed: %d\n", + ret); } } } @@ -12102,6 +12069,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) */ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) { + struct i40e_vsi *vsi; int err, i; /* We cleared the MSI and MSI-X flags when disabling the old interrupt @@ -12118,13 +12086,12 @@ static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) /* Now that we've re-acquired IRQs, we need to remap the vectors and * rings together again. 
*/ - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i]) { - err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); - if (err) - goto err_unwind; - i40e_vsi_map_rings_to_vectors(pf->vsi[i]); - } + i40e_pf_for_each_vsi(pf, i, vsi) { + err = i40e_vsi_alloc_q_vectors(vsi); + if (err) + goto err_unwind; + + i40e_vsi_map_rings_to_vectors(vsi); } err = i40e_setup_misc_vector(pf); @@ -13126,19 +13093,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = NULL; struct nlattr *attr, *br_spec; - int i, rem; + struct i40e_veb *veb; + int rem; /* Only for PF VSI for now */ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) return -EOPNOTSUPP; /* Find the HW bridge for PF VSI */ - for (i = 0; i < I40E_MAX_VEB && !veb; i++) { - if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) - veb = pf->veb[i]; - } + veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid); br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (!br_spec) @@ -13203,19 +13167,14 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_veb *veb = NULL; - int i; + struct i40e_veb *veb; /* Only for PF VSI for now */ if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) return -EOPNOTSUPP; /* Find the HW bridge for the PF VSI */ - for (i = 0; i < I40E_MAX_VEB && !veb; i++) { - if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) - veb = pf->veb[i]; - } - + veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid); if (!veb) return 0; @@ -14149,7 +14108,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; struct hlist_node *h; - struct i40e_veb *veb = NULL; + struct i40e_veb *veb; struct i40e_pf *pf; u16 uplink_seid; int i, n, bkt; @@ -14213,29 +14172,28 @@ int i40e_vsi_release(struct i40e_vsi *vsi) /* If this was the last thing on the VEB, except for the * controlling VSI, remove the VEB, which puts the controlling - * VSI onto the next level down in the switch. + * VSI onto the uplink port. * * Well, okay, there's one more exception here: don't remove - * the orphan VEBs yet. We'll wait for an explicit remove request + * the floating VEBs yet. We'll wait for an explicit remove request * from up the network stack. */ - for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i] && - pf->vsi[i]->uplink_seid == uplink_seid && - (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { - n++; /* count the VSIs */ - } - } - for (i = 0; i < I40E_MAX_VEB; i++) { - if (!pf->veb[i]) - continue; - if (pf->veb[i]->uplink_seid == uplink_seid) - n++; /* count the VEBs */ - if (pf->veb[i]->seid == uplink_seid) - veb = pf->veb[i]; + veb = i40e_pf_get_veb_by_seid(pf, uplink_seid); + if (veb && veb->uplink_seid) { + n = 0; + + /* Count non-controlling VSIs present on the VEB */ + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->uplink_seid == uplink_seid && + (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) + n++; + + /* If there is no VSI except the control one then release + * the VEB and put the control VSI onto VEB uplink. 
+ */ + if (!n) + i40e_veb_release(veb); } - if (n == 0 && veb && veb->uplink_seid != 0) - i40e_veb_release(veb); return 0; } @@ -14393,8 +14351,8 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, struct i40e_vsi *vsi = NULL; struct i40e_veb *veb = NULL; u16 alloc_queue_pairs; - int ret, i; int v_idx; + int ret; /* The requested uplink_seid must be either * - the PF's port seid @@ -14409,21 +14367,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, * * Find which uplink_seid we were given and create a new VEB if needed */ - for (i = 0; i < I40E_MAX_VEB; i++) { - if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { - veb = pf->veb[i]; - break; - } - } - + veb = i40e_pf_get_veb_by_seid(pf, uplink_seid); if (!veb && uplink_seid != pf->mac_seid) { - - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { - vsi = pf->vsi[i]; - break; - } - } + vsi = i40e_pf_get_vsi_by_seid(pf, uplink_seid); if (!vsi) { dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", uplink_seid); @@ -14452,10 +14398,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, } i40e_config_bridge_mode(veb); } - for (i = 0; i < I40E_MAX_VEB && !veb; i++) { - if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) - veb = pf->veb[i]; - } + veb = i40e_pf_get_veb_by_seid(pf, vsi->uplink_seid); if (!veb) { dev_info(&pf->pdev->dev, "couldn't add VEB\n"); return NULL; @@ -14685,29 +14628,24 @@ static void i40e_switch_branch_release(struct i40e_veb *branch) struct i40e_pf *pf = branch->pf; u16 branch_seid = branch->seid; u16 veb_idx = branch->idx; + struct i40e_vsi *vsi; + struct i40e_veb *veb; int i; /* release any VEBs on this VEB - RECURSION */ - for (i = 0; i < I40E_MAX_VEB; i++) { - if (!pf->veb[i]) - continue; - if (pf->veb[i]->uplink_seid == branch->seid) - i40e_switch_branch_release(pf->veb[i]); - } + i40e_pf_for_each_veb(pf, i, veb) + if (veb->uplink_seid == branch->seid) + i40e_switch_branch_release(veb); /* Release the VSIs on this VEB, but not the owner VSI. * * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing * the VEB itself, so don't use (*branch) after this loop. */ - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (!pf->vsi[i]) - continue; - if (pf->vsi[i]->uplink_seid == branch_seid && - (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { - i40e_vsi_release(pf->vsi[i]); - } - } + i40e_pf_for_each_vsi(pf, i, vsi) + if (vsi->uplink_seid == branch_seid && + (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) + i40e_vsi_release(vsi); /* There's one corner case where the VEB might not have been * removed, so double check it here and remove it if needed. @@ -14745,38 +14683,35 @@ static void i40e_veb_clear(struct i40e_veb *veb) **/ void i40e_veb_release(struct i40e_veb *veb) { - struct i40e_vsi *vsi = NULL; + struct i40e_vsi *vsi, *vsi_it; struct i40e_pf *pf; int i, n = 0; pf = veb->pf; /* find the remaining VSI and check for extras */ - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { + i40e_pf_for_each_vsi(pf, i, vsi_it) + if (vsi_it->uplink_seid == veb->seid) { + if (vsi_it->flags & I40E_VSI_FLAG_VEB_OWNER) + vsi = vsi_it; n++; - vsi = pf->vsi[i]; } - } - if (n != 1) { + + /* Floating VEB has to be empty and regular one must have + * single owner VSI. 
+ */ + if ((veb->uplink_seid && n != 1) || (!veb->uplink_seid && n != 0)) { dev_info(&pf->pdev->dev, "can't remove VEB %d with %d VSIs left\n", veb->seid, n); return; } - /* move the remaining VSI to uplink veb */ - vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; + /* For regular VEB move the owner VSI to uplink port */ if (veb->uplink_seid) { + vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; vsi->uplink_seid = veb->uplink_seid; - if (veb->uplink_seid == pf->mac_seid) - vsi->veb_idx = I40E_NO_VEB; - else - vsi->veb_idx = veb->veb_idx; - } else { - /* floating VEB */ - vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; - vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; + vsi->veb_idx = I40E_NO_VEB; } i40e_aq_delete_element(&pf->hw, veb->seid, NULL); @@ -14794,8 +14729,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); int ret; - ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, - veb->enabled_tc, false, + ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi ? vsi->seid : 0, + veb->enabled_tc, vsi ? false : true, &veb->seid, enable_stats, NULL); /* get a VEB from the hardware */ @@ -14827,9 +14762,11 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) return -ENOENT; } - vsi->uplink_seid = veb->seid; - vsi->veb_idx = veb->idx; - vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; + if (vsi) { + vsi->uplink_seid = veb->seid; + vsi->veb_idx = veb->idx; + vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; + } return 0; } @@ -14854,8 +14791,9 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, u16 vsi_seid, u8 enabled_tc) { - struct i40e_veb *veb, *uplink_veb = NULL; - int vsi_idx, veb_idx; + struct i40e_vsi *vsi = NULL; + struct i40e_veb *veb; + int veb_idx; int ret; /* if one seid is 0, the other must be 0 to create a floating relay */ @@ -14868,26 +14806,11 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, } /* make sure there is such a vsi and uplink */ - for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) - if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) - break; - if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { - dev_info(&pf->pdev->dev, "vsi seid %d not found\n", - vsi_seid); - return NULL; - } - - if (uplink_seid && uplink_seid != pf->mac_seid) { - for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) { - if (pf->veb[veb_idx] && - pf->veb[veb_idx]->seid == uplink_seid) { - uplink_veb = pf->veb[veb_idx]; - break; - } - } - if (!uplink_veb) { - dev_info(&pf->pdev->dev, - "uplink seid %d not found\n", uplink_seid); + if (vsi_seid) { + vsi = i40e_pf_get_vsi_by_seid(pf, vsi_seid); + if (!vsi) { + dev_err(&pf->pdev->dev, "vsi seid %d not found\n", + vsi_seid); return NULL; } } @@ -14899,14 +14822,14 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, veb = pf->veb[veb_idx]; veb->flags = flags; veb->uplink_seid = uplink_seid; - veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); veb->enabled_tc = (enabled_tc ? 
enabled_tc : 0x1); /* create the VEB in the switch */ - ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); + ret = i40e_add_veb(veb, vsi); if (ret) goto err_veb; - if (vsi_idx == pf->lan_vsi) + + if (vsi && vsi->idx == pf->lan_vsi) pf->lan_veb = veb->idx; return veb; @@ -14934,6 +14857,7 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, u16 uplink_seid = le16_to_cpu(ele->uplink_seid); u8 element_type = ele->element_type; u16 seid = le16_to_cpu(ele->seid); + struct i40e_veb *veb; if (printconfig) dev_info(&pf->pdev->dev, @@ -14952,13 +14876,10 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, int v; /* find existing or else empty VEB */ - for (v = 0; v < I40E_MAX_VEB; v++) { - if (pf->veb[v] && (pf->veb[v]->seid == seid)) { - pf->lan_veb = v; - break; - } - } - if (pf->lan_veb >= I40E_MAX_VEB) { + veb = i40e_pf_get_veb_by_seid(pf, seid); + if (veb) { + pf->lan_veb = veb->idx; + } else { v = i40e_veb_mem_alloc(pf); if (v < 0) break; @@ -14971,7 +14892,6 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, pf->veb[pf->lan_veb]->seid = seid; pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; pf->veb[pf->lan_veb]->pf = pf; - pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; break; case I40E_SWITCH_ELEMENT_TYPE_VSI: if (num_reported != 1) @@ -15634,6 +15554,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_I40E_DCB enum i40e_get_fw_lldp_status_resp lldp_status; #endif /* CONFIG_I40E_DCB */ + struct i40e_vsi *vsi; struct i40e_pf *pf; struct i40e_hw *hw; u16 wol_nvm_bits; @@ -15644,7 +15565,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif /* CONFIG_I40E_DCB */ int err; u32 val; - u32 i; err = pci_enable_device_mem(pdev); if (err) @@ -15994,12 +15914,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); /* if FDIR VSI was set up, start it now */ - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { - i40e_vsi_open(pf->vsi[i]); - break; - } - } + vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); + if (vsi) + i40e_vsi_open(vsi); /* The driver only wants link up/down and module qualification * reports from firmware. Note the negative logic. @@ -16245,6 +16162,8 @@ static void i40e_remove(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; + struct i40e_vsi *vsi; + struct i40e_veb *veb; int ret_code; int i; @@ -16302,24 +16221,19 @@ static void i40e_remove(struct pci_dev *pdev) /* If there is a switch structure or any orphans, remove them. * This will leave only the PF's VSI remaining. */ - for (i = 0; i < I40E_MAX_VEB; i++) { - if (!pf->veb[i]) - continue; - - if (pf->veb[i]->uplink_seid == pf->mac_seid || - pf->veb[i]->uplink_seid == 0) - i40e_switch_branch_release(pf->veb[i]); - } + i40e_pf_for_each_veb(pf, i, veb) + if (veb->uplink_seid == pf->mac_seid || + veb->uplink_seid == 0) + i40e_switch_branch_release(veb); /* Now we can shutdown the PF's VSIs, just before we kill * adminq and hmc. 
*/ - for (i = pf->num_alloc_vsi; i--;) - if (pf->vsi[i]) { - i40e_vsi_close(pf->vsi[i]); - i40e_vsi_release(pf->vsi[i]); - pf->vsi[i] = NULL; - } + i40e_pf_for_each_vsi(pf, i, vsi) { + i40e_vsi_close(vsi); + i40e_vsi_release(vsi); + pf->vsi[i] = NULL; + } i40e_cloud_filter_exit(pf); @@ -16356,18 +16270,17 @@ unmap: /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ rtnl_lock(); i40e_clear_interrupt_scheme(pf); - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (pf->vsi[i]) { - if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) - i40e_vsi_clear_rings(pf->vsi[i]); - i40e_vsi_clear(pf->vsi[i]); - pf->vsi[i] = NULL; - } + i40e_pf_for_each_vsi(pf, i, vsi) { + if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) + i40e_vsi_clear_rings(vsi); + + i40e_vsi_clear(vsi); + pf->vsi[i] = NULL; } rtnl_unlock(); - for (i = 0; i < I40E_MAX_VEB; i++) { - kfree(pf->veb[i]); + i40e_pf_for_each_veb(pf, i, veb) { + kfree(veb); pf->veb[i] = NULL; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 908cdbd3ec5d..b34c71770887 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2848,6 +2848,24 @@ error_param: (u8 *)&stats, sizeof(stats)); } +/** + * i40e_can_vf_change_mac + * @vf: pointer to the VF info + * + * Return true if the VF is allowed to change its MAC filters, false otherwise + */ +static bool i40e_can_vf_change_mac(struct i40e_vf *vf) +{ + /* If the VF MAC address has been set administratively (via the + * ndo_set_vf_mac command), then deny permission to the VF to + * add/delete unicast MAC addresses, unless the VF is trusted + */ + if (vf->pf_set_mac && !vf->trusted) + return false; + + return true; +} + #define I40E_MAX_MACVLAN_PER_HW 3072 #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ (num_ports)) @@ -2907,8 +2925,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. */ - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && - !is_multicast_ether_addr(addr) && vf->pf_set_mac && + if (!i40e_can_vf_change_mac(vf) && + !is_multicast_ether_addr(addr) && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); @@ -3114,19 +3132,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) ret = -EINVAL; goto error_param; } - if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) - was_unimac_deleted = true; } vsi = pf->vsi[vf->lan_vsi_idx]; spin_lock_bh(&vsi->mac_filter_hash_lock); /* delete addresses from the list */ - for (i = 0; i < al->num_elements; i++) + for (i = 0; i < al->num_elements; i++) { + const u8 *addr = al->list[i].addr; + + /* Allow to delete VF primary MAC only if it was not set + * administratively by PF or if VF is trusted. 
+ */ + if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { + if (!i40e_can_vf_change_mac(vf)) + continue; + was_unimac_deleted = true; + } + if (i40e_del_mac_filter(vsi, al->list[i].addr)) { ret = -EINVAL; spin_unlock_bh(&vsi->mac_filter_hash_lock); goto error_param; } + } spin_unlock_bh(&vsi->mac_filter_hash_lock); diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 367b613d92c0..365c03d1c462 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -493,7 +493,6 @@ enum ice_pf_flags { ICE_FLAG_DCB_ENA, ICE_FLAG_FD_ENA, ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */ - ICE_FLAG_PTP, /* PTP is enabled by software */ ICE_FLAG_ADV_FEATURES, ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */ ICE_FLAG_CLS_FLOWER, @@ -606,6 +605,7 @@ struct ice_pf { wait_queue_head_t reset_wait_queue; u32 hw_csum_rx_error; + u32 hw_rx_eipe_error; u32 oicr_err_reg; struct msi_map oicr_irq; /* Other interrupt cause MSIX vector */ struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */ @@ -896,6 +896,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf) } void ice_debugfs_fwlog_init(struct ice_pf *pf); +void ice_debugfs_pf_deinit(struct ice_pf *pf); void ice_debugfs_init(void); void ice_debugfs_exit(void); void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module); @@ -983,6 +984,8 @@ void ice_service_task_schedule(struct ice_pf *pf); int ice_load(struct ice_pf *pf); void ice_unload(struct ice_pf *pf); void ice_adv_lnk_speed_maps_init(void); +int ice_init_dev(struct ice_pf *pf); +void ice_deinit_dev(struct ice_pf *pf); /** * ice_set_rdma_cap - enable RDMA support diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 7ac847718882..d2fd315556a3 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) q_vector = vsi->q_vectors[v_idx]; ice_for_each_tx_ring(tx_ring, q_vector->tx) { - if (vsi->netdev) - netif_queue_set_napi(vsi->netdev, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, NULL); + ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX, + NULL); tx_ring->q_vector = NULL; } ice_for_each_rx_ring(rx_ring, q_vector->rx) { - if (vsi->netdev) - netif_queue_set_napi(vsi->netdev, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, NULL); + ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX, + NULL); rx_ring->q_vector = NULL; } @@ -538,7 +536,7 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring) * * Return 0 on success and a negative value on error.
*/ -int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) +static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) { struct device *dev = ice_pf_to_dev(ring->vsi->back); u32 num_bufs = ICE_RX_DESC_UNUSED(ring); @@ -633,6 +631,62 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) return 0; } +int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) +{ + if (q_idx >= vsi->num_rxq) + return -EINVAL; + + return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); +} + +/** + * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length + * @vsi: VSI + */ +static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) +{ + if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { + vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; + vsi->rx_buf_len = ICE_RXBUF_1664; +#if (PAGE_SIZE < 8192) + } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && + (vsi->netdev->mtu <= ETH_DATA_LEN)) { + vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; + vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; +#endif + } else { + vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; + vsi->rx_buf_len = ICE_RXBUF_3072; + } +} + +/** + * ice_vsi_cfg_rxqs - Configure the VSI for Rx + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Rx VSI for operation. + */ +int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) +{ + u16 i; + + if (vsi->type == ICE_VSI_VF) + goto setup_rings; + + ice_vsi_cfg_frame_size(vsi); +setup_rings: + /* set up individual rings */ + ice_for_each_rxq(vsi, i) { + int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]); + + if (err) + return err; + } + + return 0; +} + /** * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI * @qs_cfg: gathered variables needed for pf->vsi queues assignment @@ -828,7 +882,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi) * @ring: Tx ring to be configured * @qg_buf: queue group buffer */ -int +static int ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, struct ice_aqc_add_tx_qgrp *qg_buf) { @@ -899,6 +953,80 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, return 0; } +int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, + u16 q_idx) +{ + DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); + + if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) + return -EINVAL; + + qg_buf->num_txqs = 1; + + return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); +} + +/** + * ice_vsi_cfg_txqs - Configure the VSI for Tx + * @vsi: the VSI being configured + * @rings: Tx ring array to be configured + * @count: number of Tx ring array elements + * + * Return 0 on success and a negative value on error + * Configure the Tx VSI for operation. + */ +static int +ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count) +{ + DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); + int err = 0; + u16 q_idx; + + qg_buf->num_txqs = 1; + + for (q_idx = 0; q_idx < count; q_idx++) { + err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); + if (err) + break; + } + + return err; +} + +/** + * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Tx VSI for operation. 
+ */ +int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) +{ + return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq); +} + +/** + * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI + * @vsi: the VSI being configured + * + * Return 0 on success and a negative value on error + * Configure the Tx queues dedicated for XDP in given VSI for operation. + */ +int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) +{ + int ret; + int i; + + ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); + if (ret) + return ret; + + ice_for_each_rxq(vsi, i) + ice_tx_xsk_pool(vsi, i); + + return 0; +} + /** * ice_cfg_itr - configure the initial interrupt throttle values * @hw: pointer to the HW structure diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h index 17321ba75602..b711bc921928 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.h +++ b/drivers/net/ethernet/intel/ice/ice_base.h @@ -6,7 +6,8 @@ #include "ice.h" -int ice_vsi_cfg_rxq(struct ice_rx_ring *ring); +int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx); +int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg); int ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait); @@ -14,9 +15,10 @@ int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx); int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi); void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi); void ice_vsi_free_q_vectors(struct ice_vsi *vsi); -int -ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, - struct ice_aqc_add_tx_qgrp *qg_buf); +int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, + u16 q_idx); +int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi); +int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi); void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector); void ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx); diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 10c32cd80fff..9266f25a9978 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -154,6 +154,12 @@ static int ice_set_mac_type(struct ice_hw *hw) case ICE_DEV_ID_E823L_SFP: hw->mac_type = ICE_MAC_GENERIC; break; + case ICE_DEV_ID_E825C_BACKPLANE: + case ICE_DEV_ID_E825C_QSFP: + case ICE_DEV_ID_E825C_SFP: + case ICE_DEV_ID_E825C_SGMII: + hw->mac_type = ICE_MAC_GENERIC_3K_E825; + break; case ICE_DEV_ID_E830_BACKPLANE: case ICE_DEV_ID_E830_QSFP56: case ICE_DEV_ID_E830_SFP: @@ -170,6 +176,18 @@ static int ice_set_mac_type(struct ice_hw *hw) } /** + * ice_is_generic_mac - check if device's mac_type is generic + * @hw: pointer to the hardware structure + * + * Return: true if mac_type is generic (with SBQ support), false if not + */ +bool ice_is_generic_mac(struct ice_hw *hw) +{ + return (hw->mac_type == ICE_MAC_GENERIC || + hw->mac_type == ICE_MAC_GENERIC_3K_E825); +} + +/** * ice_is_e810 * @hw: pointer to the hardware structure * @@ -241,6 +259,25 @@ bool ice_is_e823(struct ice_hw *hw) } /** + * ice_is_e825c - Check if a device is E825C family device + * @hw: pointer to the hardware structure + * + * Return: true if the device is E825-C based, false if not. 
+ */ +bool ice_is_e825c(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E825C_BACKPLANE: + case ICE_DEV_ID_E825C_QSFP: + case ICE_DEV_ID_E825C_SFP: + case ICE_DEV_ID_E825C_SGMII: + return true; + default: + return false; + } +} + +/** * ice_clear_pf_cfg - Clear PF configuration * @hw: pointer to the hardware structure * diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 3e933f75e948..32fd10de620c 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -112,6 +112,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); +bool ice_is_generic_mac(struct ice_hw *hw); bool ice_is_e810(struct ice_hw *hw); int ice_clear_pf_cfg(struct ice_hw *hw); int @@ -251,6 +252,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); bool ice_is_e810t(struct ice_hw *hw); bool ice_is_e823(struct ice_hw *hw); +bool ice_is_e825c(struct ice_hw *hw); int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index e7d2474c431c..ffe660f34992 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -666,7 +666,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw) /* The device sideband queue is only supported on devices with the * generic MAC type. */ - return hw->mac_type == ICE_MAC_GENERIC; + return ice_is_generic_mac(hw); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 8b7504a9df31..7532d11ad7f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -1825,6 +1825,7 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type) seg_id = SEGMENT_TYPE_ICE_E830; break; case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: default: seg_id = SEGMENT_TYPE_ICE_E810; break; @@ -1845,6 +1846,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type) case ICE_MAC_E830: sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB; break; + case ICE_MAC_GENERIC_3K_E825: + sign_type = SEGMENT_SIGN_TYPE_RSA3K_E825; + break; case ICE_MAC_GENERIC: default: sign_type = SEGMENT_SIGN_TYPE_RSA2K; diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c index c2bfba6b9ead..d252d98218d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_debugfs.c +++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c @@ -64,9 +64,6 @@ static const char * const ice_fwlog_level_string[] = { "verbose", }; -/* the order in this array is important. 
it matches the ordering of the - * values in the FW so the index is the same value as in ice_fwlog_level - */ static const char * const ice_fwlog_log_size[] = { "128K", "256K", @@ -648,6 +645,16 @@ err_create_module_files: } /** + * ice_debugfs_pf_deinit - cleanup PF's debugfs + * @pf: pointer to the PF struct + */ +void ice_debugfs_pf_deinit(struct ice_pf *pf) +{ + debugfs_remove_recursive(pf->ice_debugfs_pf); + pf->ice_debugfs_pf = NULL; +} + +/** * ice_debugfs_init - create root directory for debugfs entries */ void ice_debugfs_init(void) diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index a2d384dbfc76..9dfae9bce758 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -71,5 +71,13 @@ #define ICE_DEV_ID_E822L_10G_BASE_T 0x1899 /* Intel(R) Ethernet Connection E822-L 1GbE */ #define ICE_DEV_ID_E822L_SGMII 0x189A +/* Intel(R) Ethernet Connection E825-C for backplane */ +#define ICE_DEV_ID_E825C_BACKPLANE 0x579c +/* Intel(R) Ethernet Connection E825-C for QSFP */ +#define ICE_DEV_ID_E825C_QSFP 0x579d +/* Intel(R) Ethernet Connection E825-C for SFP */ +#define ICE_DEV_ID_E825C_SFP 0x579e +/* Intel(R) Ethernet Connection E825-C 1GbE */ +#define ICE_DEV_ID_E825C_SGMII 0x579f #endif /* _ICE_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 65be56f2af9e..b516e42b41f0 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -445,6 +445,20 @@ ice_devlink_reload_empr_start(struct ice_pf *pf, } /** + * ice_devlink_reinit_down - unload given PF + * @pf: pointer to the PF struct + */ +static void ice_devlink_reinit_down(struct ice_pf *pf) +{ + /* No need to take devl_lock, it's already taken by devlink API */ + ice_unload(pf); + rtnl_lock(); + ice_vsi_decfg(ice_get_main_vsi(pf)); + rtnl_unlock(); + ice_deinit_dev(pf); +} + +/** * ice_devlink_reload_down - prepare for reload * @devlink: pointer to the devlink instance to reload * @netns_change: if true, the network namespace is changing @@ -477,7 +491,7 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change, "Remove all VFs before doing reinit\n"); return -EOPNOTSUPP; } - ice_unload(pf); + ice_devlink_reinit_down(pf); return 0; case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: return ice_devlink_reload_empr_start(pf, extack); @@ -1270,6 +1284,45 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate, } /** + * ice_devlink_reinit_up - do reinit of the given PF + * @pf: pointer to the PF struct + */ +static int ice_devlink_reinit_up(struct ice_pf *pf) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct ice_vsi_cfg_params params; + int err; + + err = ice_init_dev(pf); + if (err) + return err; + + params = ice_vsi_to_params(vsi); + params.flags = ICE_VSI_FLAG_INIT; + + rtnl_lock(); + err = ice_vsi_cfg(vsi, ¶ms); + rtnl_unlock(); + if (err) + goto err_vsi_cfg; + + /* No need to take devl_lock, it's already taken by devlink API */ + err = ice_load(pf); + if (err) + goto err_load; + + return 0; + +err_load: + rtnl_lock(); + ice_vsi_decfg(vsi); + rtnl_unlock(); +err_vsi_cfg: + ice_deinit_dev(pf); + return err; +} + +/** * ice_devlink_reload_up - do reload up after reinit * @devlink: pointer to the devlink instance reloading * @action: the action requested @@ -1289,7 +1342,7 @@ ice_devlink_reload_up(struct devlink *devlink, switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: 
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); - return ice_load(pf); + return ice_devlink_reinit_up(pf); case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE); return ice_devlink_reload_empr_finish(pf, extack); @@ -1569,6 +1622,7 @@ static const struct devlink_port_ops ice_devlink_port_ops = { * @pf: the PF to create a devlink port for * * Create and register a devlink_port for this PF. + * This function has to be called under devl_lock. * * Return: zero on success or an error code on failure. */ @@ -1581,6 +1635,8 @@ int ice_devlink_create_pf_port(struct ice_pf *pf) struct device *dev; int err; + devlink = priv_to_devlink(pf); + dev = ice_pf_to_dev(pf); devlink_port = &pf->devlink_port; @@ -1601,10 +1657,9 @@ int ice_devlink_create_pf_port(struct ice_pf *pf) ice_devlink_set_switch_id(pf, &attrs.switch_id); devlink_port_attrs_set(devlink_port, &attrs); - devlink = priv_to_devlink(pf); - err = devlink_port_register_with_ops(devlink, devlink_port, vsi->idx, - &ice_devlink_port_ops); + err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, + &ice_devlink_port_ops); if (err) { dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", pf->hw.pf_id, err); @@ -1619,10 +1674,11 @@ int ice_devlink_create_pf_port(struct ice_pf *pf) * @pf: the PF to cleanup * * Unregisters the devlink_port structure associated with this PF. + * This function has to be called under devl_lock. */ void ice_devlink_destroy_pf_port(struct ice_pf *pf) { - devlink_port_unregister(&pf->devlink_port); + devl_port_unregister(&pf->devlink_port); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index b9c5eced6326..2bceee6d30f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -31,6 +31,26 @@ static const char * const pin_type_name[] = { }; /** + * ice_dpll_is_reset - check if reset is in progress + * @pf: private board structure + * @extack: error reporting + * + * If reset is in progress, fill extack with error. 
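+ *
+ * The dpll callbacks below use this as an early bail-out before taking the
+ * dpll mutex (pattern repeated throughout this file):
+ *
+ *	if (ice_dpll_is_reset(pf, extack))
+ *		return -EBUSY;
+ *	mutex_lock(&pf->dplls.lock);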
+ * + * Return: + * * false - no reset in progress + * * true - reset in progress + */ +static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack) +{ + if (ice_is_reset_in_progress(pf->state)) { + NL_SET_ERR_MSG(extack, "PF reset in progress"); + return true; + } + return false; +} + +/** * ice_dpll_pin_freq_set - set pin's frequency * @pf: private board structure * @pin: pointer to a pin @@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack); mutex_unlock(&pf->dplls.lock); @@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv, * ice_dpll_pin_enable - enable a pin on dplls * @hw: board private hw structure * @pin: pointer to a pin + * @dpll_idx: dpll index to connect to output pin * @pin_type: type of pin being enabled * @extack: error reporting * @@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv, */ static int ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, - enum ice_dpll_pin_type pin_type, + u8 dpll_idx, enum ice_dpll_pin_type pin_type, struct netlink_ext_ack *extack) { u8 flags = 0; @@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0); break; case ICE_DPLL_PIN_TYPE_OUTPUT: + flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL; if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; - ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0); + ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx, + 0, 0); break; default: return -EINVAL; @@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, case ICE_DPLL_PIN_TYPE_INPUT: ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL, NULL, &pin->flags[0], - &pin->freq, NULL); + &pin->freq, &pin->phase_adjust); if (ret) goto err; if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) { @@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, break; case ICE_DPLL_PIN_TYPE_OUTPUT: ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx, - &pin->flags[0], NULL, + &pin->flags[0], &parent, &pin->freq, NULL); if (ret) goto err; - if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) - pin->state[0] = DPLL_PIN_STATE_CONNECTED; - else - pin->state[0] = DPLL_PIN_STATE_DISCONNECTED; + + parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL; + if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) { + pin->state[pf->dplls.eec.dpll_idx] = + parent == pf->dplls.eec.dpll_idx ? + DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_DISCONNECTED; + pin->state[pf->dplls.pps.dpll_idx] = + parent == pf->dplls.pps.dpll_idx ? 
+ DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_DISCONNECTED; + } else { + pin->state[pf->dplls.eec.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + pin->state[pf->dplls.pps.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + } break; case ICE_DPLL_PIN_TYPE_RCLK_INPUT: for (parent = 0; parent < pf->dplls.rclk.num_parents; @@ -488,6 +527,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll, * @dpll: registered dpll pointer * @dpll_priv: private data pointer passed on dpll registration * @status: on success holds dpll's lock status + * @status_error: status error value * @extack: error reporting * * Dpll subsystem callback, provides dpll's lock status. @@ -500,6 +540,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll, static int ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv, enum dpll_lock_status *status, + enum dpll_lock_status_error *status_error, struct netlink_ext_ack *extack) { struct ice_dpll *d = dpll_priv; @@ -568,9 +609,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); if (enable) - ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack); + ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type, + extack); else ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack); if (!ret) @@ -603,6 +648,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv, struct netlink_ext_ack *extack) { bool enable = state == DPLL_PIN_STATE_CONNECTED; + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + + if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED) + return 0; return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable, extack, ICE_DPLL_PIN_TYPE_OUTPUT); @@ -665,14 +715,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); ret = ice_dpll_pin_state_update(pf, p, pin_type, extack); if (ret) goto unlock; - if (pin_type == ICE_DPLL_PIN_TYPE_INPUT) + if (pin_type == ICE_DPLL_PIN_TYPE_INPUT || + pin_type == ICE_DPLL_PIN_TYPE_OUTPUT) *state = p->state[d->dpll_idx]; - else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT) - *state = p->state[0]; ret = 0; unlock: mutex_unlock(&pf->dplls.lock); @@ -790,6 +842,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack); mutex_unlock(&pf->dplls.lock); @@ -910,6 +965,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, u8 flag, flags_en = 0; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); switch (type) { case ICE_DPLL_PIN_TYPE_INPUT: @@ -1069,6 +1127,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv, int ret = -EINVAL; u32 hw_idx; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); hw_idx = parent->idx - pf->dplls.base_rclk_idx; if (hw_idx >= pf->dplls.num_inputs) @@ -1123,6 +1184,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv, int ret = -EINVAL; u32 hw_idx; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); hw_idx = parent->idx - pf->dplls.base_rclk_idx; if (hw_idx >= 
pf->dplls.num_inputs) @@ -1305,8 +1369,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work) struct ice_pf *pf = container_of(d, struct ice_pf, dplls); struct ice_dpll *de = &pf->dplls.eec; struct ice_dpll *dp = &pf->dplls.pps; - int ret; + int ret = 0; + if (ice_is_reset_in_progress(pf->state)) + goto resched; mutex_lock(&pf->dplls.lock); ret = ice_dpll_update_state(pf, de, false); if (!ret) @@ -1326,6 +1392,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work) ice_dpll_notify_changes(de); ice_dpll_notify_changes(dp); +resched: /* Run twice a second or reschedule if update failed */ kthread_queue_delayed_work(d->kworker, &d->work, ret ? msecs_to_jiffies(10) : diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index a19b06f18e40..3cc364a4d682 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -129,6 +129,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT("rx_oversize.nic", stats.rx_oversize), ICE_PF_STAT("rx_jabber.nic", stats.rx_jabber), ICE_PF_STAT("rx_csum_bad.nic", hw_csum_rx_error), + ICE_PF_STAT("rx_eipe_error.nic", hw_rx_eipe_error), ICE_PF_STAT("rx_dropped.nic", stats.eth.rx_discards), ICE_PF_STAT("rx_crc_errors.nic", stats.crc_errors), ICE_PF_STAT("illegal_bytes.nic", stats.illegal_bytes), @@ -3360,7 +3361,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) struct ice_pf *pf = ice_netdev_to_pf(dev); /* only report timestamping if PTP is enabled */ - if (!test_bit(ICE_FLAG_PTP, pf->flags)) + if (pf->ptp.state != ICE_PTP_READY) return ethtool_op_get_ts_info(dev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c index 92b5dac481cd..4fd15387a7e5 100644 --- a/drivers/net/ethernet/intel/ice/ice_fwlog.c +++ b/drivers/net/ethernet/intel/ice/ice_fwlog.c @@ -188,6 +188,8 @@ void ice_fwlog_deinit(struct ice_hw *hw) if (hw->bus.func) return; + ice_debugfs_pf_deinit(hw->back); + /* make sure FW logging is disabled to not put the FW in a weird state * for the next driver load */ diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 2a25323105e5..467372d541d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -152,6 +152,27 @@ ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport) } /** + * ice_pkg_has_lport_extract - check if lport extraction supported + * @hw: HW struct + */ +static bool ice_pkg_has_lport_extract(struct ice_hw *hw) +{ + int i; + + for (i = 0; i < hw->blk[ICE_BLK_SW].es.count; i++) { + u16 offset; + u8 fv_prot; + + ice_find_prot_off(hw, ICE_BLK_SW, ICE_SW_DEFAULT_PROFILE, i, + &fv_prot, &offset); + if (fv_prot == ICE_FV_PROT_MDID && + offset == ICE_LP_EXT_BUF_OFFSET) + return true; + } + return false; +} + +/** * ice_lag_find_primary - returns pointer to primary interfaces lag struct * @lag: local interfaces lag struct */ @@ -1206,7 +1227,7 @@ static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf) } /** - * ice_lag_init_feature_support_flag - Check for NVM support for LAG + * ice_lag_init_feature_support_flag - Check for package and NVM support for LAG * @pf: PF struct */ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) @@ -1219,7 +1240,7 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf) else ice_clear_feature_support(pf, 
ICE_F_ROCE_LAG); - if (caps->sriov_lag) + if (caps->sriov_lag && ice_pkg_has_lport_extract(&pf->hw)) ice_set_feature_support(pf, ICE_F_SRIOV_LAG); else ice_clear_feature_support(pf, ICE_F_SRIOV_LAG); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index ede833dfa658..183b38792ef2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -17,6 +17,9 @@ enum ice_lag_role { #define ICE_LAG_INVALID_PORT 0xFF #define ICE_LAG_RESET_RETRIES 5 +#define ICE_SW_DEFAULT_PROFILE 0 +#define ICE_FV_PROT_MDID 255 +#define ICE_LP_EXT_BUF_OFFSET 32 struct ice_pf; struct ice_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 9be724291ef8..59e8a2572985 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1672,27 +1672,6 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) } /** - * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length - * @vsi: VSI - */ -static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) -{ - if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { - vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; - vsi->rx_buf_len = ICE_RXBUF_1664; -#if (PAGE_SIZE < 8192) - } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && - (vsi->netdev->mtu <= ETH_DATA_LEN)) { - vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; - vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; -#endif - } else { - vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; - vsi->rx_buf_len = ICE_RXBUF_3072; - } -} - -/** * ice_pf_state_is_nominal - checks the PF for nominal state * @pf: pointer to PF to check * @@ -1795,114 +1774,6 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, wr32(hw, QRXFLXP_CNTXT(pf_q), regval); } -int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) -{ - if (q_idx >= vsi->num_rxq) - return -EINVAL; - - return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]); -} - -int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx) -{ - DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); - - if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx]) - return -EINVAL; - - qg_buf->num_txqs = 1; - - return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf); -} - -/** - * ice_vsi_cfg_rxqs - Configure the VSI for Rx - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - * Configure the Rx VSI for operation. - */ -int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) -{ - u16 i; - - if (vsi->type == ICE_VSI_VF) - goto setup_rings; - - ice_vsi_cfg_frame_size(vsi); -setup_rings: - /* set up individual rings */ - ice_for_each_rxq(vsi, i) { - int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]); - - if (err) - return err; - } - - return 0; -} - -/** - * ice_vsi_cfg_txqs - Configure the VSI for Tx - * @vsi: the VSI being configured - * @rings: Tx ring array to be configured - * @count: number of Tx ring array elements - * - * Return 0 on success and a negative value on error - * Configure the Tx VSI for operation. 
- */ -static int -ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count) -{ - DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); - int err = 0; - u16 q_idx; - - qg_buf->num_txqs = 1; - - for (q_idx = 0; q_idx < count; q_idx++) { - err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf); - if (err) - break; - } - - return err; -} - -/** - * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - * Configure the Tx VSI for operation. - */ -int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi) -{ - return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq); -} - -/** - * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI - * @vsi: the VSI being configured - * - * Return 0 on success and a negative value on error - * Configure the Tx queues dedicated for XDP in given VSI for operation. - */ -int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi) -{ - int ret; - int i; - - ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq); - if (ret) - return ret; - - ice_for_each_rxq(vsi, i) - ice_tx_xsk_pool(vsi, i); - - return 0; -} - /** * ice_intrl_usec_to_reg - convert interrupt rate limit to register value * @intrl: interrupt rate limit in usecs @@ -2426,7 +2297,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) ice_vsi_map_rings_to_vectors(vsi); /* Associate q_vector rings to napi */ - ice_vsi_set_napi_queues(vsi, true); + ice_vsi_set_napi_queues(vsi); vsi->stat_offsets_loaded = false; @@ -2904,19 +2775,19 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) } /** - * ice_queue_set_napi - Set the napi instance for the queue + * __ice_queue_set_napi - Set the napi instance for the queue * @dev: device to which NAPI and queue belong * @queue_index: Index of queue * @type: queue type as RX or TX * @napi: NAPI context * @locked: is the rtnl_lock already held * - * Set the napi instance for the queue + * Set the napi instance for the queue. Caller indicates the lock status. */ static void -ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, - enum netdev_queue_type type, struct napi_struct *napi, - bool locked) +__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi, + bool locked) { if (!locked) rtnl_lock(); @@ -2926,26 +2797,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, } /** - * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * ice_queue_set_napi - Set the napi instance for the queue + * @vsi: VSI being configured + * @queue_index: Index of queue + * @type: queue type as RX or TX + * @napi: NAPI context + * + * Set the napi instance for the queue. The rtnl lock state is derived from the + * execution path. 
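+ *
+ * Callers running from the service task, reset, or suspend paths do not
+ * hold the rtnl lock, so the helper is invoked with locked == false and
+ * takes the lock itself; any other execution path is assumed to already
+ * hold rtnl:
+ *
+ *	if (current_work() == &pf->serv_task || ...)
+ *		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi, false);
+ *	else
+ *		__ice_queue_set_napi(vsi->netdev, queue_index, type, napi, true);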
+ */ +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi) +{ + struct ice_pf *pf = vsi->back; + + if (!vsi->netdev) + return; + + if (current_work() == &pf->serv_task || + test_bit(ICE_PREPARED_FOR_RESET, pf->state) || + test_bit(ICE_DOWN, pf->state) || + test_bit(ICE_SUSPENDED, pf->state)) + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + false); + else + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + true); +} + +/** + * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi * @q_vector: q_vector pointer * @locked: is the rtnl_lock already held * + * Associate the q_vector napi with all the queue[s] on the vector. + * Caller indicates the lock status. + */ +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +{ + struct ice_rx_ring *rx_ring; + struct ice_tx_ring *tx_ring; + + ice_for_each_rx_ring(rx_ring, q_vector->rx) + __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi, + locked); + + ice_for_each_tx_ring(tx_ring, q_vector->tx) + __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi, + locked); + /* Also set the interrupt number for the NAPI */ + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); +} + +/** + * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * @q_vector: q_vector pointer + * * Associate the q_vector napi with all the queue[s] on the vector */ -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector) { struct ice_rx_ring *rx_ring; struct ice_tx_ring *tx_ring; ice_for_each_rx_ring(rx_ring, q_vector->rx) - ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, &q_vector->napi, - locked); + ice_queue_set_napi(q_vector->vsi, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi); ice_for_each_tx_ring(tx_ring, q_vector->tx) - ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, &q_vector->napi, - locked); + ice_queue_set_napi(q_vector->vsi, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi); /* Also set the interrupt number for the NAPI */ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); } @@ -2953,11 +2877,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) /** * ice_vsi_set_napi_queues * @vsi: VSI pointer - * @locked: is the rtnl_lock already held * * Associate queue[s] with napi for all vectors */ -void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked) +void ice_vsi_set_napi_queues(struct ice_vsi *vsi) { int i; @@ -2965,7 +2888,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked) return; ice_for_each_q_vector(vsi, i) - ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked); + ice_q_vector_set_napi_queues(vsi->q_vectors[i]); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 71bd27244941..b5a1ed7cc4b1 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -54,14 +54,6 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf); void ice_update_eth_stats(struct ice_vsi *vsi); -int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx); - -int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx); - -int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); - -int 
ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi); - void ice_vsi_cfg_msix(struct ice_vsi *vsi); int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi); @@ -72,8 +64,6 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, u16 rel_vmvf_num); -int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi); - int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); @@ -91,9 +81,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi); + +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); + +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector); -void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked); +void ice_vsi_set_napi_queues(struct ice_vsi *vsi); int ice_vsi_release(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index dd4a9bc0dfdc..8f73ba77e835 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -613,7 +613,7 @@ skip: ice_pf_dis_all_vsi(pf, false); if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) - ice_ptp_prepare_for_reset(pf); + ice_ptp_prepare_for_reset(pf, reset_type); if (ice_is_feature_supported(pf, ICE_F_GNSS)) ice_gnss_exit(pf); @@ -1649,8 +1649,10 @@ static void ice_clean_sbq_subtask(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - /* Nothing to do here if sideband queue is not supported */ - if (!ice_is_sbq_supported(hw)) { + /* if mac_type is not generic, sideband is not supported + * and there's nothing to do here + */ + if (!ice_is_generic_mac(hw)) { clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state); return; } @@ -3495,7 +3497,7 @@ static void ice_napi_add(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, v_idx) { netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, ice_napi_poll); - ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); + __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); } } @@ -4572,90 +4574,6 @@ static void ice_decfg_netdev(struct ice_vsi *vsi) vsi->netdev = NULL; } -static int ice_start_eth(struct ice_vsi *vsi) -{ - int err; - - err = ice_init_mac_fltr(vsi->back); - if (err) - return err; - - err = ice_vsi_open(vsi); - if (err) - ice_fltr_remove_all(vsi); - - return err; -} - -static void ice_stop_eth(struct ice_vsi *vsi) -{ - ice_fltr_remove_all(vsi); - ice_vsi_close(vsi); -} - -static int ice_init_eth(struct ice_pf *pf) -{ - struct ice_vsi *vsi = ice_get_main_vsi(pf); - int err; - - if (!vsi) - return -EINVAL; - - /* init channel list */ - INIT_LIST_HEAD(&vsi->ch_list); - - err = ice_cfg_netdev(vsi); - if (err) - return err; - /* Setup DCB netlink interface */ - ice_dcbnl_setup(vsi); - - err = ice_init_mac_fltr(pf); - if (err) - goto err_init_mac_fltr; - - err = ice_devlink_create_pf_port(pf); - if (err) - goto err_devlink_create_pf_port; - - SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); - - err = ice_register_netdev(vsi); - if (err) - goto err_register_netdev; - - err = ice_tc_indir_block_register(vsi); - if (err) - goto err_tc_indir_block_register; - - ice_napi_add(vsi); - - return 0; - -err_tc_indir_block_register: - ice_unregister_netdev(vsi); -err_register_netdev: - 
ice_devlink_destroy_pf_port(pf); -err_devlink_create_pf_port: -err_init_mac_fltr: - ice_decfg_netdev(vsi); - return err; -} - -static void ice_deinit_eth(struct ice_pf *pf) -{ - struct ice_vsi *vsi = ice_get_main_vsi(pf); - - if (!vsi) - return; - - ice_vsi_close(vsi); - ice_unregister_netdev(vsi); - ice_devlink_destroy_pf_port(pf); - ice_tc_indir_block_unregister(vsi); - ice_decfg_netdev(vsi); -} - /** * ice_wait_for_fw - wait for full FW readiness * @hw: pointer to the hardware structure @@ -4681,7 +4599,7 @@ static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) return -ETIMEDOUT; } -static int ice_init_dev(struct ice_pf *pf) +int ice_init_dev(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; @@ -4774,7 +4692,7 @@ err_init_pf: return err; } -static void ice_deinit_dev(struct ice_pf *pf) +void ice_deinit_dev(struct ice_pf *pf) { ice_free_irq_msix_misc(pf); ice_deinit_pf(pf); @@ -5079,31 +4997,47 @@ static void ice_deinit(struct ice_pf *pf) /** * ice_load - load pf by init hw and starting VSI * @pf: pointer to the pf instance + * + * This function has to be called under devl_lock. */ int ice_load(struct ice_pf *pf) { - struct ice_vsi_cfg_params params = {}; struct ice_vsi *vsi; int err; - err = ice_init_dev(pf); + devl_assert_locked(priv_to_devlink(pf)); + + vsi = ice_get_main_vsi(pf); + + /* init channel list */ + INIT_LIST_HEAD(&vsi->ch_list); + + err = ice_cfg_netdev(vsi); if (err) return err; - vsi = ice_get_main_vsi(pf); + /* Setup DCB netlink interface */ + ice_dcbnl_setup(vsi); - params = ice_vsi_to_params(vsi); - params.flags = ICE_VSI_FLAG_INIT; + err = ice_init_mac_fltr(pf); + if (err) + goto err_init_mac_fltr; + + err = ice_devlink_create_pf_port(pf); + if (err) + goto err_devlink_create_pf_port; + + SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); - rtnl_lock(); - err = ice_vsi_cfg(vsi, &params); + err = ice_register_netdev(vsi); if (err) - goto err_vsi_cfg; + goto err_register_netdev; - err = ice_start_eth(ice_get_main_vsi(pf)); + err = ice_tc_indir_block_register(vsi); if (err) - goto err_start_eth; - rtnl_unlock(); + goto err_tc_indir_block_register; + + ice_napi_add(vsi); err = ice_init_rdma(pf); if (err) @@ -5117,29 +5051,35 @@ int ice_load(struct ice_pf *pf) return 0; err_init_rdma: - ice_vsi_close(ice_get_main_vsi(pf)); - rtnl_lock(); -err_start_eth: - ice_vsi_decfg(ice_get_main_vsi(pf)); -err_vsi_cfg: - rtnl_unlock(); - ice_deinit_dev(pf); + ice_tc_indir_block_unregister(vsi); +err_tc_indir_block_register: + ice_unregister_netdev(vsi); +err_register_netdev: + ice_devlink_destroy_pf_port(pf); +err_devlink_create_pf_port: +err_init_mac_fltr: + ice_decfg_netdev(vsi); return err; } /** * ice_unload - unload pf by stopping VSI and deinit hw * @pf: pointer to the pf instance + * + * This function has to be called under devl_lock.
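+ *
+ * Callers that do not already hold the lock wrap it, as ice_probe()'s error
+ * path and ice_remove() below do:
+ *
+ *	devl_lock(priv_to_devlink(pf));
+ *	ice_unload(pf);
+ *	devl_unlock(priv_to_devlink(pf));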
*/ void ice_unload(struct ice_pf *pf) { + struct ice_vsi *vsi = ice_get_main_vsi(pf); + + devl_assert_locked(priv_to_devlink(pf)); + ice_deinit_features(pf); ice_deinit_rdma(pf); - rtnl_lock(); - ice_stop_eth(ice_get_main_vsi(pf)); - ice_vsi_decfg(ice_get_main_vsi(pf)); - rtnl_unlock(); - ice_deinit_dev(pf); + ice_tc_indir_block_unregister(vsi); + ice_unregister_netdev(vsi); + ice_devlink_destroy_pf_port(pf); + ice_decfg_netdev(vsi); } /** @@ -5237,27 +5177,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (err) goto err_init; - err = ice_init_eth(pf); + devl_lock(priv_to_devlink(pf)); + err = ice_load(pf); + devl_unlock(priv_to_devlink(pf)); if (err) - goto err_init_eth; - - err = ice_init_rdma(pf); - if (err) - goto err_init_rdma; + goto err_load; err = ice_init_devlink(pf); if (err) goto err_init_devlink; - ice_init_features(pf); - return 0; err_init_devlink: - ice_deinit_rdma(pf); -err_init_rdma: - ice_deinit_eth(pf); -err_init_eth: + devl_lock(priv_to_devlink(pf)); + ice_unload(pf); + devl_unlock(priv_to_devlink(pf)); +err_load: ice_deinit(pf); err_init: pci_disable_device(pdev); @@ -5340,8 +5276,6 @@ static void ice_remove(struct pci_dev *pdev) msleep(100); } - ice_debugfs_exit(); - if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); @@ -5355,12 +5289,14 @@ static void ice_remove(struct pci_dev *pdev) if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); - ice_deinit_features(pf); + ice_deinit_devlink(pf); - ice_deinit_rdma(pf); - ice_deinit_eth(pf); - ice_deinit(pf); + devl_lock(priv_to_devlink(pf)); + ice_unload(pf); + devl_unlock(priv_to_devlink(pf)); + + ice_deinit(pf); ice_vsi_release_all(pf); ice_setup_mc_magic_wake(pf); @@ -5447,6 +5383,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) if (ret) goto err_reinit; ice_vsi_map_rings_to_vectors(pf->vsi[v]); + ice_vsi_set_napi_queues(pf->vsi[v]); } ret = ice_req_irq_msix_misc(pf); @@ -5752,6 +5689,10 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_BACKPLANE), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) }, @@ -5841,6 +5782,7 @@ module_init(ice_module_init); static void __exit ice_module_exit(void) { pci_unregister_driver(&ice_driver); + ice_debugfs_exit(); destroy_workqueue(ice_wq); destroy_workqueue(ice_lag_wq); pr_info("module unloaded\n"); @@ -7548,7 +7490,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) * fail. 
*/ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) - ice_ptp_reset(pf); + ice_ptp_rebuild(pf, reset_type); if (ice_is_feature_supported(pf, ICE_F_GNSS)) ice_gnss_init(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h index 82bc54fec7f3..a2562f04267f 100644 --- a/drivers/net/ethernet/intel/ice/ice_osdep.h +++ b/drivers/net/ethernet/intel/ice/ice_osdep.h @@ -24,7 +24,7 @@ #define rd64(a, reg) readq((a)->hw_addr + (reg)) #define ice_flush(a) rd32((a), GLGEN_STAT) -#define ICE_M(m, s) ((m) << (s)) +#define ICE_M(m, s) ((m ## U) << (s)) struct ice_dma_mem { void *va; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 3b6605c8585e..c11eba07283c 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -601,17 +601,13 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) /* Read the low 32 bit value */ raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH); - /* For PHYs which don't implement a proper timestamp ready bitmap, - * verify that the timestamp value is different from the last cached - * timestamp. If it is not, skip this for now assuming it hasn't yet - * been captured by hardware. + /* Devices using this interface always verify the timestamp differs + * relative to the last cached timestamp value. */ - if (!drop_ts && tx->verify_cached && - raw_tstamp == tx->tstamps[idx].cached_tstamp) + if (raw_tstamp == tx->tstamps[idx].cached_tstamp) return; - if (tx->verify_cached && raw_tstamp) - tx->tstamps[idx].cached_tstamp = raw_tstamp; + tx->tstamps[idx].cached_tstamp = raw_tstamp; clear_bit(idx, tx->in_use); skb = tx->tstamps[idx].skb; tx->tstamps[idx].skb = NULL; @@ -701,9 +697,11 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) hw = &pf->hw; /* Read the Tx ready status first */ - err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); - if (err) - return; + if (tx->has_ready_bitmap) { + err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); + if (err) + return; + } /* Drop packets if the link went down */ link_up = ptp_port->link_up; @@ -731,7 +729,8 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) * If we do not, the hardware logic for generating a new * interrupt can get stuck on some devices. */ - if (!(tstamp_ready & BIT_ULL(phy_idx))) { + if (tx->has_ready_bitmap && + !(tstamp_ready & BIT_ULL(phy_idx))) { if (drop_ts) goto skip_ts_read; @@ -751,7 +750,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) * from the last cached timestamp. If it is not, skip this for * now assuming it hasn't yet been captured by hardware. */ - if (!drop_ts && tx->verify_cached && + if (!drop_ts && !tx->has_ready_bitmap && raw_tstamp == tx->tstamps[idx].cached_tstamp) continue; @@ -761,7 +760,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx) skip_ts_read: spin_lock_irqsave(&tx->lock, flags); - if (tx->verify_cached && raw_tstamp) + if (!tx->has_ready_bitmap && raw_tstamp) tx->tstamps[idx].cached_tstamp = raw_tstamp; clear_bit(idx, tx->in_use); skb = tx->tstamps[idx].skb; @@ -965,6 +964,22 @@ ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx) } /** + * ice_ptp_flush_all_tx_tracker - Flush all timestamp trackers on this clock + * @pf: Board private structure + * + * Called by the clock owner to flush all the Tx timestamp trackers associated + * with the clock. 
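+ *
+ * The clock owner's rebuild path, ice_ptp_rebuild_owner(), runs this once
+ * the PHC time has been restored and before the PHY timestamp interrupts
+ * are re-enabled:
+ *
+ *	ice_ptp_flush_all_tx_tracker(pf);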
+ */ +static void +ice_ptp_flush_all_tx_tracker(struct ice_pf *pf) +{ + struct ice_ptp_port *port; + + list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) + ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx); +} + +/** * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker * @pf: Board private structure * @tx: Tx tracking structure to release @@ -1014,7 +1029,7 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) tx->block = port / ICE_PORTS_PER_QUAD; tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; tx->len = INDEX_PER_PORT_E82X; - tx->verify_cached = 0; + tx->has_ready_bitmap = 1; return ice_ptp_alloc_tx_tracker(tx); } @@ -1037,7 +1052,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) * verify new timestamps against cached copy of the last read * timestamp. */ - tx->verify_cached = 1; + tx->has_ready_bitmap = 0; return ice_ptp_alloc_tx_tracker(tx); } @@ -1430,7 +1445,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) struct ice_ptp_port *ptp_port; struct ice_hw *hw = &pf->hw; - if (!test_bit(ICE_FLAG_PTP, pf->flags)) + if (pf->ptp.state != ICE_PTP_READY) return; if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) @@ -1456,14 +1471,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) } /** - * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt + * ice_ptp_cfg_phy_interrupt - Configure PHY interrupt settings * @pf: PF private structure * @ena: bool value to enable or disable interrupt * @threshold: Minimum number of packets at which intr is triggered * * Utility function to enable or disable Tx timestamp interrupt and threshold */ -static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold) +static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) { struct ice_hw *hw = &pf->hw; int err = 0; @@ -2162,7 +2177,7 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr) { struct hwtstamp_config *config; - if (!test_bit(ICE_FLAG_PTP, pf->flags)) + if (pf->ptp.state != ICE_PTP_READY) return -EIO; config = &pf->ptp.tstamp_config; @@ -2232,7 +2247,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) struct hwtstamp_config config; int err; - if (!test_bit(ICE_FLAG_PTP, pf->flags)) + if (pf->ptp.state != ICE_PTP_READY) return -EAGAIN; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) @@ -2616,7 +2631,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work) struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp); int err; - if (!test_bit(ICE_FLAG_PTP, pf->flags)) + if (pf->ptp.state != ICE_PTP_READY) return; err = ice_ptp_update_cached_phctime(pf); @@ -2629,36 +2644,72 @@ static void ice_ptp_periodic_work(struct kthread_work *work) } /** - * ice_ptp_reset - Initialize PTP hardware clock support after reset + * ice_ptp_prepare_for_reset - Prepare PTP for reset + * @pf: Board private structure + * @reset_type: the reset type being performed + */ +void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) +{ + struct ice_ptp *ptp = &pf->ptp; + u8 src_tmr; + + if (ptp->state != ICE_PTP_READY) + return; + + ptp->state = ICE_PTP_RESETTING; + + /* Disable timestamping for both Tx and Rx */ + ice_ptp_disable_timestamp_mode(pf); + + kthread_cancel_delayed_work_sync(&ptp->work); + + if (reset_type == ICE_RESET_PFR) + return; + + ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); + + /* Disable periodic outputs */ + ice_ptp_disable_all_clkout(pf); + + src_tmr = 
ice_get_ptp_src_clock_index(&pf->hw); + + /* Disable source clock */ + wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); + + /* Acquire PHC and system timer to restore after reset */ + ptp->reset_time = ktime_get_real_ns(); +} + +/** + * ice_ptp_rebuild_owner - Initialize PTP clock owner after reset * @pf: Board private structure + * + * Companion function for ice_ptp_rebuild() which handles tasks that only the + * PTP clock owner instance should perform. */ -void ice_ptp_reset(struct ice_pf *pf) +static int ice_ptp_rebuild_owner(struct ice_pf *pf) { struct ice_ptp *ptp = &pf->ptp; struct ice_hw *hw = &pf->hw; struct timespec64 ts; - int err, itr = 1; u64 time_diff; - - if (test_bit(ICE_PFR_REQ, pf->state) || - !ice_pf_src_tmr_owned(pf)) - goto pfr; + int err; err = ice_ptp_init_phc(hw); if (err) - goto err; + return err; /* Acquire the global hardware lock */ if (!ice_ptp_lock(hw)) { err = -EBUSY; - goto err; + return err; } /* Write the increment time value to PHY and LAN */ err = ice_ptp_write_incval(hw, ice_base_incval(pf)); if (err) { ice_ptp_unlock(hw); - goto err; + return err; } /* Write the initial Time value to PHY and LAN using the cached PHC @@ -2674,38 +2725,54 @@ void ice_ptp_reset(struct ice_pf *pf) err = ice_ptp_write_init(pf, &ts); if (err) { ice_ptp_unlock(hw); - goto err; + return err; } /* Release the global hardware lock */ ice_ptp_unlock(hw); + /* Flush software tracking of any outstanding timestamps since we're + * about to flush the PHY timestamp block. + */ + ice_ptp_flush_all_tx_tracker(pf); + if (!ice_is_e810(hw)) { /* Enable quad interrupts */ - err = ice_ptp_tx_ena_intr(pf, true, itr); + err = ice_ptp_cfg_phy_interrupt(pf, true, 1); if (err) - goto err; - } + return err; -pfr: - /* Init Tx structures */ - if (ice_is_e810(&pf->hw)) { - err = ice_ptp_init_tx_e810(pf, &ptp->port.tx); - } else { - kthread_init_delayed_work(&ptp->port.ov_work, - ice_ptp_wait_for_offsets); - err = ice_ptp_init_tx_e82x(pf, &ptp->port.tx, - ptp->port.port_num); + ice_ptp_restart_all_phy(pf); } - if (err) + + return 0; +} + +/** + * ice_ptp_rebuild - Initialize PTP hardware clock support after reset + * @pf: Board private structure + * @reset_type: the reset type being performed + */ +void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) +{ + struct ice_ptp *ptp = &pf->ptp; + int err; + + if (ptp->state == ICE_PTP_READY) { + ice_ptp_prepare_for_reset(pf, reset_type); + } else if (ptp->state != ICE_PTP_RESETTING) { + err = -EINVAL; + dev_err(ice_pf_to_dev(pf), "PTP was not initialized\n"); goto err; + } - set_bit(ICE_FLAG_PTP, pf->flags); + if (ice_pf_src_tmr_owned(pf) && reset_type != ICE_RESET_PFR) { + err = ice_ptp_rebuild_owner(pf); + if (err) + goto err; + } - /* Restart the PHY timestamping block */ - if (!test_bit(ICE_PFR_REQ, pf->state) && - ice_pf_src_tmr_owned(pf)) - ice_ptp_restart_all_phy(pf); + ptp->state = ICE_PTP_READY; /* Start periodic work going */ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); @@ -2714,6 +2781,7 @@ pfr: return; err: + ptp->state = ICE_PTP_ERROR; dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); } @@ -2923,39 +2991,6 @@ int ice_ptp_clock_index(struct ice_pf *pf) } /** - * ice_ptp_prepare_for_reset - Prepare PTP for reset - * @pf: Board private structure - */ -void ice_ptp_prepare_for_reset(struct ice_pf *pf) -{ - struct ice_ptp *ptp = &pf->ptp; - u8 src_tmr; - - clear_bit(ICE_FLAG_PTP, pf->flags); - - /* Disable timestamping for both Tx and Rx */ - ice_ptp_disable_timestamp_mode(pf); - - 
kthread_cancel_delayed_work_sync(&ptp->work); - - if (test_bit(ICE_PFR_REQ, pf->state)) - return; - - ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); - - /* Disable periodic outputs */ - ice_ptp_disable_all_clkout(pf); - - src_tmr = ice_get_ptp_src_clock_index(&pf->hw); - - /* Disable source clock */ - wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M); - - /* Acquire PHC and system timer to restore after reset */ - ptp->reset_time = ktime_get_real_ns(); -} - -/** * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device * @pf: Board private structure * @@ -2967,7 +3002,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; struct timespec64 ts; - int err, itr = 1; + int err; err = ice_ptp_init_phc(hw); if (err) { @@ -3002,7 +3037,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf) if (!ice_is_e810(hw)) { /* Enable quad interrupts */ - err = ice_ptp_tx_ena_intr(pf, true, itr); + err = ice_ptp_cfg_phy_interrupt(pf, true, 1); if (err) goto err_exit; } @@ -3195,6 +3230,8 @@ void ice_ptp_init(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; int err; + ptp->state = ICE_PTP_INITIALIZING; + ice_ptp_init_phy_model(hw); ice_ptp_init_tx_interrupt_mode(pf); @@ -3219,12 +3256,13 @@ void ice_ptp_init(struct ice_pf *pf) /* Configure initial Tx interrupt settings */ ice_ptp_cfg_tx_interrupt(pf); - set_bit(ICE_FLAG_PTP, pf->flags); - err = ice_ptp_init_work(pf, ptp); + err = ice_ptp_create_auxbus_device(pf); if (err) goto err; - err = ice_ptp_create_auxbus_device(pf); + ptp->state = ICE_PTP_READY; + + err = ice_ptp_init_work(pf, ptp); if (err) goto err; @@ -3237,7 +3275,7 @@ err: ptp_clock_unregister(ptp->clock); pf->ptp.clock = NULL; } - clear_bit(ICE_FLAG_PTP, pf->flags); + ptp->state = ICE_PTP_ERROR; dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err); } @@ -3250,9 +3288,11 @@ err: */ void ice_ptp_release(struct ice_pf *pf) { - if (!test_bit(ICE_FLAG_PTP, pf->flags)) + if (pf->ptp.state != ICE_PTP_READY) return; + pf->ptp.state = ICE_PTP_UNINIT; + /* Disable timestamping for both Tx and Rx */ ice_ptp_disable_timestamp_mode(pf); @@ -3260,8 +3300,6 @@ void ice_ptp_release(struct ice_pf *pf) ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); - clear_bit(ICE_FLAG_PTP, pf->flags); - kthread_cancel_delayed_work_sync(&pf->ptp.work); ice_ptp_port_phy_stop(&pf->ptp.port); @@ -3271,6 +3309,9 @@ void ice_ptp_release(struct ice_pf *pf) pf->ptp.kworker = NULL; } + if (ice_pf_src_tmr_owned(pf)) + ice_ptp_unregister_auxbus_driver(pf); + if (!pf->ptp.clock) return; @@ -3280,7 +3321,5 @@ void ice_ptp_release(struct ice_pf *pf) ptp_clock_unregister(pf->ptp.clock); pf->ptp.clock = NULL; - ice_ptp_unregister_auxbus_driver(pf); - dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n"); } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 087dd32d8762..3af20025043a 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -100,7 +100,7 @@ struct ice_perout_channel { * the last timestamp we read for a given index. If the current timestamp * value is the same as the cached value, we assume a new timestamp hasn't * been captured. This avoids reporting stale timestamps to the stack. This is - * only done if the verify_cached flag is set in ice_ptp_tx structure. + * only done if the has_ready_bitmap flag is not set in ice_ptp_tx structure. 
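+ *
+ * For example, ice_ptp_init_tx_e82x() sets
+ *
+ *	tx->has_ready_bitmap = 1;
+ *
+ * and relies on the hardware Tx ready bitmap, while ice_ptp_init_tx_e810()
+ * sets it to 0 and falls back to this cached-timestamp comparison.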
*/ struct ice_tx_tstamp { struct sk_buff *skb; @@ -130,7 +130,9 @@ enum ice_tx_tstamp_work { * @init: if true, the tracker is initialized; * @calibrating: if true, the PHY is calibrating the Tx offset. During this * window, timestamps are temporarily disabled. - * @verify_cached: if true, verify new timestamp differs from last read value + * @has_ready_bitmap: if true, the hardware has a valid Tx timestamp ready + * bitmap register. If false, fall back to verifying new + * timestamp values against previously cached copy. * @last_ll_ts_idx_read: index of the last LL TS read by the FW */ struct ice_ptp_tx { @@ -143,7 +145,7 @@ struct ice_ptp_tx { u8 len; u8 init : 1; u8 calibrating : 1; - u8 verify_cached : 1; + u8 has_ready_bitmap : 1; s8 last_ll_ts_idx_read; }; @@ -203,8 +205,17 @@ struct ice_ptp_port_owner { #define GLTSYN_TGT_H_IDX_MAX 4 +enum ice_ptp_state { + ICE_PTP_UNINIT = 0, + ICE_PTP_INITIALIZING, + ICE_PTP_READY, + ICE_PTP_RESETTING, + ICE_PTP_ERROR, +}; + /** * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK + * @state: current state of PTP state machine * @tx_interrupt_mode: the TX interrupt mode for the PTP clock * @port: data for the PHY port initialization procedure * @ports_owner: data for the auxiliary driver owner @@ -227,6 +238,7 @@ struct ice_ptp_port_owner { * @late_cached_phc_updates: number of times cached PHC update is late */ struct ice_ptp { + enum ice_ptp_state state; enum ice_ptp_tx_interrupt tx_interrupt_mode; struct ice_ptp_port port; struct ice_ptp_port_owner ports_owner; @@ -304,8 +316,9 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, const struct ice_pkt_ctx *pkt_ctx); -void ice_ptp_reset(struct ice_pf *pf); -void ice_ptp_prepare_for_reset(struct ice_pf *pf); +void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type); +void ice_ptp_prepare_for_reset(struct ice_pf *pf, + enum ice_reset_req reset_type); void ice_ptp_init(struct ice_pf *pf); void ice_ptp_release(struct ice_pf *pf); void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup); @@ -345,8 +358,15 @@ ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, return 0; } -static inline void ice_ptp_reset(struct ice_pf *pf) { } -static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { } +static inline void ice_ptp_rebuild(struct ice_pf *pf, + enum ice_reset_req reset_type) +{ +} + +static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf, + enum ice_reset_req reset_type) +{ +} static inline void ice_ptp_init(struct ice_pf *pf) { } static inline void ice_ptp_release(struct ice_pf *pf) { } static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 839e5da24ad5..f8f1d2bdc1be 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -143,8 +143,12 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); - if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) | - BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) + if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) { + ring->vsi->back->hw_rx_eipe_error++; + return; + } + + if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))) goto checksum_fail; if (ipv6 && (rx_status0 & 
(BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S)))) diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 41ab6d7bbd9e..9ff92dba5823 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -132,6 +132,7 @@ enum ice_mac_type { ICE_MAC_E810, ICE_MAC_E830, ICE_MAC_GENERIC, + ICE_MAC_GENERIC_3K_E825, }; /* Media Types */ @@ -1072,7 +1073,7 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_OROM_VER_BUILD_SHIFT 8 #define ICE_OROM_VER_BUILD_MASK (0xffff << ICE_OROM_VER_BUILD_SHIFT) #define ICE_OROM_VER_SHIFT 24 -#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) +#define ICE_OROM_VER_MASK (0xffU << ICE_OROM_VER_SHIFT) #define ICE_SR_PFA_PTR 0x40 #define ICE_SR_1ST_NVM_BANK_PTR 0x42 #define ICE_SR_NVM_BANK_SIZE 0x43 diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 8b81a1677045..8a051420fa19 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -217,42 +217,28 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) */ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) { - DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1); - u16 size = __struct_size(qg_buf); struct ice_q_vector *q_vector; - struct ice_tx_ring *tx_ring; - struct ice_rx_ring *rx_ring; int err; - if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) - return -EINVAL; - - qg_buf->num_txqs = 1; - - tx_ring = vsi->tx_rings[q_idx]; - rx_ring = vsi->rx_rings[q_idx]; - q_vector = rx_ring->q_vector; - - err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf); + err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx); if (err) return err; if (ice_is_xdp_ena_vsi(vsi)) { struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; - memset(qg_buf, 0, size); - qg_buf->num_txqs = 1; - err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf); + err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx); if (err) return err; ice_set_ring_xdp(xdp_ring); ice_tx_xsk_pool(vsi, q_idx); } - err = ice_vsi_cfg_rxq(rx_ring); + err = ice_vsi_cfg_single_rxq(vsi, q_idx); if (err) return err; + q_vector = vsi->rx_rings[q_idx]->q_vector; ice_qvec_cfg_msix(vsi, q_vector); err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true); diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h index 8dc837889723..4a3c4454d25a 100644 --- a/drivers/net/ethernet/intel/idpf/virtchnl2.h +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -978,7 +978,7 @@ struct virtchnl2_ptype { u8 proto_id_count; __le16 pad; __le16 proto_id[]; -}; +} __packed __aligned(2); VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype); /** diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index a2b759531cb7..3c2dc7bdebb5 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -637,7 +637,7 @@ struct igb_adapter { struct timespec64 period; } perout[IGB_N_PEROUT]; - char fw_version[32]; + char fw_version[48]; #ifdef CONFIG_IGB_HWMON struct hwmon_buff *igb_hwmon_buff; bool ets; diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index b66199c9bb3a..99977a22b843 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -3027,7 +3027,7 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } -static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) 
+static int igb_get_eee(struct net_device *netdev, struct ethtool_keee *edata) { struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -3038,11 +3038,13 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) (hw->phy.media_type != e1000_media_type_copper)) return -EOPNOTSUPP; - edata->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + edata->supported); if (!hw->dev_spec._82575.eee_disable) - edata->advertised = - mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + mii_eee_cap1_mod_linkmode_t(edata->advertised, + adapter->eee_advert); /* The IPCNFG and EEER registers are not supported on I354. */ if (hw->mac.type == e1000_i354) { @@ -3068,7 +3070,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) if (ret_val) return -ENODATA; - edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data); break; case e1000_i354: case e1000_i210: @@ -3079,7 +3081,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) if (ret_val) return -ENODATA; - edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + mii_eee_cap1_mod_linkmode_t(edata->lp_advertised, phy_data); break; default: @@ -3099,18 +3101,20 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata) edata->eee_enabled = false; edata->eee_active = false; edata->tx_lpi_enabled = false; - edata->advertised &= ~edata->advertised; + linkmode_zero(edata->advertised); } return 0; } static int igb_set_eee(struct net_device *netdev, - struct ethtool_eee *edata) + struct ethtool_keee *edata) { struct igb_adapter *adapter = netdev_priv(netdev); + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {}; + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {}; struct e1000_hw *hw = &adapter->hw; - struct ethtool_eee eee_curr; + struct ethtool_keee eee_curr; bool adv1g_eee = true, adv100m_eee = true; s32 ret_val; @@ -3118,7 +3122,7 @@ static int igb_set_eee(struct net_device *netdev, (hw->phy.media_type != e1000_media_type_copper)) return -EOPNOTSUPP; - memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + memset(&eee_curr, 0, sizeof(struct ethtool_keee)); ret_val = igb_get_eee(netdev, &eee_curr); if (ret_val) @@ -3138,14 +3142,21 @@ static int igb_set_eee(struct net_device *netdev, return -EINVAL; } - if (!edata->advertised || (edata->advertised & - ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL))) { + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + supported); + if (linkmode_andnot(tmp, edata->advertised, supported)) { dev_err(&adapter->pdev->dev, "EEE Advertisement supports only 100Tx and/or 100T full duplex\n"); return -EINVAL; } - adv100m_eee = !!(edata->advertised & ADVERTISE_100_FULL); - adv1g_eee = !!(edata->advertised & ADVERTISE_1000_FULL); + adv100m_eee = linkmode_test_bit( + ETHTOOL_LINK_MODE_100baseT_Full_BIT, + edata->advertised); + adv1g_eee = linkmode_test_bit( + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->advertised); } else if (!edata->eee_enabled) { dev_err(&adapter->pdev->dev, @@ -3153,7 +3164,7 @@ static int igb_set_eee(struct net_device *netdev, return -EINVAL; } - adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised); if 
(hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { hw->dev_spec._82575.eee_disable = !edata->eee_enabled; adapter->flags |= IGB_FLAG_EEE; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 4df8d4153aa5..cebb44f51d5f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3069,7 +3069,6 @@ void igb_set_fw_version(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct e1000_fw_version fw; - char *lbuf; igb_get_fw_version(hw, &fw); @@ -3077,34 +3076,36 @@ void igb_set_fw_version(struct igb_adapter *adapter) case e1000_i210: case e1000_i211: if (!(igb_get_flash_presence_i210(hw))) { - lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d", - fw.invm_major, fw.invm_minor, - fw.invm_img_type); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%2d.%2d-%d", + fw.invm_major, fw.invm_minor, + fw.invm_img_type); break; } fallthrough; default: /* if option rom is valid, display its version too */ if (fw.or_valid) { - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d", - fw.eep_major, fw.eep_minor, - fw.etrack_id, fw.or_major, fw.or_build, - fw.or_patch); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x, %d.%d.%d", + fw.eep_major, fw.eep_minor, fw.etrack_id, + fw.or_major, fw.or_build, fw.or_patch); /* no option rom */ } else if (fw.etrack_id != 0X0000) { - lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x", - fw.eep_major, fw.eep_minor, - fw.etrack_id); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d, 0x%08x", + fw.eep_major, fw.eep_minor, fw.etrack_id); } else { - lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major, - fw.eep_minor, fw.eep_build); + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d.%d", + fw.eep_major, fw.eep_minor, fw.eep_build); } break; } - - /* the truncate happens here if it doesn't fit */ - strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version)); - kfree(lbuf); } /** diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 319c544b9f04..f94570556120 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); /* adjust timestamp for the TX latency based on link speed */ - if (adapter->hw.mac.type == e1000_i210) { + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { switch (adapter->link_speed) { case SPEED_10: adjust = IGB_I210_TX_LATENCY_10; @@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, ktime_t *timestamp) { struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; struct skb_shared_hwtstamps ts; __le64 *regval = (__le64 *)va; int adjust = 0; @@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1])); /* adjust timestamp for the RX latency based on link speed */ - if (adapter->hw.mac.type == e1000_i210) { + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { switch (adapter->link_speed) { case SPEED_10: adjust = IGB_I210_RX_LATENCY_10; diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile index 95d1e8c490a4..ebffd3054285 100644 --- a/drivers/net/ethernet/intel/igc/Makefile +++ b/drivers/net/ethernet/intel/igc/Makefile 
@@ -6,6 +6,7 @@ # obj-$(CONFIG_IGC) += igc.o +igc-$(CONFIG_IGC_LEDS) += igc_leds.o igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 45430e246e9c..cfa6baccec55 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -168,7 +168,7 @@ struct igc_ring { struct igc_adapter { struct net_device *netdev; - struct ethtool_eee eee; + struct ethtool_keee eee; u16 eee_advert; unsigned long state; @@ -295,6 +295,9 @@ struct igc_adapter { struct timespec64 start; struct timespec64 period; } perout[IGC_N_PEROUT]; + + /* LEDs */ + struct mutex led_mutex; }; void igc_up(struct igc_adapter *adapter); @@ -585,7 +588,7 @@ enum igc_filter_match_flags { struct igc_nfc_filter { u8 match_flags; u16 etype; - __be16 vlan_etype; + u16 vlan_etype; u16 vlan_tci; u16 vlan_tci_mask; u8 src_addr[ETH_ALEN]; @@ -720,6 +723,8 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter); void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts); void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter); +int igc_led_setup(struct igc_adapter *adapter); + #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring)) #define IGC_TXD_DCMD (IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS) diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index b95d2c86e803..1a64f1ca6ca8 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -981,7 +981,7 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { fsp->flow_type |= FLOW_EXT; - fsp->h_ext.vlan_etype = rule->filter.vlan_etype; + fsp->h_ext.vlan_etype = htons(rule->filter.vlan_etype); fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK; } @@ -1249,7 +1249,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, /* VLAN etype matching */ if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) { - rule->filter.vlan_etype = fsp->h_ext.vlan_etype; + rule->filter.vlan_etype = ntohs(fsp->h_ext.vlan_etype); rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE; } @@ -1623,18 +1623,17 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) } static int igc_ethtool_get_eee(struct net_device *netdev, - struct ethtool_eee *edata) + struct ethtool_keee *edata) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_hw *hw = &adapter->hw; u32 eeer; if (hw->dev_spec._base.eee_enable) - edata->advertised = - mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + mii_eee_cap1_mod_linkmode_t(edata->advertised, + adapter->eee_advert); *edata = adapter->eee; - edata->supported = SUPPORTED_Autoneg; eeer = rd32(IGC_EEER); @@ -1647,9 +1646,6 @@ static int igc_ethtool_get_eee(struct net_device *netdev, edata->eee_enabled = hw->dev_spec._base.eee_enable; - edata->advertised = SUPPORTED_Autoneg; - edata->lp_advertised = SUPPORTED_Autoneg; - /* Report correct negotiated EEE status for devices that * wrongly report EEE at half-duplex */ @@ -1657,21 +1653,21 @@ static int igc_ethtool_get_eee(struct net_device *netdev, edata->eee_enabled = false; edata->eee_active = false; edata->tx_lpi_enabled = false; - edata->advertised &= ~edata->advertised; + linkmode_zero(edata->advertised); } return 0; } static int igc_ethtool_set_eee(struct net_device *netdev, - struct 
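The ethtool_eee to ethtool_keee hunks above and just below replace u32 advertisement masks with linkmode bitmaps; a u32 cannot represent the full modern link-mode space, so helpers such as mii_eee_cap1_mod_linkmode_t() and linkmode_to_mii_eee_cap1_t() translate between the MMD EEE advertisement register (IEEE 802.3 register 7.60) and the bitmaps. A rough userspace model of the register-to-bitmap direction; the LM_* bit numbers are invented for illustration, while 0x0002/0x0004 are the real MDIO_EEE_100TX/MDIO_EEE_1000T register bits:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG           (8 * sizeof(unsigned long))
#define LM_100baseT_Full        3       /* illustrative, not the uapi values */
#define LM_1000baseT_Full       5

static void lm_mod(unsigned long *map, unsigned int bit, int set)
{
        unsigned long m = 1UL << (bit % BITS_PER_LONG);

        if (set)
                map[bit / BITS_PER_LONG] |= m;
        else
                map[bit / BITS_PER_LONG] &= ~m;
}

/* Same spirit as mii_eee_cap1_mod_linkmode_t(): set or clear one
 * linkmode bit per advertisement bit of MMD register 7.60. */
static void eee_adv_to_linkmode(unsigned long *map, uint16_t adv)
{
        lm_mod(map, LM_100baseT_Full, adv & 0x0002);    /* MDIO_EEE_100TX */
        lm_mod(map, LM_1000baseT_Full, adv & 0x0004);   /* MDIO_EEE_1000T */
}

int main(void)
{
        unsigned long advertised[2] = { 0 };    /* the bitmap in ethtool_keee */

        eee_adv_to_linkmode(advertised, 0x0006);
        printf("%#lx\n", advertised[0]);        /* bits 3 and 5: 0x28 */
        return 0;
}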
ethtool_eee *edata) + struct ethtool_keee *edata) { struct igc_adapter *adapter = netdev_priv(netdev); struct igc_hw *hw = &adapter->hw; - struct ethtool_eee eee_curr; + struct ethtool_keee eee_curr; s32 ret_val; - memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + memset(&eee_curr, 0, sizeof(struct ethtool_keee)); ret_val = igc_ethtool_get_eee(netdev, &eee_curr); if (ret_val) { @@ -1699,7 +1695,8 @@ static int igc_ethtool_set_eee(struct net_device *netdev, return -EINVAL; } - adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { hw->dev_spec._base.eee_enable = edata->eee_enabled; adapter->flags |= IGC_FLAG_EEE; diff --git a/drivers/net/ethernet/intel/igc/igc_leds.c b/drivers/net/ethernet/intel/igc/igc_leds.c new file mode 100644 index 000000000000..bf240c5daf86 --- /dev/null +++ b/drivers/net/ethernet/intel/igc/igc_leds.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 Linutronix GmbH */ + +#include <linux/bits.h> +#include <linux/leds.h> +#include <linux/netdevice.h> +#include <linux/pm_runtime.h> +#include <uapi/linux/uleds.h> + +#include "igc.h" + +#define IGC_NUM_LEDS 3 + +#define IGC_LEDCTL_LED0_MODE_SHIFT 0 +#define IGC_LEDCTL_LED0_MODE_MASK GENMASK(3, 0) +#define IGC_LEDCTL_LED0_BLINK BIT(7) +#define IGC_LEDCTL_LED1_MODE_SHIFT 8 +#define IGC_LEDCTL_LED1_MODE_MASK GENMASK(11, 8) +#define IGC_LEDCTL_LED1_BLINK BIT(15) +#define IGC_LEDCTL_LED2_MODE_SHIFT 16 +#define IGC_LEDCTL_LED2_MODE_MASK GENMASK(19, 16) +#define IGC_LEDCTL_LED2_BLINK BIT(23) + +#define IGC_LEDCTL_MODE_ON 0x00 +#define IGC_LEDCTL_MODE_OFF 0x01 +#define IGC_LEDCTL_MODE_LINK_10 0x05 +#define IGC_LEDCTL_MODE_LINK_100 0x06 +#define IGC_LEDCTL_MODE_LINK_1000 0x07 +#define IGC_LEDCTL_MODE_LINK_2500 0x08 +#define IGC_LEDCTL_MODE_ACTIVITY 0x0b + +#define IGC_SUPPORTED_MODES \ + (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK_1000) | \ + BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK_10) | \ + BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX)) + +#define IGC_ACTIVITY_MODES \ + (BIT(TRIGGER_NETDEV_RX) | BIT(TRIGGER_NETDEV_TX)) + +struct igc_led_classdev { + struct net_device *netdev; + struct led_classdev led; + int index; +}; + +#define lcdev_to_igc_ldev(lcdev) \ + container_of(lcdev, struct igc_led_classdev, led) + +static void igc_led_select(struct igc_adapter *adapter, int led, + u32 *mask, u32 *shift, u32 *blink) +{ + switch (led) { + case 0: + *mask = IGC_LEDCTL_LED0_MODE_MASK; + *shift = IGC_LEDCTL_LED0_MODE_SHIFT; + *blink = IGC_LEDCTL_LED0_BLINK; + break; + case 1: + *mask = IGC_LEDCTL_LED1_MODE_MASK; + *shift = IGC_LEDCTL_LED1_MODE_SHIFT; + *blink = IGC_LEDCTL_LED1_BLINK; + break; + case 2: + *mask = IGC_LEDCTL_LED2_MODE_MASK; + *shift = IGC_LEDCTL_LED2_MODE_SHIFT; + *blink = IGC_LEDCTL_LED2_BLINK; + break; + default: + *mask = *shift = *blink = 0; + netdev_err(adapter->netdev, "Unknown LED %d selected!\n", led); + } +} + +static void igc_led_set(struct igc_adapter *adapter, int led, u32 mode, + bool blink) +{ + u32 shift, mask, blink_bit, ledctl; + struct igc_hw *hw = &adapter->hw; + + igc_led_select(adapter, led, &mask, &shift, &blink_bit); + + pm_runtime_get_sync(&adapter->pdev->dev); + mutex_lock(&adapter->led_mutex); + + /* Set mode */ + ledctl = rd32(IGC_LEDCTL); + ledctl &= ~mask; + ledctl |= mode << shift; + + /* Configure blinking */ + if (blink) + ledctl |= blink_bit; + else + ledctl &= ~blink_bit; + 
wr32(IGC_LEDCTL, ledctl); + + mutex_unlock(&adapter->led_mutex); + pm_runtime_put(&adapter->pdev->dev); +} + +static u32 igc_led_get(struct igc_adapter *adapter, int led) +{ + u32 shift, mask, blink_bit, ledctl; + struct igc_hw *hw = &adapter->hw; + + igc_led_select(adapter, led, &mask, &shift, &blink_bit); + + pm_runtime_get_sync(&adapter->pdev->dev); + mutex_lock(&adapter->led_mutex); + ledctl = rd32(IGC_LEDCTL); + mutex_unlock(&adapter->led_mutex); + pm_runtime_put(&adapter->pdev->dev); + + return (ledctl & mask) >> shift; +} + +static int igc_led_brightness_set_blocking(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev); + struct igc_adapter *adapter = netdev_priv(ldev->netdev); + u32 mode; + + if (brightness) + mode = IGC_LEDCTL_MODE_ON; + else + mode = IGC_LEDCTL_MODE_OFF; + + netdev_dbg(adapter->netdev, "Set brightness for LED %d to mode %u!\n", + ldev->index, mode); + + igc_led_set(adapter, ldev->index, mode, false); + + return 0; +} + +static int igc_led_hw_control_is_supported(struct led_classdev *led_cdev, + unsigned long flags) +{ + if (flags & ~IGC_SUPPORTED_MODES) + return -EOPNOTSUPP; + + /* If Tx and Rx selected, activity can be offloaded unless some other + * mode is selected as well. + */ + if ((flags & BIT(TRIGGER_NETDEV_TX)) && + (flags & BIT(TRIGGER_NETDEV_RX)) && + !(flags & ~IGC_ACTIVITY_MODES)) + return 0; + + /* Single Rx or Tx activity is not supported. */ + if (flags & IGC_ACTIVITY_MODES) + return -EOPNOTSUPP; + + /* Only one mode can be active at a given time. */ + if (flags & (flags - 1)) + return -EOPNOTSUPP; + + return 0; +} + +static int igc_led_hw_control_set(struct led_classdev *led_cdev, + unsigned long flags) +{ + struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev); + struct igc_adapter *adapter = netdev_priv(ldev->netdev); + u32 mode = IGC_LEDCTL_MODE_OFF; + bool blink = false; + + if (flags & BIT(TRIGGER_NETDEV_LINK_10)) + mode = IGC_LEDCTL_MODE_LINK_10; + if (flags & BIT(TRIGGER_NETDEV_LINK_100)) + mode = IGC_LEDCTL_MODE_LINK_100; + if (flags & BIT(TRIGGER_NETDEV_LINK_1000)) + mode = IGC_LEDCTL_MODE_LINK_1000; + if (flags & BIT(TRIGGER_NETDEV_LINK_2500)) + mode = IGC_LEDCTL_MODE_LINK_2500; + if ((flags & BIT(TRIGGER_NETDEV_TX)) && + (flags & BIT(TRIGGER_NETDEV_RX))) + mode = IGC_LEDCTL_MODE_ACTIVITY; + + netdev_dbg(adapter->netdev, "Set HW control for LED %d to mode %u!\n", + ldev->index, mode); + + /* blink is recommended for activity */ + if (mode == IGC_LEDCTL_MODE_ACTIVITY) + blink = true; + + igc_led_set(adapter, ldev->index, mode, blink); + + return 0; +} + +static int igc_led_hw_control_get(struct led_classdev *led_cdev, + unsigned long *flags) +{ + struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev); + struct igc_adapter *adapter = netdev_priv(ldev->netdev); + u32 mode; + + mode = igc_led_get(adapter, ldev->index); + + switch (mode) { + case IGC_LEDCTL_MODE_ACTIVITY: + *flags = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX); + break; + case IGC_LEDCTL_MODE_LINK_10: + *flags = BIT(TRIGGER_NETDEV_LINK_10); + break; + case IGC_LEDCTL_MODE_LINK_100: + *flags = BIT(TRIGGER_NETDEV_LINK_100); + break; + case IGC_LEDCTL_MODE_LINK_1000: + *flags = BIT(TRIGGER_NETDEV_LINK_1000); + break; + case IGC_LEDCTL_MODE_LINK_2500: + *flags = BIT(TRIGGER_NETDEV_LINK_2500); + break; + } + + return 0; +} + +static struct device *igc_led_hw_control_get_device(struct led_classdev *led_cdev) +{ + struct igc_led_classdev *ldev = lcdev_to_igc_ldev(led_cdev); + + return 
&ldev->netdev->dev; +} + +static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf, + size_t buf_len) +{ + snprintf(buf, buf_len, "igc-%x%x-led%d", + pci_domain_nr(adapter->pdev->bus), + pci_dev_id(adapter->pdev), index); +} + +static void igc_setup_ldev(struct igc_led_classdev *ldev, + struct net_device *netdev, int index) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct led_classdev *led_cdev = &ldev->led; + char led_name[LED_MAX_NAME_SIZE]; + + ldev->netdev = netdev; + ldev->index = index; + + igc_led_get_name(adapter, index, led_name, LED_MAX_NAME_SIZE); + led_cdev->name = led_name; + led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN; + led_cdev->max_brightness = 1; + led_cdev->brightness_set_blocking = igc_led_brightness_set_blocking; + led_cdev->hw_control_trigger = "netdev"; + led_cdev->hw_control_is_supported = igc_led_hw_control_is_supported; + led_cdev->hw_control_set = igc_led_hw_control_set; + led_cdev->hw_control_get = igc_led_hw_control_get; + led_cdev->hw_control_get_device = igc_led_hw_control_get_device; + + devm_led_classdev_register(&netdev->dev, led_cdev); +} + +int igc_led_setup(struct igc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct device *dev = &netdev->dev; + struct igc_led_classdev *leds; + int i; + + mutex_init(&adapter->led_mutex); + + leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL); + if (!leds) + return -ENOMEM; + + for (i = 0; i < IGC_NUM_LEDS; i++) + igc_setup_ldev(leds + i, netdev, i); + + return 0; +} diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index ba8d3fe186ae..3af52d238f3b 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3385,7 +3385,7 @@ static int igc_flex_filter_select(struct igc_adapter *adapter, u32 fhftsl; if (input->index >= MAX_FLEX_FILTER) { - dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n"); return -EINVAL; } @@ -3420,7 +3420,6 @@ static int igc_flex_filter_select(struct igc_adapter *adapter, static int igc_write_flex_filter_ll(struct igc_adapter *adapter, struct igc_flex_filter *input) { - struct device *dev = &adapter->pdev->dev; struct igc_hw *hw = &adapter->hw; u8 *data = input->data; u8 *mask = input->mask; @@ -3434,7 +3433,7 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter, * out early to avoid surprises later. 
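igc_led_get_name() above derives a stable name from the PCI address: "%x%x" concatenates the domain number with pci_dev_id(), which packs the bus number into the high byte and devfn into the low byte. A quick userspace check of what that yields; the PCI numbers are example values and PCI_DEVID mirrors the kernel macro:

#include <stdint.h>
#include <stdio.h>

#define PCI_DEVID(bus, devfn)   ((uint16_t)(((bus) << 8) | (devfn)))

int main(void)
{
        unsigned int domain = 0;        /* example device: 0000:03:00.0 */
        uint8_t bus = 0x03, devfn = 0x00;
        char name[32];

        snprintf(name, sizeof(name), "igc-%x%x-led%d",
                 domain, (unsigned)PCI_DEVID(bus, devfn), 0);
        printf("%s\n", name);   /* igc-0300-led0 */
        return 0;
}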
*/ if (input->length % 8 != 0) { - dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n"); return -EINVAL; } @@ -3504,8 +3503,8 @@ static int igc_write_flex_filter_ll(struct igc_adapter *adapter, } wr32(IGC_WUFC, wufc); - dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", - input->index); + netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n", + input->index); return 0; } @@ -3577,9 +3576,9 @@ static bool igc_flex_filter_in_use(struct igc_adapter *adapter) static int igc_add_flex_filter(struct igc_adapter *adapter, struct igc_nfc_rule *rule) { - struct igc_flex_filter flex = { }; struct igc_nfc_filter *filter = &rule->filter; unsigned int eth_offset, user_offset; + struct igc_flex_filter flex = { }; int ret, index; bool vlan; @@ -3615,10 +3614,12 @@ static int igc_add_flex_filter(struct igc_adapter *adapter, ETH_ALEN, NULL); /* Add VLAN etype */ - if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) - igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, - sizeof(filter->vlan_etype), - NULL); + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { + __be16 vlan_etype = cpu_to_be16(filter->vlan_etype); + + igc_flex_filter_add_field(&flex, &vlan_etype, 12, + sizeof(vlan_etype), NULL); + } /* Add VLAN TCI */ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) @@ -6977,6 +6978,12 @@ static int igc_probe(struct pci_dev *pdev, pm_runtime_put_noidle(&pdev->dev); + if (IS_ENABLED(CONFIG_IGC_LEDS)) { + err = igc_led_setup(adapter); + if (err) + goto err_register; + } + return 0; err_register: diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 7cd8716d2ffa..861f37076861 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -130,11 +130,7 @@ void igc_power_down_phy_copper(struct igc_hw *hw) /* The PHY will retain its settings across a power down/up cycle */ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); mii_reg |= MII_CR_POWER_DOWN; - - /* Temporary workaround - should be removed when PHY will implement - * IEEE registers as properly - */ - /* hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);*/ + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); usleep_range(1000, 2000); } diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index d38c87d7e5e8..e5b893fc5b66 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -12,6 +12,7 @@ #define IGC_MDIC 0x00020 /* MDI Control - RW */ #define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ #define IGC_VET 0x00038 /* VLAN Ether Type - RW */ +#define IGC_LEDCTL 0x00E00 /* LED Control - RW */ #define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */ #define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index b6f0376e42f4..559b443c409f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -949,19 +949,19 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); void ixgbe_write_eitr(struct ixgbe_q_vector *); int ixgbe_poll(struct napi_struct *napi, int budget); int ethtool_ioctl(struct ifreq *ifr); -s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); -s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); -s32 
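The vlan_etype hunks above (igc.h, igc_ethtool.c, igc_main.c) settle on one internal byte order: ethtool's fsp->h_ext.vlan_etype is big-endian uAPI, so the driver now converts once on ingest with ntohs(), back with htons() on readout, and to big-endian again only where raw packet bytes are matched (the flex filter's cpu_to_be16() temporary). A compact model of converting only at the edges:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint16_t uapi = htons(0x8100);          /* as ethtool hands it in (__be16) */
        uint16_t vlan_etype = ntohs(uapi);      /* stored host order, like the new u16 */
        uint16_t be = htons(vlan_etype);        /* wire order for raw byte matching */
        uint8_t wire[2];

        memcpy(wire, &be, sizeof(be));
        printf("%02x %02x\n", wire[0], wire[1]);        /* 81 00 on any host */
        return 0;
}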
ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); -s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, +int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); +int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue); -s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, +int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input_mask); -s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, +int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u16 soft_id, u8 queue); -s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, +int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u16 soft_id); void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, @@ -1059,7 +1059,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); void ixgbe_store_key(struct ixgbe_adapter *adapter); void ixgbe_store_reta(struct ixgbe_adapter *adapter); -s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, +int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); #ifdef CONFIG_IXGBE_IPSEC void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 6835d5f18753..283a23150a4d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -15,10 +15,10 @@ #define IXGBE_82598_VFT_TBL_SIZE 128 #define IXGBE_82598_RX_PB_SIZE 512 -static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, +static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, +static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); /** @@ -66,7 +66,7 @@ out: IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); } -static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) +static int ixgbe_get_invariants_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -93,12 +93,12 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) * not known. Perform the SFP init if necessary. * **/ -static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +static int ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val; u16 list_offset, data_offset; + int ret_val; /* Identify the PHY */ phy->ops.identify(hw); @@ -148,9 +148,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) * Then set pcie completion timeout * **/ -static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +static int ixgbe_start_hw_82598(struct ixgbe_hw *hw) { - s32 ret_val; + int ret_val; ret_val = ixgbe_start_hw_generic(hw); if (ret_val) @@ -170,7 +170,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) * * Determines the link capabilities by reading the AUTOC register. 
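The s32 to int churn running through these ixgbe hunks changes nothing at the ABI level, since the kernel's s32 is a typedef of int; the gain is that the usual return convention, zero on success and a negative errno on failure, now reads the same as everywhere else in the tree. The convention in miniature:

#include <errno.h>
#include <stdio.h>

/* What the converted prototypes signal at a glance: 0 on success,
 * negative errno on failure, no dedicated status type needed. */
static int hw_op(int should_fail)
{
        return should_fail ? -EIO : 0;
}

int main(void)
{
        int err = hw_op(1);

        if (err)
                fprintf(stderr, "hw_op: %d\n", err);    /* -5 */
        return 0;
}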
**/ -static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, +static int ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { @@ -271,7 +271,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) * * Enable flow control according to the current settings. **/ -static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) +static int ixgbe_fc_enable_82598(struct ixgbe_hw *hw) { u32 fctrl_reg; u32 rmcs_reg; @@ -411,13 +411,13 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. **/ -static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, +static int ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { + int status = 0; u32 autoc_reg; u32 links_reg; u32 i; - s32 status = 0; /* Restart link */ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); @@ -457,7 +457,7 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, * Function indicates success when phy link is available. If phy is not ready * within 5 seconds of MAC indicating link, the function returns error. **/ -static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +static int ixgbe_validate_link_ready(struct ixgbe_hw *hw) { u32 timeout; u16 an_reg; @@ -493,7 +493,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) * * Reads the links register to determine if link is up and the current speed **/ -static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, +static int ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { @@ -579,7 +579,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, * * Set the link speed in the AUTOC register and restarts link. **/ -static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, +static int ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { @@ -624,11 +624,11 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, * * Sets the link speed in the AUTOC register in the MAC and restarts link. **/ -static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +static int ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { - s32 status; + int status; /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, @@ -647,15 +647,15 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, * clears all interrupts, performing a PHY reset, and performing a link (MAC) * reset. 
**/ -static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +static int ixgbe_reset_hw_82598(struct ixgbe_hw *hw) { - s32 status; - s32 phy_status = 0; - u32 ctrl; + int phy_status = 0; + u8 analog_val; u32 gheccr; - u32 i; + int status; u32 autoc; - u8 analog_val; + u32 ctrl; + u32 i; /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); @@ -781,7 +781,7 @@ mac_reset_top: * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq set index **/ -static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +static int ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; @@ -805,7 +805,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq clear index (not used in 82598, but elsewhere) **/ -static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +static int ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; @@ -836,7 +836,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * * Turn on/off specified VLAN in the VLAN filter table. **/ -static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, +static int ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { u32 regindex; @@ -881,7 +881,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, * * Clears the VLAN filter table, and the VMDq index associated with the filter **/ -static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +static int ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) { u32 offset; u32 vlanbyte; @@ -905,7 +905,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) * * Performs read operation to Atlas analog register specified. **/ -static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +static int ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 atlas_ctl; @@ -927,7 +927,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) * * Performs write operation to Atlas analog register specified. **/ -static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +static int ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 atlas_ctl; @@ -948,13 +948,13 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) * * Performs 8 byte read operation to SFP module's data over I2C interface. **/ -static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, +static int ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, u8 byte_offset, u8 *eeprom_data) { - s32 status = 0; u16 sfp_addr = 0; u16 sfp_data = 0; u16 sfp_stat = 0; + int status = 0; u16 gssr; u32 i; @@ -1019,7 +1019,7 @@ out: * * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
**/ -static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, +static int ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, @@ -1034,8 +1034,8 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, * * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C **/ -static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, - u8 *sff8472_data) +static int ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *sff8472_data) { return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, byte_offset, sff8472_data); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 339e106a5732..e0c300fe5cee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -21,24 +21,24 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed); -static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, +static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); -static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, +static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete); -static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete); -static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, +static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); -static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); -static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, +static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); -static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, +static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); -static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw) @@ -98,10 +98,10 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) } } -static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +static int ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) { - s32 ret_val; u16 list_offset, data_offset, data_value; + int ret_val; if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); @@ -173,10 +173,10 @@ setup_sfp_err: * prot_autoc_write_82599(). Note, that locked can only be true in cases * where this function doesn't return an error. **/ -static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, +static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) { - s32 ret_val; + int ret_val; *locked = false; /* If LESM is on then we need to hold the SW/FW semaphore. 
*/ @@ -203,9 +203,9 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, * This part (82599) may need to hold the SW/FW lock around all writes to * AUTOC. Likewise after a write we need to do a pipeline reset. **/ -static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) +static int prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) { - s32 ret_val = 0; + int ret_val = 0; /* Blocked by MNG FW so bail */ if (ixgbe_check_reset_blocked(hw)) @@ -237,7 +237,7 @@ out: return ret_val; } -static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) +static int ixgbe_get_invariants_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; @@ -263,11 +263,11 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) * not known. Perform the SFP init if necessary. * **/ -static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) +static int ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val; + int ret_val; u32 esdp; if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { @@ -322,7 +322,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) * * Determines the link capabilities by reading the AUTOC register. **/ -static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, +static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { @@ -500,14 +500,14 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. **/ -static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, - bool autoneg_wait_to_complete) +static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) { + bool got_lock = false; + int status = 0; u32 autoc_reg; u32 links_reg; u32 i; - s32 status = 0; - bool got_lock = false; if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { status = hw->mac.ops.acquire_swfw_sync(hw, @@ -657,15 +657,15 @@ ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) * * Implements the Intel SmartSpeed algorithm. **/ -static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, - ixgbe_link_speed speed, - bool autoneg_wait_to_complete) +static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) { - s32 status = 0; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; - s32 i, j; - bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + bool link_up = false; + int status = 0; + s32 i, j; /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; @@ -767,16 +767,15 @@ out: * * Set the link speed in the AUTOC register and restarts link. 
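Beyond the type change, hunks such as ixgbe_start_mac_link_82599() and ixgbe_setup_mac_link_smartspeed() above also reorder local declarations longest first, netdev's "reverse Christmas tree" convention. In isolation, with the kernel typedef stood in by a plain one:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;   /* stand-in for the kernel typedef */

static void reverse_xmas_tree(void)
{
        unsigned long long widest_and_longest_name = 0; /* longest line first */
        bool link_up = false;
        int status = 0;
        u32 i = 0;      /* shortest line last */

        (void)widest_and_longest_name;
        (void)link_up;
        (void)status;
        (void)i;
}

int main(void)
{
        reverse_xmas_tree();
        return 0;
}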
**/ -static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, +static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - bool autoneg = false; - s32 status; - u32 pma_pmd_1g, link_mode, links_reg, i; - u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); - u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + u32 pma_pmd_10g_serial, pma_pmd_1g, link_mode, links_reg, i; + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + bool autoneg = false; + int status; /* holds the value of AUTOC register at this current point in time */ u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); @@ -785,6 +784,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, /* temporary variable used for comparison purposes */ u32 autoc = current_autoc; + pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + /* Check to see if speed passed in is supported. */ status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, &autoneg); @@ -882,11 +883,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, * * Restarts link on PHY and MAC based on settings passed in. **/ -static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, +static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - s32 status; + int status; /* Setup the PHY according to input speed */ status = hw->phy.ops.setup_link_speed(hw, speed, @@ -905,13 +906,13 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ -static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) +static int ixgbe_reset_hw_82599(struct ixgbe_hw *hw) { ixgbe_link_speed link_speed; - s32 status; u32 ctrl, i, autoc, autoc2; - u32 curr_lms; bool link_up = false; + u32 curr_lms; + int status; /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); @@ -1081,7 +1082,7 @@ mac_reset_top: * @hw: pointer to hardware structure * @fdircmd: current value of FDIRCMD register */ -static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) +static int ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) { int i; @@ -1099,12 +1100,12 @@ static s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. 
* @hw: pointer to hardware structure **/ -s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) +int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) { - int i; u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); u32 fdircmd; - s32 err; + int err; + int i; fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; @@ -1212,7 +1213,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) * @fdirctrl: value to write to flow director control register, initially * contains just the value of the Rx packet buffer allocation **/ -s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) +int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) { /* * Continue setup of fdirctrl register bits: @@ -1236,7 +1237,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) * @fdirctrl: value to write to flow director control register, initially * contains just the value of the Rx packet buffer allocation **/ -s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) +int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) { /* * Continue setup of fdirctrl register bits: @@ -1359,7 +1360,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, * Note that the tunnel bit in input must not be set when the hardware * tunneling support does not exist. **/ -s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, +int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue) @@ -1515,7 +1516,7 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) #define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value))) -s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, +int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input_mask) { /* mask IPv6 since it is currently not supported */ @@ -1627,12 +1628,12 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, return 0; } -s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, +int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u16 soft_id, u8 queue) { u32 fdirport, fdirvlan, fdirhash, fdircmd; - s32 err; + int err; /* currently IPv6 is not supported, must be programmed with 0 */ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), @@ -1690,13 +1691,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, return 0; } -s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, +int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, u16 soft_id) { u32 fdirhash; u32 fdircmd; - s32 err; + int err; /* configure FDIRHASH register */ fdirhash = (__force u32)input->formatted.bkt_hash; @@ -1734,7 +1735,7 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, * * Performs read operation to Omer analog register specified. **/ -static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) +static int ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) { u32 core_ctl; @@ -1756,7 +1757,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) * * Performs write operation to Omer analog register specified. 
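ixgbe_fdir_check_cmd_complete() above is the usual bounded completion poll: reread FDIRCMD a fixed number of times, succeed as soon as the command bits clear, otherwise give up with -EIO. The shape modelled in userspace; the mask, poll limit and fake register are stand-ins, not the driver's values:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_BUSY_MASK   0x3u
#define POLL_LIMIT      10

static uint32_t fake_fdircmd = CMD_BUSY_MASK;   /* pretend hardware */

static uint32_t read_fdircmd(void)
{
        uint32_t v = fake_fdircmd;

        fake_fdircmd = 0;       /* command completes after one poll */
        return v;
}

static int poll_cmd_complete(uint32_t *fdircmd)
{
        for (int i = 0; i < POLL_LIMIT; i++) {
                *fdircmd = read_fdircmd();
                if (!(*fdircmd & CMD_BUSY_MASK))
                        return 0;
                /* the real loop delays between reads before retrying */
        }
        return -EIO;
}

int main(void)
{
        uint32_t cmd;

        printf("%d\n", poll_cmd_complete(&cmd));        /* 0 */
        return 0;
}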
**/ -static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) +static int ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) { u32 core_ctl; @@ -1776,9 +1777,9 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) * and the generation start_hw function. * Then performs revision-specific operations, if any. **/ -static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) +static int ixgbe_start_hw_82599(struct ixgbe_hw *hw) { - s32 ret_val = 0; + int ret_val = 0; ret_val = ixgbe_start_hw_generic(hw); if (ret_val) @@ -1802,9 +1803,9 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) * If PHY already detected, maintains current PHY type in hw struct, * otherwise executes the PHY detection routine. **/ -static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +static int ixgbe_identify_phy_82599(struct ixgbe_hw *hw) { - s32 status; + int status; /* Detect PHY if not unknown - returns success if already detected. */ status = ixgbe_identify_phy_generic(hw); @@ -1835,7 +1836,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) * * Enables the Rx DMA unit for 82599 **/ -static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +static int ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) { /* * Workaround for 82599 silicon errata when enabling the Rx datapath. @@ -1865,12 +1866,12 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) * Return: -EACCES if the FW is not present or if the FW version is * not supported. **/ -static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) { u16 fw_offset, fw_ptp_cfg_offset; - s32 status = -EACCES; - u16 offset; + int status = -EACCES; u16 fw_version = 0; + u16 offset; /* firmware check is only necessary for SFI devices */ if (hw->phy.media_type != ixgbe_media_type_fiber) @@ -1917,7 +1918,7 @@ fw_version_err: static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) { u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; - s32 status; + int status; /* get the offset to the Firmware Module block */ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); @@ -1956,7 +1957,7 @@ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) * * Retrieves 16 bit word(s) read from EEPROM **/ -static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, +static int ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; @@ -1982,7 +1983,7 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, * * Reads a 16 bit word from the EEPROM **/ -static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, +static int ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, u16 offset, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; @@ -2006,11 +2007,11 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, * full pipeline reset. Note - We must hold the SW/FW semaphore before writing * to AUTOC, so this function assumes the semaphore is held. 
**/ -static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) +static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) { - s32 ret_val; - u32 anlp1_reg = 0; u32 i, autoc_reg, autoc2_reg; + u32 anlp1_reg = 0; + int ret_val; /* Enable link if disabled in NVM */ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); @@ -2061,12 +2062,12 @@ reset_pipeline_out: * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ -static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, +static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { - u32 esdp; - s32 status; s32 timeout = 200; + int status; + u32 esdp; if (hw->phy.qsfp_shared_i2c_bus == true) { /* Acquire I2C bus ownership. */ @@ -2115,12 +2116,12 @@ release_i2c_access: * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. **/ -static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, +static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { - u32 esdp; - s32 status; s32 timeout = 200; + int status; + u32 esdp; if (hw->phy.qsfp_shared_i2c_bus == true) { /* Acquire I2C bus ownership. */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 2e6e0365154a..3be1bfb16498 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -10,10 +10,10 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" -static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); -static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); -static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +static int ixgbe_ready_eeprom(struct ixgbe_hw *hw); static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u16 count); @@ -22,15 +22,15 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); static void ixgbe_release_eeprom(struct ixgbe_hw *hw); -static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); -static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); +static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, - u16 words, u16 *data); -static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, +static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset); -static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw); +static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw); /* Base table for registers values that change by MAC */ const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { @@ -111,12 +111,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) * * Called at init time to set up flow control. 
**/ -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) +int ixgbe_setup_fc_generic(struct ixgbe_hw *hw) { - s32 ret_val = 0; u32 reg = 0, reg_bp = 0; - u16 reg_cu = 0; bool locked = false; + int ret_val = 0; + u16 reg_cu = 0; /* * Validate the requested mode. Strict IEEE mode does not allow @@ -267,11 +267,11 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) * table, VLAN filter table, calls routine to set up link and flow control * settings, and leaves transmit and receive units disabled and uninitialized **/ -s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) +int ixgbe_start_hw_generic(struct ixgbe_hw *hw) { - s32 ret_val; - u32 ctrl_ext; u16 device_caps; + u32 ctrl_ext; + int ret_val; /* Set the media type */ hw->phy.media_type = hw->mac.ops.get_media_type(hw); @@ -330,7 +330,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) * 82599 * X540 **/ -s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) +int ixgbe_start_hw_gen2(struct ixgbe_hw *hw) { u32 i; @@ -354,9 +354,9 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) * up link and flow control settings, and leaves transmit and receive units * disabled and uninitialized **/ -s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) +int ixgbe_init_hw_generic(struct ixgbe_hw *hw) { - s32 status; + int status; /* Reset the hardware */ status = hw->mac.ops.reset_hw(hw); @@ -380,7 +380,7 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) * Clears all hardware statistics counters by reading them from the hardware * Statistics counters are clear on read. **/ -s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) +int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) { u16 i = 0; @@ -489,14 +489,14 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) * * Reads the part number string from the EEPROM. **/ -s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, +int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) { - s32 ret_val; - u16 data; + int ret_val; u16 pba_ptr; u16 offset; u16 length; + u16 data; if (pba_num == NULL) { hw_dbg(hw, "PBA string buffer was null\n"); @@ -599,7 +599,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, * A reset of the adapter must be performed prior to calling this function * in order for the MAC address to have been loaded from the EEPROM into RAR0 **/ -s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) +int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) { u32 rar_high; u32 rar_low; @@ -653,7 +653,7 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) * * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure **/ -s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) +int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) { u16 link_status; @@ -709,7 +709,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) * the shared code and drivers to determine if the adapter is in a stopped * state and should not touch the hardware. **/ -s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) +int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) { u32 reg_val; u16 i; @@ -759,7 +759,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) * Store the index for the link active LED. This will be used to support * blinking the LED. 
**/ -s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) +int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; u32 led_reg, led_mode; @@ -800,7 +800,7 @@ s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @index: led number to turn on **/ -s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) +int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); @@ -821,7 +821,7 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) * @hw: pointer to hardware structure * @index: led number to turn off **/ -s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) +int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); @@ -844,7 +844,7 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ -s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) +int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; u32 eec; @@ -895,11 +895,11 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) * * Writes 16 bit word(s) to EEPROM through bit-bang method **/ -s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; u16 i, count; + int status; hw->eeprom.ops.init_params(hw); @@ -942,14 +942,14 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, * If ixgbe_eeprom_update_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum. **/ -static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +static int ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; - u16 word; + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; u16 page_size; + int status; + u16 word; u16 i; - u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; /* Prepare the EEPROM for writing */ status = ixgbe_acquire_eeprom(hw); @@ -1019,7 +1019,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, * If ixgbe_eeprom_update_checksum is not called after this function, the * EEPROM will most likely contain an invalid checksum. 
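The kdoc warning repeated above, that skipping ixgbe_eeprom_update_checksum after a bit-bang write leaves the EEPROM with an invalid checksum, is a caller-ordering contract: a raw word write makes the stored checksum stale until it is recomputed. A userspace model of the scheme; 0xBABA matches IXGBE_EEPROM_SUM, while the tiny 8-word EEPROM is invented:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_WORDS    8
#define CHECKSUM_SLOT   (EEPROM_WORDS - 1)
#define EEPROM_SUM      0xBABA          /* IXGBE_EEPROM_SUM */

static uint16_t eeprom[EEPROM_WORDS];

static uint16_t calc_checksum(void)
{
        uint16_t sum = 0;

        for (int i = 0; i < CHECKSUM_SLOT; i++)
                sum += eeprom[i];
        return (uint16_t)(EEPROM_SUM - sum);
}

static void write_word(int offset, uint16_t data)
{
        eeprom[offset] = data;
        /* Skipping this update is exactly the "invalid checksum" case
         * the kdoc warns about. */
        eeprom[CHECKSUM_SLOT] = calc_checksum();
}

int main(void)
{
        uint16_t sum = 0;

        write_word(2, 0x1234);
        for (int i = 0; i < EEPROM_WORDS; i++)
                sum += eeprom[i];
        printf("valid: %d\n", sum == EEPROM_SUM);       /* 1 */
        return 0;
}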
**/ -s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) { hw->eeprom.ops.init_params(hw); @@ -1038,11 +1038,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) * * Reads 16 bit word(s) from EEPROM through bit-bang method **/ -s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; u16 i, count; + int status; hw->eeprom.ops.init_params(hw); @@ -1077,12 +1077,12 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, * * Reads 16 bit word(s) from EEPROM through bit-bang method **/ -static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +static int ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; - u16 word_in; u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + u16 word_in; + int status; u16 i; /* Prepare the EEPROM for reading */ @@ -1129,7 +1129,7 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, * * Reads 16 bit value from EEPROM through bit-bang method **/ -s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { hw->eeprom.ops.init_params(hw); @@ -1149,11 +1149,11 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, * * Reads a 16 bit word(s) from the EEPROM using the EERD register. **/ -s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { + int status; u32 eerd; - s32 status; u32 i; hw->eeprom.ops.init_params(hw); @@ -1189,11 +1189,11 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, * This function is called only when we are writing a new large buffer * at given offset so the data would be overwritten anyway. **/ -static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, +static int ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset) { u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; - s32 status; + int status; u16 i; for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) @@ -1229,7 +1229,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, * * Reads a 16 bit word from the EEPROM using the EERD register. **/ -s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) +int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) { return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); } @@ -1243,11 +1243,11 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) * * Write a 16 bit word(s) to the EEPROM using the EEWR register. **/ -s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { + int status; u32 eewr; - s32 status; u16 i; hw->eeprom.ops.init_params(hw); @@ -1286,7 +1286,7 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, * * Write a 16 bit word to the EEPROM using the EEWR register. 
**/ -s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) { return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); } @@ -1299,7 +1299,7 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) * Polls the status bit (bit 1) of the EERD or EEWR to determine when the * read or write is done respectively. **/ -static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +static int ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) { u32 i; u32 reg; @@ -1325,7 +1325,7 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) * Prepares EEPROM for access using bit-bang method. This function should * be called before issuing a command to the EEPROM. **/ -static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +static int ixgbe_acquire_eeprom(struct ixgbe_hw *hw) { u32 eec; u32 i; @@ -1371,7 +1371,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) * * Sets the hardware semaphores so EEPROM access can occur for bit-bang method **/ -static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +static int ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) { u32 timeout = 2000; u32 i; @@ -1462,7 +1462,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) * ixgbe_ready_eeprom - Polls for EEPROM ready * @hw: pointer to hardware structure **/ -static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +static int ixgbe_ready_eeprom(struct ixgbe_hw *hw) { u16 i; u8 spi_stat_reg; @@ -1680,7 +1680,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum * @hw: pointer to hardware structure **/ -s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -1728,7 +1728,7 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return (s32)checksum; + return (int)checksum; } /** @@ -1739,12 +1739,12 @@ s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ -s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, +int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val) { - s32 status; - u16 checksum; u16 read_checksum = 0; + u16 checksum; + int status; /* * Read the first word from the EEPROM. If this times out or fails, do @@ -1786,10 +1786,10 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum * @hw: pointer to hardware structure **/ -s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) +int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) { - s32 status; u16 checksum; + int status; /* * Read the first word from the EEPROM. If this times out or fails, do @@ -1823,7 +1823,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) * * Puts an ethernet address into a receive address register. **/ -s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, +int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr) { u32 rar_low, rar_high; @@ -1876,7 +1876,7 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, * * Clears an ethernet address from a receive address register. 
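ixgbe_calc_eeprom_checksum_generic() above multiplexes two outcomes through one int return value: a u16 checksum, which is always non-negative, on success, or a negative errno. That is why the final cast is harmless and why callers such as the validate path branch on sign first. The pattern reduced to essentials:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* A u16 always fits in a positive int, leaving negatives free for errors. */
static int calc_checksum(int fail)
{
        return fail ? -EIO : 0xA886;
}

int main(void)
{
        int ret = calc_checksum(0);

        if (ret < 0)
                return 1;       /* propagate the errno */

        printf("checksum = %#x\n", (uint16_t)ret);
        return 0;
}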
**/ -s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) +int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) { u32 rar_high; u32 rar_entries = hw->mac.num_rar_entries; @@ -1917,7 +1917,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) * of the receive address registers. Clears the multicast table. Assumes * the receiver is in reset when the routine is called. **/ -s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) +int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) { u32 i; u32 rar_entries = hw->mac.num_rar_entries; @@ -1980,7 +1980,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) * by the MO field of the MCSTCTRL. The MO field is set during initialization * to mc_filter_type. **/ -static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +static int ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) { u32 vector = 0; @@ -2049,7 +2049,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) * registers for the first multicast addresses, and hashes the rest into the * multicast table. **/ -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, +int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, struct net_device *netdev) { struct netdev_hw_addr *ha; @@ -2091,7 +2091,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, * * Enables multicast address in RAR and the use of the multicast hash table. **/ -s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) +int ixgbe_enable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; @@ -2108,7 +2108,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) * * Disables multicast address in RAR and the use of the multicast hash table. **/ -s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) +int ixgbe_disable_mc_generic(struct ixgbe_hw *hw) { struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; @@ -2124,7 +2124,7 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) * * Enable flow control according to the current settings. **/ -s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) +int ixgbe_fc_enable_generic(struct ixgbe_hw *hw) { u32 mflcn_reg, fccfg_reg; u32 reg; @@ -2252,7 +2252,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) * Find the intersection between advertised settings and link partner's * advertised settings **/ -s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, +int ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { if ((!(adv_reg)) || (!(lp_reg))) @@ -2294,10 +2294,10 @@ s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, * * Enable flow control according on 1 gig fiber. **/ -static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +static int ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) { u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; - s32 ret_val; + int ret_val; /* * On multispeed fiber at 1g, bail out if @@ -2328,10 +2328,10 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) * * Enable flow control according to IEEE clause 37. **/ -static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +static int ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) { u32 links2, anlp1_reg, autoc_reg, links; - s32 ret_val; + int ret_val; /* * On backplane, bail out if @@ -2367,7 +2367,7 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) * * Enable flow control according to IEEE clause 37. 
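ixgbe_negotiate_fc, converted above, intersects the local and link-partner Symmetric/Asymmetric Pause ability bits. Its body is not shown in this hunk, so the following is a sketch of the standard IEEE 802.3 Annex 28B resolution it applies, with hypothetical names; want_full models the driver's requested-mode check:

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* loc_/lp_ are the local and partner Symmetric/Asymmetric Pause bits. */
static enum fc_mode example_resolve_pause(int loc_sym, int loc_asm,
					  int lp_sym, int lp_asm,
					  int want_full)
{
	if (loc_sym && lp_sym)
		return want_full ? FC_FULL : FC_RX_PAUSE;
	if (!loc_sym && loc_asm && lp_sym && lp_asm)
		return FC_TX_PAUSE;	/* we may pause the partner only */
	if (loc_sym && loc_asm && !lp_sym && lp_asm)
		return FC_RX_PAUSE;	/* the partner may pause us only */
	return FC_NONE;
}
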
**/ -static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +static int ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) { u16 technology_ability_reg = 0; u16 lp_technology_ability_reg = 0; @@ -2395,7 +2395,7 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) void ixgbe_fc_autoneg(struct ixgbe_hw *hw) { ixgbe_link_speed speed; - s32 ret_val = -EIO; + int ret_val = -EIO; bool link_up; /* @@ -2501,7 +2501,7 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) * bit hasn't caused the primary requests to be disabled, else 0 * is returned signifying primary requests disabled. **/ -static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw) +static int ixgbe_disable_pcie_primary(struct ixgbe_hw *hw) { u32 i, poll; u16 value; @@ -2573,7 +2573,7 @@ gio_disable_fail: * Acquires the SWFW semaphore through the GSSR register for the specified * function (CSR, PHY0, PHY1, EEPROM, Flash) **/ -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) +int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) { u32 gssr = 0; u32 swmask = mask; @@ -2641,7 +2641,7 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) * * The default case requires no protection so just to the register read. **/ -s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) +int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) { *locked = false; *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); @@ -2655,7 +2655,7 @@ s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) * @locked: bool to indicate whether the SW/FW lock was already taken by * previous read. **/ -s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) { IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); return 0; @@ -2668,7 +2668,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) * Stops the receive data path and waits for the HW to internally * empty the Rx security block. 
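The prot_autoc_{read,write} helpers above exist because some MACs need the SW/FW semaphore held around AUTOC access; the generic variants shown are the no-lock case. A sketch of the acquire/modify/release shape, using the ops and register names visible in the driver and leaving out retry details (kernel context assumed):

/* Sketch only: error handling and the 'locked' bookkeeping trimmed. */
static int example_locked_autoc_rmw(struct ixgbe_hw *hw, u32 mask, u32 set)
{
	u32 reg;
	int err;

	err = hw->mac.ops.acquire_swfw_sync(hw, mask);
	if (err)
		return err;	/* firmware or the other port holds it */

	reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg | set);

	hw->mac.ops.release_swfw_sync(hw, mask);
	return 0;
}
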
**/ -s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) +int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) { #define IXGBE_MAX_SECRX_POLL 40 int i; @@ -2700,7 +2700,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) * * Enables the receive data path **/ -s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) +int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) { u32 secrxreg; @@ -2719,7 +2719,7 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) * * Enables the Rx DMA unit **/ -s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) +int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) { if (regval & IXGBE_RXCTRL_RXEN) hw->mac.ops.enable_rx(hw); @@ -2734,14 +2734,14 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) * @hw: pointer to hardware structure * @index: led number to blink **/ -s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) { - ixgbe_link_speed speed = 0; - bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ixgbe_link_speed speed = 0; + bool link_up = false; bool locked = false; - s32 ret_val; + int ret_val; if (index > 3) return -EINVAL; @@ -2782,12 +2782,12 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) * @hw: pointer to hardware structure * @index: led number to stop blinking **/ -s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) { - u32 autoc_reg = 0; u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); bool locked = false; - s32 ret_val; + u32 autoc_reg = 0; + int ret_val; if (index > 3) return -EINVAL; @@ -2821,10 +2821,10 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) * pointer, and returns the value at that location. This is used in both * get and set mac_addr routines. **/ -static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, - u16 *san_mac_offset) +static int ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset) { - s32 ret_val; + int ret_val; /* * First read the EEPROM pointer to see if the MAC addresses are @@ -2849,11 +2849,11 @@ static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, * set_lan_id() is called by identify_sfp(), but this cannot be relied * upon for non-SFP connections, so we must call it here. 
**/ -s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) { u16 san_mac_data, san_mac_offset; + int ret_val; u8 i; - s32 ret_val; /* * First read the EEPROM pointer to see if the MAC addresses are @@ -2942,7 +2942,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) * @rar: receive address register index to disassociate * @vmdq: VMDq pool index to remove from the rar **/ -s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 mpsar_lo, mpsar_hi; u32 rar_entries = hw->mac.num_rar_entries; @@ -2993,7 +2993,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * @rar: receive address register index to associate with a VMDq index * @vmdq: VMDq pool index **/ -s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) { u32 mpsar; u32 rar_entries = hw->mac.num_rar_entries; @@ -3026,7 +3026,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * VFs advertized and not 0. * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] **/ -s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) +int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) { u32 rar = hw->mac.san_mac_rar_index; @@ -3045,7 +3045,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array * @hw: pointer to hardware structure **/ -s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) +int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) { int i; @@ -3065,9 +3065,9 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) * return the VLVF index where this VLAN id should be placed * **/ -static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) +static int ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) { - s32 regindex, first_empty_slot; + int regindex, first_empty_slot; u32 bits; /* short cut the special case */ @@ -3115,11 +3115,11 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) * * Turn on/off specified VLAN in the VLAN filter table. 
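The set_vmdq/clear_vmdq conversions above operate on the MPSAR pair: each RAR entry carries a 64-pool bitmap split across a low and a high 32-bit register. A sketch of the indexing, assuming the MPSAR_LO/MPSAR_HI accessor macros used elsewhere in the driver:

	/* Associate VMDq pool 'vmdq' (0..63) with RAR entry 'rar'. */
	u32 mpsar;

	if (vmdq < 32) {
		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
		mpsar |= BIT(vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
	} else {
		mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
		mpsar |= BIT(vmdq - 32);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
	}
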
**/ -s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, +int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass) { u32 regidx, vfta_delta, vfta, bits; - s32 vlvf_index; + int vlvf_index; if ((vlan > 4095) || (vind > 63)) return -EINVAL; @@ -3226,7 +3226,7 @@ vfta_update: * * Clears the VLAN filter table, and the VMDq index associated with the filter **/ -s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) +int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) { u32 offset; @@ -3276,7 +3276,7 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) * * Reads the links register to determine if link is up and the current speed **/ -s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, +int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) { bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw); @@ -3396,8 +3396,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * This function will read the EEPROM from the alternative SAN MAC address * block to check the support for the alternative WWNN/WWPN prefix support. **/ -s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, - u16 *wwpn_prefix) +int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) { u16 offset, caps; u16 alt_san_mac_blk_offset; @@ -3494,7 +3494,7 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) * This function will read the EEPROM location for the device capabilities, * and return the word through device_caps. **/ -s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) +int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) { hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); @@ -3604,7 +3604,7 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held * by the caller. **/ -s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, +int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 timeout) { u32 hicr, i, fwsts; @@ -3676,15 +3676,15 @@ s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, * Communicates with the manageability block. On success return 0 * else return -EIO or -EINVAL. **/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, +int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct ixgbe_hic_hdr); struct ixgbe_hic_hdr *hdr = buffer; - u32 *u32arr = buffer; u16 buf_len, dword_len; - s32 status; + u32 *u32arr = buffer; + int status; u32 bi; if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { @@ -3753,13 +3753,13 @@ rel_out: * else returns -EBUSY when encountering an error acquiring * semaphore or -EIO when command fails. **/ -s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 sub, __always_unused u16 len, __always_unused const char *driver_ver) { struct ixgbe_hic_drv_info fw_cmd; + int ret_val; int i; - s32 ret_val; fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; @@ -3875,10 +3875,10 @@ static const u8 ixgbe_emc_therm_limit[4] = { * * Returns error code. 
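ixgbe_set_vfta_generic above uses direct indexing rather than hashing: a 12-bit VLAN ID maps one-to-one onto 128 32-bit VFTA registers. A sketch of the addressing (the real code's vfta_delta also tracks whether the bit actually changed; that bookkeeping is omitted here):

	/* Locate and flip the filter bit for 'vlan' (0..4095). */
	u32 regidx = vlan / 32;		/* which VFTA register */
	u32 bit = BIT(vlan % 32);	/* which bit inside it */
	u32 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));

	if (vlan_on)
		vfta |= bit;
	else
		vfta &= ~bit;
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
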
**/ -static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, +static int ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, u16 *ets_offset) { - s32 status; + int status; status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); if (status) @@ -3903,13 +3903,13 @@ static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, * * Returns the thermal sensor data structure **/ -s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) +int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) { - s32 status; u16 ets_offset; - u16 ets_cfg; u16 ets_sensor; u8 num_sensors; + u16 ets_cfg; + int status; u8 i; struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; @@ -3959,17 +3959,17 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) * Inits the thermal sensor thresholds according to the NVM map * and save off the threshold and location values into mac.thermal_sensor_data **/ -s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) +int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) { - s32 status; - u16 ets_offset; - u16 ets_cfg; - u16 ets_sensor; + struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; u8 low_thresh_delta; u8 num_sensors; u8 therm_limit; + u16 ets_sensor; + u16 ets_offset; + u16 ets_cfg; + int status; u8 i; - struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); @@ -4192,16 +4192,16 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw) * * Set the link speed in the MAC and/or PHY register and restarts link. */ -s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, +int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { - ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; - s32 status = 0; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + bool autoneg, link_up = false; u32 speedcnt = 0; + int status = 0; u32 i = 0; - bool autoneg, link_up = false; /* Mask off requested but non-supported speeds */ status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &autoneg); @@ -4340,8 +4340,8 @@ out: void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) { - s32 status; u8 rs, eeprom_data; + int status; switch (speed) { case IXGBE_LINK_SPEED_10GB_FULL: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 34761e691d52..6493abf189de 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -8,89 +8,89 @@ #include "ixgbe.h" u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); -s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); -s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); -s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); -s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); -s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, +int ixgbe_init_hw_generic(struct ixgbe_hw *hw); +int ixgbe_start_hw_generic(struct ixgbe_hw *hw); +int ixgbe_start_hw_gen2(struct ixgbe_hw *hw); +int ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +int ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); -s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +int ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); enum ixgbe_bus_width ixgbe_convert_bus_width(u16 
link_status); enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status); -s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +int ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); -s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); +int ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); -s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); +int ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); +int ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); +int ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); -s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); -s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); -s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); +int ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +int ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); -s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +int ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); -s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +int ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); -s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, +int ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data); -s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); -s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, +int ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +int ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, u16 *checksum_val); -s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); +int ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); -s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, +int ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, u32 enable_addr); -s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); -s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, +int ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); +int ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); +int ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, struct net_device *netdev); -s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); -s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); -s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); -s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); -s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); -s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); -s32 ixgbe_setup_fc_generic(struct ixgbe_hw *); +int ixgbe_enable_mc_generic(struct ixgbe_hw *hw); +int 
ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +int ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); +int ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); +int ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); +int ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +int ixgbe_setup_fc_generic(struct ixgbe_hw *); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); -s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); +int ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); -s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); -s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); -s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); -s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, +int ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +int ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +int ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); +int ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +int ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); +int ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, bool vlvf_bypass); -s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); -s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, +int ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); +int ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete); -s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, +int ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix); -s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); -s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); +int prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +int prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); -s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); +int ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +int ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); -s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); -s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +int ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +int ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver, u16 len, const char *str); u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, +int ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, u32 timeout, bool return_data); -s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); -s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, +int ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout); +int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, u32 (*data)[FW_PHY_ACT_DATA_COUNT]); void 
ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); @@ -111,8 +111,8 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; #define IXGBE_EMC_DIODE3_DATA 0x2A #define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30 -s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); -s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); +int ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); +int ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver); void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, @@ -121,7 +121,7 @@ void ixgbe_get_orom_version(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver); void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, +int ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index d26cea5b43bd..502666f28124 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -18,7 +18,7 @@ * @max: max credits by traffic class * @max_frame: maximum frame size */ -static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, +static int ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame) { int min_percent = 100; @@ -59,7 +59,7 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, * It should be called only after the rules are checked by * ixgbe_dcb_check_config(). */ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, +int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config, int max_frame, u8 direction) { @@ -247,7 +247,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) * * Configure dcb settings and enable dcb mode. 
*/ -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, +int ixgbe_dcb_hw_config(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { u8 pfc_en; @@ -283,7 +283,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, } /* Helper routines to abstract HW specifics from DCB netlink ops */ -s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) +int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) { switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -300,7 +300,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) return -EINVAL; } -s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) +int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) { __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; @@ -333,7 +333,7 @@ s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) bwg_id, prio_type, ets->prio_tc); } -s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, +int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 60cd5863bf5e..91788e4c4e19 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -124,15 +124,15 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); /* DCB credits calculation */ -s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, +int ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, struct ixgbe_dcb_config *, int, u8); /* DCB hw initialization */ -s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max); -s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, +int ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max); +int ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type, u8 *tc_prio); -s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); -s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); +int ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); +int ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index 379ae747cdce..185c3e5f9837 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -15,10 +15,8 @@ * * Configure Rx Data Arbiter and credits for each traffic class. */ -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *prio_type) +int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *prio_type) { u32 reg = 0; u32 credit_refill = 0; @@ -75,11 +73,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, * * Configure Tx Descriptor Arbiter and credits for each traffic class. 
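In the PFC helpers above, pfc_en is a plain bitmask with one bit per traffic class. A sketch of how a config routine walks it; the threshold registers and flag bits shown are illustrative of the 82598-style layout rather than quoted values:

	/* Enable or disable pause thresholds per traffic class. */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if (pfc_en & BIT(i)) {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i),
					fcrtl | IXGBE_FCRTL_XONE);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i),
					fcrth | IXGBE_FCRTH_FCEN);
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
		}
	}
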
*/ -s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) +int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type) { u32 reg, max_credits; u8 i; @@ -124,11 +119,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, * * Configure Tx Data Arbiter and credits for each traffic class. */ -s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type) +int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type) { u32 reg; u8 i; @@ -171,7 +163,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, * * Configure Priority Flow Control for each traffic class. */ -s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) +int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) { u32 fcrtl, reg; u8 i; @@ -224,7 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) * Configure queue statistics registers, all queues belonging to same traffic * class uses a single set of queue statistics counters. */ -static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) +static int ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) { u32 reg = 0; u8 i = 0; @@ -260,7 +252,7 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) * * Configure dcb settings and enable dcb mode. */ -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) { ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h index fdca41abb44c..5bf3f13c6953 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h @@ -46,27 +46,19 @@ /* DCB hardware-specific driver APIs */ /* DCB PFC functions */ -s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); +int ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); /* DCB hw initialization */ -s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +int ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *prio_type); + +int ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type); + +int ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type); + +int ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type); #endif /* _DCB_82598_CONFIG_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index 7948849840a5..c61bd9059541 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -17,7 +17,7 @@ * * Configure Rx Packet Arbiter and credits for each traffic class. 
*/ -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, +int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, @@ -76,7 +76,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, * * Configure Tx Descriptor Arbiter and credits for each traffic class. */ -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, +int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, @@ -128,7 +128,7 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, * * Configure Tx Packet Arbiter and credits for each traffic class. */ -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, +int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, @@ -187,7 +187,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, * * Configure Priority Flow Control (PFC) for each traffic class. */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) +int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) { u32 i, j, fcrtl, reg; u8 max_tc = 0; @@ -272,7 +272,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) * Configure queue statistics registers, all queues belonging to same traffic * class uses a single set of queue statistics counters. */ -static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) +static int ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) { u32 reg = 0; u8 i = 0; @@ -330,7 +330,7 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) * * Configure dcb settings and enable dcb mode. */ -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) { ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h index c6f084883cab..f6e5a87c03e3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h @@ -70,30 +70,21 @@ /* DCB hardware-specific driver APIs */ /* DCB PFC functions */ -s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc); +int ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc); /* DCB hw initialization */ -s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc); - -s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type); - -s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, - u16 *refill, - u16 *max, - u8 *bwg_id, - u8 *prio_type, - u8 *prio_tc); - -s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +int ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, + u8 *prio_tc); + +int ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type); + +int ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, + u16 *max, u8 *bwg_id, u8 *prio_type, + u8 *prio_tc); + +int ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 9a63457712c7..633bac1543dd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -459,7 +459,7 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 advertised, old; - s32 err = 0; + int err = 0; if ((hw->phy.media_type == ixgbe_media_type_copper) || (hw->phy.multispeed_fiber)) { @@ -3326,9 +3326,9 @@ static int ixgbe_get_module_info(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - s32 status; u8 sff8472_rev, addr_mode; bool page_swap = false; + int status; if (hw->phy.type == ixgbe_phy_fw) return -ENXIO; @@ -3372,7 +3372,7 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - s32 status = -EFAULT; + int status = -EFAULT; u8 databyte = 0xFF; int i = 0; @@ -3403,66 +3403,68 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, static const struct { ixgbe_link_speed mac_speed; - u32 supported; + u32 link_mode; } ixgbe_ls_map[] = { - { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full }, - { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full }, - { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full }, - { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full }, - { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full }, + { IXGBE_LINK_SPEED_10_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT }, + { IXGBE_LINK_SPEED_100_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT }, + { IXGBE_LINK_SPEED_1GB_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT }, + { IXGBE_LINK_SPEED_2_5GB_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT }, + { IXGBE_LINK_SPEED_10GB_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT }, }; static const struct { u32 lp_advertised; - u32 mac_speed; + u32 link_mode; } ixgbe_lp_map[] = { - { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full }, - { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full }, - { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full }, - { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full }, - { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full }, - { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full}, + { FW_PHY_ACT_UD_2_100M_TX_EEE, ETHTOOL_LINK_MODE_100baseT_Full_BIT }, + { FW_PHY_ACT_UD_2_1G_T_EEE, ETHTOOL_LINK_MODE_1000baseT_Full_BIT }, + { FW_PHY_ACT_UD_2_10G_T_EEE, ETHTOOL_LINK_MODE_10000baseT_Full_BIT }, + { FW_PHY_ACT_UD_2_1G_KX_EEE, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT }, + { FW_PHY_ACT_UD_2_10G_KX4_EEE, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT }, + { FW_PHY_ACT_UD_2_10G_KR_EEE, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, }; static int -ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata) +ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(common); u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; struct ixgbe_hw *hw = &adapter->hw; - s32 rc; + int rc; u16 i; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info); if (rc) return rc; - edata->lp_advertised = 0; for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) { if (info[0] & ixgbe_lp_map[i].lp_advertised) - edata->lp_advertised |= ixgbe_lp_map[i].mac_speed; + linkmode_set_bit(ixgbe_lp_map[i].link_mode, + edata->lp_advertised); } - edata->supported = 0; for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { if (hw->phy.eee_speeds_supported & 
ixgbe_ls_map[i].mac_speed) - edata->supported |= ixgbe_ls_map[i].supported; + linkmode_set_bit(ixgbe_ls_map[i].link_mode, + edata->supported); } - edata->advertised = 0; for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed) - edata->advertised |= ixgbe_ls_map[i].supported; + linkmode_set_bit(ixgbe_ls_map[i].link_mode, + edata->advertised); } - edata->eee_enabled = !!edata->advertised; + edata->eee_enabled = !linkmode_empty(edata->advertised); edata->tx_lpi_enabled = edata->eee_enabled; - if (edata->advertised & edata->lp_advertised) - edata->eee_active = true; + + linkmode_and(common, edata->advertised, edata->lp_advertised); + edata->eee_active = !linkmode_empty(common); return 0; } -static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; @@ -3476,17 +3478,17 @@ static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) return -EOPNOTSUPP; } -static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - struct ethtool_eee eee_data; - s32 ret_val; + struct ethtool_keee eee_data; + int ret_val; if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) return -EOPNOTSUPP; - memset(&eee_data, 0, sizeof(struct ethtool_eee)); + memset(&eee_data, 0, sizeof(struct ethtool_keee)); ret_val = ixgbe_get_eee(netdev, &eee_data); if (ret_val) @@ -3504,7 +3506,7 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) return -EINVAL; } - if (eee_data.advertised != edata->advertised) { + if (!linkmode_equal(eee_data.advertised, edata->advertised)) { e_err(drv, "Setting EEE advertised speeds is not supported\n"); return -EINVAL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bd541527c8c7..e23c3614fb10 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -205,7 +205,7 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, return 0; } -static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) +static int ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u16 link_status = 0; @@ -7809,7 +7809,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - s32 err; + int err; /* not searching for SFP so there is nothing to do here */ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index fe7ef5773369..d67d77e5dacc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -15,7 +15,7 @@ * * returns SUCCESS if it successfully read message from buffer **/ -s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +int ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; @@ -38,7 +38,7 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) * * returns SUCCESS if it successfully 
copied message into the buffer **/ -s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +int ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; @@ -58,7 +58,7 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ -s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +int ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; @@ -75,7 +75,7 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ -s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +int ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; @@ -92,7 +92,7 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) * * returns SUCCESS if the Status bit was found or else ERR_MBX **/ -s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) +int ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; @@ -109,7 +109,7 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) * * returns SUCCESS if it successfully received a message notification **/ -static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +static int ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; @@ -134,7 +134,7 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) * * returns SUCCESS if it successfully received a message acknowledgement **/ -static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +static int ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; @@ -162,11 +162,11 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer. 
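Stepping back to the ethtool_keee conversion in ixgbe_ethtool.c above: the legacy u32 advertising words become full linkmode bitmaps, so every bitwise idiom gets a helper equivalent. A condensed sketch of the correspondences, using only calls that appear in that hunk:

	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);

	/* was: edata->advertised |= SUPPORTED_1000baseT_Full; */
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			 edata->advertised);

	/* was: eee_active = !!(advertised & lp_advertised); */
	linkmode_and(common, edata->advertised, edata->lp_advertised);
	edata->eee_active = !linkmode_empty(common);

	/* was: if (eee_data.advertised != edata->advertised) */
	if (!linkmode_equal(eee_data.advertised, edata->advertised))
		return -EINVAL;
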
**/ -static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, +static int ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val; + int ret_val; if (!mbx->ops) return -EIO; @@ -189,11 +189,11 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within delay * timeout period **/ -static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, - u16 mbx_id) +static int ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val; + int ret_val; /* exit if either we can't write or there isn't a defined timeout */ if (!mbx->ops || !mbx->timeout) @@ -208,7 +208,7 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, return ixgbe_poll_for_ack(hw, mbx_id); } -static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +static int ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) { u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); @@ -227,9 +227,9 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +static int ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) { - s32 index = IXGBE_MBVFICR_INDEX(vf_number); + int index = IXGBE_MBVFICR_INDEX(vf_number); u32 vf_bit = vf_number % 16; if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, @@ -248,9 +248,9 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +static int ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) { - s32 index = IXGBE_MBVFICR_INDEX(vf_number); + int index = IXGBE_MBVFICR_INDEX(vf_number); u32 vf_bit = vf_number % 16; if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, @@ -269,7 +269,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) * * returns SUCCESS if the VF has set the Status bit or else ERR_MBX **/ -static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +static int ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) { u32 reg_offset = (vf_number < 32) ? 0 : 1; u32 vf_shift = vf_number % 32; @@ -305,7 +305,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) * * return SUCCESS if we obtained the mailbox lock **/ -static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +static int ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) { u32 p2v_mailbox; @@ -329,10 +329,10 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) * * returns SUCCESS if it successfully copied message into the buffer **/ -static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, +static int ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number) { - s32 ret_val; + int ret_val; u16 i; /* lock the mailbox to prevent pf/vf race condition */ @@ -368,10 +368,10 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. 
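The posted-mailbox helpers above are built on a simple countdown poll. A sketch of its shape, mirroring the fields visible in the hunk (mbx->timeout iterations spaced mbx->usec_delay microseconds apart, -EIO on expiry); the real loop structures this slightly differently but to the same effect:

	int countdown = mbx->timeout;

	while (countdown && mbx->ops->check_for_msg(hw, mbx_id)) {
		countdown--;
		if (!countdown)
			return -EIO;	/* partner never posted a message */
		udelay(mbx->usec_delay);
	}
	return 0;		/* message is waiting in the buffer */
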
**/ -static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, +static int ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 vf_number) { - s32 ret_val; + int ret_val; u16 i; /* lock the mailbox to prevent pf/vf race condition */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 6434c190e7a4..bd205306934b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -96,11 +96,11 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ #define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ -s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); -s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); -s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); -s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); +int ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); +int ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); +int ixgbe_check_for_msg(struct ixgbe_hw *, u16); +int ixgbe_check_for_ack(struct ixgbe_hw *, u16); +int ixgbe_check_for_rst(struct ixgbe_hw *, u16); #ifdef CONFIG_PCI_IOV void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); #endif /* CONFIG_PCI_IOV */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index f28140a05f09..75e9453331ed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -11,19 +11,19 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw); static void ixgbe_i2c_stop(struct ixgbe_hw *hw); -static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); -static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); -static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); -static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); -static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); -static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); -static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); -static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); +static int ixgbe_get_phy_id(struct ixgbe_hw *hw); +static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); /** * ixgbe_out_i2c_byte_ack - Send I2C byte with ack @@ -32,9 +32,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); * * Returns an error code on error. 
**/ -static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) +static int ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) { - s32 status; + int status; status = ixgbe_clock_out_i2c_byte(hw, byte); if (status) @@ -49,9 +49,9 @@ static s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) * * Returns an error code on error. **/ -static s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) +static int ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) { - s32 status; + int status; status = ixgbe_clock_in_i2c_byte(hw, byte); if (status) @@ -85,7 +85,7 @@ static u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) * * Returns an error code on error. */ -s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, +int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; @@ -163,7 +163,7 @@ fail: * * Returns an error code on error. */ -s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, +int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val, bool lock) { u32 swfw_mask = hw->phy.phy_semaphore_mask; @@ -260,7 +260,7 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) * * Determines the physical layer module found on the current adapter. **/ -s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) +int ixgbe_identify_phy_generic(struct ixgbe_hw *hw) { u32 status = -EFAULT; u32 phy_addr; @@ -332,11 +332,11 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * **/ -static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +static int ixgbe_get_phy_id(struct ixgbe_hw *hw) { - s32 status; u16 phy_id_high = 0; u16 phy_id_low = 0; + int status; status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, &phy_id_high); @@ -394,11 +394,11 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) * ixgbe_reset_phy_generic - Performs a PHY reset * @hw: pointer to hardware structure **/ -s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) +int ixgbe_reset_phy_generic(struct ixgbe_hw *hw) { u32 i; u16 ctrl = 0; - s32 status = 0; + int status = 0; if (hw->phy.type == ixgbe_phy_unknown) status = ixgbe_identify_phy_generic(hw); @@ -470,8 +470,8 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) * * Reads a value from a specified PHY register without the SWFW lock **/ -s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, - u16 *phy_data) +int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) { u32 i, data, command; @@ -546,11 +546,11 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, * @device_type: 5 bit device type * @phy_data: Pointer to read data from PHY register **/ -s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, +int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { - s32 status; u32 gssr = hw->phy.phy_semaphore_mask; + int status; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, @@ -571,8 +571,8 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, * @device_type: 5 bit device type * @phy_data: Data to write to the PHY register **/ -s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u16 phy_data) +int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data) { u32 i, command; @@ -644,11 
+644,11 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, * @device_type: 5 bit device type * @phy_data: Data to write to the PHY register **/ -s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, +int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { - s32 status; u32 gssr = hw->phy.phy_semaphore_mask; + int status; if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, @@ -668,7 +668,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, * @hw: pointer to hardware structure * @cmd: command register value to write **/ -static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd) +static int ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd) { IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd); @@ -684,11 +684,11 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd) * @regnum: register number * @gssr: semaphore flags to acquire **/ -static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr, +static int ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr, int regnum, u32 gssr) { u32 hwaddr, cmd; - s32 data; + int data; if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return -EBUSY; @@ -718,11 +718,11 @@ mii_bus_read_done: * @regnum: register number * @gssr: semaphore flags to acquire **/ -static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr, +static int ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr, int devad, int regnum, u32 gssr) { u32 hwaddr, cmd; - s32 data; + int data; if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return -EBUSY; @@ -756,11 +756,11 @@ mii_bus_read_done: * @val: value to write * @gssr: semaphore flags to acquire **/ -static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr, +static int ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr, int regnum, u16 val, u32 gssr) { u32 hwaddr, cmd; - s32 err; + int err; if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return -EBUSY; @@ -787,12 +787,12 @@ static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr, * @val: value to write * @gssr: semaphore flags to acquire **/ -static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr, +static int ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr, int devad, int regnum, u16 val, u32 gssr) { u32 hwaddr, cmd; - s32 err; + int err; if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) return -EBUSY; @@ -821,7 +821,7 @@ mii_bus_write_done: * @addr: address * @regnum: register number **/ -static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum) +static int ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum) { struct ixgbe_adapter *adapter = bus->priv; struct ixgbe_hw *hw = &adapter->hw; @@ -837,7 +837,7 @@ static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum) * @addr: address * @regnum: register number **/ -static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr, +static int ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr, int regnum) { struct ixgbe_adapter *adapter = bus->priv; @@ -854,7 +854,7 @@ static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr, * @regnum: register number * @val: value to write **/ -static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum, +static int ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; @@ -872,7 +872,7 @@ static s32 
ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum, * @regnum: register number * @val: value to write **/ -static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad, +static int ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad, int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; @@ -889,7 +889,7 @@ static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad, * @addr: address * @regnum: register number **/ -static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr, +static int ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum) { struct ixgbe_adapter *adapter = bus->priv; @@ -907,7 +907,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr, * @devad: device address to read * @regnum: register number **/ -static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr, +static int ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr, int devad, int regnum) { struct ixgbe_adapter *adapter = bus->priv; @@ -925,7 +925,7 @@ static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr, * @regnum: register number * @val: value to write **/ -static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr, +static int ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; @@ -944,7 +944,7 @@ static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr, * @regnum: register number * @val: value to write **/ -static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr, +static int ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad, int regnum, u16 val) { struct ixgbe_adapter *adapter = bus->priv; @@ -1023,13 +1023,13 @@ out: * * ixgbe_mii_bus_init initializes a mii_bus structure in adapter **/ -s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) +int ixgbe_mii_bus_init(struct ixgbe_hw *hw) { - s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val); - s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum); - s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum, + int (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val); + int (*read_c22)(struct mii_bus *bus, int addr, int regnum); + int (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum, u16 val); - s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum); + int (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum); struct ixgbe_adapter *adapter = hw->back; struct pci_dev *pdev = adapter->pdev; struct device *dev = &adapter->netdev->dev; @@ -1095,12 +1095,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) * * Restart autonegotiation and PHY and waits for completion. 
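All of the mii_bus callbacks converted above share one shape: recover the adapter from the bus's private pointer, then forward to a generic worker together with the right SWFW semaphore mask. A minimal standalone sketch of that dispatch pattern — the struct names, the fake register value, and the mask are illustrative, not the driver's definitions:

```c
#include <errno.h>
#include <stdio.h>

/* Stand-ins for the driver's context and bus types. */
struct hw_ctx { unsigned int semaphore_mask; };
struct bus    { void *priv; };

/* Generic worker shared by every bus-facing wrapper. */
static int mdio_read_generic(struct hw_ctx *hw, int addr, int regnum,
			     unsigned int gssr)
{
	(void)regnum;
	(void)gssr;		/* a real driver would take this semaphore */
	if (addr < 0 || addr > 31)
		return -EINVAL;
	return 0x796d;		/* fake register value */
}

/* Thin callback: unpack priv, forward with the per-port mask. */
static int bus_read_c22(struct bus *bus, int addr, int regnum)
{
	struct hw_ctx *hw = bus->priv;

	return mdio_read_generic(hw, addr, regnum, hw->semaphore_mask);
}

int main(void)
{
	struct hw_ctx hw = { .semaphore_mask = 0x2 };
	struct bus bus = { .priv = &hw };

	printf("reg = 0x%x\n", bus_read_c22(&bus, 1, 2));
	return 0;
}
```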
**/ -s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) +int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { - s32 status = 0; u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; - bool autoneg = false; ixgbe_link_speed speed; + bool autoneg = false; + int status = 0; ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); @@ -1173,7 +1173,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) * @speed: new link speed * @autoneg_wait_to_complete: unused **/ -s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, +int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { @@ -1214,10 +1214,10 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, * Determines the supported link capabilities by reading the PHY auto * negotiation register. */ -static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) +static int ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) { u16 speed_ability; - s32 status; + int status; status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, &speed_ability); @@ -1253,11 +1253,11 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) * @speed: pointer to link speed * @autoneg: boolean auto-negotiation value */ -s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, +int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { - s32 status = 0; + int status = 0; *autoneg = true; if (!hw->phy.speeds_supported) @@ -1276,15 +1276,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, * Reads the VS1 register to determine if link is up and the current speed for * the PHY. **/ -s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, +int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up) { - s32 status; - u32 time_out; u32 max_time_out = 10; - u16 phy_link = 0; u16 phy_speed = 0; + u16 phy_link = 0; u16 phy_data = 0; + u32 time_out; + int status; /* Initialize speed and link to default case */ *link_up = false; @@ -1326,7 +1326,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, * it is called via a function pointer that could call other * functions that could return an error. **/ -s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) { u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; bool autoneg = false; @@ -1399,13 +1399,13 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) * ixgbe_reset_phy_nl - Performs a PHY reset * @hw: pointer to hardware structure **/ -s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) +int ixgbe_reset_phy_nl(struct ixgbe_hw *hw) { u16 phy_offset, control, eword, edata, block_crc; - bool end_data = false; u16 list_offset, data_offset; + bool end_data = false; u16 phy_data = 0; - s32 ret_val; + int ret_val; u32 i; /* Blocked by MNG FW so bail */ @@ -1506,7 +1506,7 @@ err_eeprom: * * Determines HW type and calls appropriate function. **/ -s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) +int ixgbe_identify_module_generic(struct ixgbe_hw *hw) { switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: @@ -1527,19 +1527,19 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) * * Searches for and identifies the SFP module and assigns appropriate PHY type. 
**/ -s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) +int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) { + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; struct ixgbe_adapter *adapter = hw->back; - s32 status; + u8 oui_bytes[3] = {0, 0, 0}; + u8 comp_codes_10g = 0; + u8 comp_codes_1g = 0; + u16 enforce_sfp = 0; u32 vendor_oui = 0; - enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; - u8 comp_codes_1g = 0; - u8 comp_codes_10g = 0; - u8 oui_bytes[3] = {0, 0, 0}; u8 cable_tech = 0; u8 cable_spec = 0; - u16 enforce_sfp = 0; + int status; if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { hw->phy.sfp_type = ixgbe_sfp_type_not_present; @@ -1792,10 +1792,10 @@ err_read_i2c_eeprom: * * Searches for and identifies the QSFP module and assigns appropriate PHY type **/ -static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) +static int ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) { struct ixgbe_adapter *adapter = hw->back; - s32 status; + int status; u32 vendor_oui = 0; enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; u8 identifier = 0; @@ -1975,7 +1975,7 @@ err_read_i2c_eeprom: * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if * so it returns the offsets to the phy init sequence block. **/ -s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, +int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset) { @@ -2065,7 +2065,7 @@ err_phy: * * Performs byte read operation to SFP module's EEPROM over I2C interface. **/ -s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) { return hw->phy.ops.read_i2c_byte(hw, byte_offset, @@ -2081,7 +2081,7 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, * * Performs byte read operation to SFP module's SFF-8472 data over I2C **/ -s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data) { return hw->phy.ops.read_i2c_byte(hw, byte_offset, @@ -2097,7 +2097,7 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, * * Performs byte write operation to SFP module's EEPROM over I2C interface. **/ -s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data) { return hw->phy.ops.write_i2c_byte(hw, byte_offset, @@ -2131,14 +2131,14 @@ static bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. */ -static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, +static int ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data, bool lock) { - s32 status; - u32 max_retry = 10; - u32 retry = 0; u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 max_retry = 10; bool nack = true; + u32 retry = 0; + int status; if (hw->mac.type >= ixgbe_mac_X550) max_retry = 3; @@ -2221,7 +2221,7 @@ fail: * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. 
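The SFP identify path above reads the module's EEPROM one byte at a time (identifier, compliance codes, three vendor OUI bytes) and folds the OUI bytes into a single word before comparing it against known vendors. A self-contained sketch of that folding step; the right-aligned shift layout here is an assumption, drivers may left-align the OUI instead:

```c
#include <stdint.h>
#include <stdio.h>

/* Fold the three SFF-8472 vendor OUI bytes into one comparable word. */
static uint32_t vendor_oui(const uint8_t oui[3])
{
	return ((uint32_t)oui[0] << 16) |
	       ((uint32_t)oui[1] << 8)  |
		(uint32_t)oui[2];
}

int main(void)
{
	const uint8_t bytes[3] = { 0x00, 0x90, 0x65 }; /* example OUI */

	if (vendor_oui(bytes) == 0x009065)
		puts("known vendor module");
	return 0;
}
```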
*/ -s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, @@ -2238,7 +2238,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, * Performs byte read operation to SFP module's EEPROM over I2C interface at * a specified device address. */ -s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data) { return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, @@ -2256,13 +2256,13 @@ s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. */ -static s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, +static int ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data, bool lock) { - s32 status; + u32 swfw_mask = hw->phy.phy_semaphore_mask; u32 max_retry = 1; u32 retry = 0; - u32 swfw_mask = hw->phy.phy_semaphore_mask; + int status; if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) return -EBUSY; @@ -2324,7 +2324,7 @@ fail: * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. */ -s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, @@ -2341,7 +2341,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, * Performs byte write operation to SFP module's EEPROM over I2C interface at * a specified device address. 
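ixgbe_read_i2c_byte_generic and its _unlocked twin are thin wrappers over one _int worker that takes a bool lock flag, so the transfer logic exists once and only the locked variant touches the semaphore. A compact sketch of the pattern with the semaphore mocked as a flag:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool lock_held; /* stand-in for the hardware semaphore */

static int i2c_read_int(unsigned char offset, unsigned char *data, bool lock)
{
	(void)offset;
	if (lock) {
		if (lock_held)
			return -EBUSY;	/* semaphore already taken */
		lock_held = true;
	}

	*data = 0x42;			/* pretend bus transfer */

	if (lock)
		lock_held = false;
	return 0;
}

/* Public variants differ only in the flag they pass down. */
static int i2c_read(unsigned char off, unsigned char *d)
{
	return i2c_read_int(off, d, true);
}

static int i2c_read_unlocked(unsigned char off, unsigned char *d)
{
	return i2c_read_int(off, d, false); /* caller holds the lock */
}

int main(void)
{
	unsigned char v;

	if (!i2c_read(0, &v) && !i2c_read_unlocked(0, &v))
		printf("read 0x%x\n", v);
	return 0;
}
```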
*/ -s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data) { return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, @@ -2422,10 +2422,10 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) * * Clocks in one byte data via I2C data/clock **/ -static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +static int ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) { - s32 i; bool bit = false; + int i; *data = 0; for (i = 7; i >= 0; i--) { @@ -2443,12 +2443,12 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) * * Clocks out one byte data via I2C data/clock **/ -static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +static int ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) { - s32 status; - s32 i; - u32 i2cctl; bool bit = false; + int status; + u32 i2cctl; + int i; for (i = 7; i >= 0; i--) { bit = (data >> i) & 0x1; @@ -2474,14 +2474,14 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) * * Clocks in/out one bit via I2C data/clock **/ -static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +static int ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { - u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); - s32 status = 0; - u32 i = 0; u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); u32 timeout = 10; bool ack = true; + int status = 0; + u32 i = 0; if (data_oe_bit) { i2cctl |= IXGBE_I2C_DATA_OUT(hw); @@ -2525,7 +2525,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) * * Clocks in one bit via I2C data/clock **/ -static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +static int ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); @@ -2559,10 +2559,10 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) * * Clocks out one bit via I2C data/clock **/ -static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +static int ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { - s32 status; u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + int status; status = ixgbe_set_i2c_data(hw, &i2cctl, data); if (status == 0) { @@ -2647,7 +2647,7 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) * Sets the I2C data bit * Asserts the I2C data output enable on X550 hardware. 
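The low-level helpers above bit-bang I2C: a byte goes out MSB first, one data bit per clock pulse, and comes back in the same way before a ninth ack bit is sampled. A host-side simulation of the MSB-first loop, with the SDA line reduced to a variable:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool wire; /* stands in for the SDA line */

static void clock_out_bit(bool bit) { wire = bit; }
static bool clock_in_bit(void)      { return wire; }

/* Shift a byte out MSB first, one bit per clock pulse,
 * reading each bit back to prove the ordering. */
static void clock_out_byte(uint8_t data, uint8_t *echo)
{
	*echo = 0;
	for (int i = 7; i >= 0; i--) {
		clock_out_bit((data >> i) & 0x1);
		*echo = (uint8_t)(*echo << 1) | clock_in_bit();
	}
}

int main(void)
{
	uint8_t out;

	clock_out_byte(0xA5, &out);
	printf("echoed 0x%02X\n", out); /* 0xA5: every bit round-tripped */
	return 0;
}
```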
**/ -static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +static int ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN(hw); @@ -2769,7 +2769,7 @@ bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @on: true for on, false for off **/ -s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) +int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) { u32 status; u16 reg; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index ef72729d7c93..beedcb7bec0d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -121,57 +121,57 @@ /* SFP+ SFF-8472 Compliance code */ #define IXGBE_SFF_SFF_8472_UNSUP 0x00 -s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw); +int ixgbe_mii_bus_init(struct ixgbe_hw *hw); -s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); -s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); -s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, +int ixgbe_identify_phy_generic(struct ixgbe_hw *hw); +int ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +int ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data); -s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, +int ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data); -s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, +int ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data); -s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, +int ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data); -s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); -s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, +int ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +int ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); -s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, +int ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg); bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); /* PHY specific */ -s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, +int ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up); -s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +int ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); -s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); -s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); -s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); -s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); -s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, +int ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +int ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); +int ixgbe_identify_module_generic(struct ixgbe_hw *hw); +int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, u16 *list_offset, u16 *data_offset); bool ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); -s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 *data); -s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw 
*hw, u8 byte_offset, u8 dev_addr, u8 *data); -s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); -s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, u8 data); -s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); -s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 *sff8472_data); -s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, +int ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); -s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, +int ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val, bool lock); -s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, +int ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, u16 val, bool lock); #endif /* _IXGBE_PHY_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7299a830f6e4..fcfd0a075eee 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -492,7 +492,7 @@ static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf struct net_device *dev = adapter->netdev; int pf_max_frame = dev->mtu + ETH_HLEN; u32 reg_offset, vf_shift, vfre; - s32 err = 0; + int err = 0; #ifdef CONFIG_FCOE if (dev->features & NETIF_F_FCOE_MTU) @@ -775,7 +775,7 @@ static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf) static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, int vf, unsigned char *mac_addr) { - s32 retval; + int retval; ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); @@ -1254,7 +1254,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) u32 mbx_size = IXGBE_VFMAILBOX_SIZE; u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; struct ixgbe_hw *hw = &adapter->hw; - s32 retval; + int retval; retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); @@ -1418,7 +1418,7 @@ void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter) int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - s32 retval; + int retval; if (vf >= adapter->num_vfs) return -EINVAL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 61b9774b3d31..d44c58130be2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3393,50 +3393,50 @@ struct ixgbe_hw; /* Function pointer table */ struct ixgbe_eeprom_operations { - s32 (*init_params)(struct ixgbe_hw *); - s32 (*read)(struct ixgbe_hw *, u16, u16 *); - s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); - s32 (*write)(struct ixgbe_hw *, u16, u16); - s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); - s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); - s32 (*update_checksum)(struct ixgbe_hw *); - s32 (*calc_checksum)(struct ixgbe_hw *); + int (*init_params)(struct ixgbe_hw *); + int (*read)(struct ixgbe_hw *, u16, u16 *); + int 
(*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + int (*write)(struct ixgbe_hw *, u16, u16); + int (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + int (*validate_checksum)(struct ixgbe_hw *, u16 *); + int (*update_checksum)(struct ixgbe_hw *); + int (*calc_checksum)(struct ixgbe_hw *); }; struct ixgbe_mac_operations { - s32 (*init_hw)(struct ixgbe_hw *); - s32 (*reset_hw)(struct ixgbe_hw *); - s32 (*start_hw)(struct ixgbe_hw *); - s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + int (*init_hw)(struct ixgbe_hw *); + int (*reset_hw)(struct ixgbe_hw *); + int (*start_hw)(struct ixgbe_hw *); + int (*clear_hw_cntrs)(struct ixgbe_hw *); enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); - s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); - s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); - s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); - s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); - s32 (*stop_adapter)(struct ixgbe_hw *); - s32 (*get_bus_info)(struct ixgbe_hw *); + int (*get_mac_addr)(struct ixgbe_hw *, u8 *); + int (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + int (*get_device_caps)(struct ixgbe_hw *, u16 *); + int (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); + int (*stop_adapter)(struct ixgbe_hw *); + int (*get_bus_info)(struct ixgbe_hw *); void (*set_lan_id)(struct ixgbe_hw *); - s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); - s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); - s32 (*setup_sfp)(struct ixgbe_hw *); - s32 (*disable_rx_buff)(struct ixgbe_hw *); - s32 (*enable_rx_buff)(struct ixgbe_hw *); - s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); - s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); + int (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + int (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); + int (*setup_sfp)(struct ixgbe_hw *); + int (*disable_rx_buff)(struct ixgbe_hw *); + int (*enable_rx_buff)(struct ixgbe_hw *); + int (*enable_rx_dma)(struct ixgbe_hw *, u32); + int (*acquire_swfw_sync)(struct ixgbe_hw *, u32); void (*release_swfw_sync)(struct ixgbe_hw *, u32); void (*init_swfw_sync)(struct ixgbe_hw *); - s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); - s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); + int (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); + int (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); /* Link */ void (*disable_tx_laser)(struct ixgbe_hw *); void (*enable_tx_laser)(struct ixgbe_hw *); void (*flap_tx_laser)(struct ixgbe_hw *); void (*stop_link_on_d3)(struct ixgbe_hw *); - s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); - s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); - s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); - s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + int (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + int (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + int (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); @@ -3444,38 +3444,38 @@ struct ixgbe_mac_operations { void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); /* LED */ - s32 (*led_on)(struct ixgbe_hw *, u32); - s32 (*led_off)(struct ixgbe_hw *, u32); - s32 (*blink_led_start)(struct ixgbe_hw *, u32); - s32 (*blink_led_stop)(struct ixgbe_hw *, u32); - s32 (*init_led_link_act)(struct ixgbe_hw *); + int (*led_on)(struct ixgbe_hw *, u32); + int 
(*led_off)(struct ixgbe_hw *, u32); + int (*blink_led_start)(struct ixgbe_hw *, u32); + int (*blink_led_stop)(struct ixgbe_hw *, u32); + int (*init_led_link_act)(struct ixgbe_hw *); /* RAR, Multicast, VLAN */ - s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); - s32 (*clear_rar)(struct ixgbe_hw *, u32); - s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); - s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); - s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); - s32 (*init_rx_addrs)(struct ixgbe_hw *); - s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); - s32 (*enable_mc)(struct ixgbe_hw *); - s32 (*disable_mc)(struct ixgbe_hw *); - s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); - s32 (*init_uta_tables)(struct ixgbe_hw *); + int (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + int (*clear_rar)(struct ixgbe_hw *, u32); + int (*set_vmdq)(struct ixgbe_hw *, u32, u32); + int (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); + int (*clear_vmdq)(struct ixgbe_hw *, u32, u32); + int (*init_rx_addrs)(struct ixgbe_hw *); + int (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); + int (*enable_mc)(struct ixgbe_hw *); + int (*disable_mc)(struct ixgbe_hw *); + int (*clear_vfta)(struct ixgbe_hw *); + int (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); + int (*init_uta_tables)(struct ixgbe_hw *); void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); /* Flow Control */ - s32 (*fc_enable)(struct ixgbe_hw *); - s32 (*setup_fc)(struct ixgbe_hw *); + int (*fc_enable)(struct ixgbe_hw *); + int (*setup_fc)(struct ixgbe_hw *); void (*fc_autoneg)(struct ixgbe_hw *); /* Manageability interface */ - s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, + int (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, const char *); - s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); - s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + int (*get_thermal_sensor_data)(struct ixgbe_hw *); + int (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); bool (*fw_recovery_mode)(struct ixgbe_hw *hw); void (*disable_rx)(struct ixgbe_hw *hw); void (*enable_rx)(struct ixgbe_hw *hw); @@ -3484,47 +3484,47 @@ struct ixgbe_mac_operations { void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); /* DMA Coalescing */ - s32 (*dmac_config)(struct ixgbe_hw *hw); - s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); - s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); - s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); - s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); + int (*dmac_config)(struct ixgbe_hw *hw); + int (*dmac_update_tcs)(struct ixgbe_hw *hw); + int (*dmac_config_tcs)(struct ixgbe_hw *hw); + int (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); + int (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); }; struct ixgbe_phy_operations { - s32 (*identify)(struct ixgbe_hw *); - s32 (*identify_sfp)(struct ixgbe_hw *); - s32 (*init)(struct ixgbe_hw *); - s32 (*reset)(struct ixgbe_hw *); - s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); - s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); - s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); - s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); - s32 (*setup_link)(struct ixgbe_hw *); - s32 (*setup_internal_link)(struct ixgbe_hw *); - s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); - s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); - s32 
(*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); - s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); - s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); - s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); - s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + int (*identify)(struct ixgbe_hw *); + int (*identify_sfp)(struct ixgbe_hw *); + int (*init)(struct ixgbe_hw *); + int (*reset)(struct ixgbe_hw *); + int (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); + int (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + int (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); + int (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); + int (*setup_link)(struct ixgbe_hw *); + int (*setup_internal_link)(struct ixgbe_hw *); + int (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); + int (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + int (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); + int (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + int (*read_i2c_sff8472)(struct ixgbe_hw *, u8, u8 *); + int (*read_i2c_eeprom)(struct ixgbe_hw *, u8, u8 *); + int (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); bool (*check_overtemp)(struct ixgbe_hw *); - s32 (*set_phy_power)(struct ixgbe_hw *, bool on); - s32 (*enter_lplu)(struct ixgbe_hw *); - s32 (*handle_lasi)(struct ixgbe_hw *hw, bool *); - s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + int (*set_phy_power)(struct ixgbe_hw *, bool on); + int (*enter_lplu)(struct ixgbe_hw *); + int (*handle_lasi)(struct ixgbe_hw *hw, bool *); + int (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, u8 *value); - s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, + int (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, u8 value); }; struct ixgbe_link_operations { - s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); - s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + int (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); + int (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); - s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); - s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, + int (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); + int (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); }; @@ -3602,14 +3602,14 @@ struct ixgbe_phy_info { #include "ixgbe_mbx.h" struct ixgbe_mbx_operations { - s32 (*init_params)(struct ixgbe_hw *hw); - s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); - s32 (*check_for_msg)(struct ixgbe_hw *, u16); - s32 (*check_for_ack)(struct ixgbe_hw *, u16); - s32 (*check_for_rst)(struct ixgbe_hw *, u16); + int (*init_params)(struct ixgbe_hw *hw); + int (*read)(struct ixgbe_hw *, u32 *, u16, u16); + int (*write)(struct ixgbe_hw *, u32 *, u16, u16); + int (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); + int (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); + int (*check_for_msg)(struct ixgbe_hw *, u16); + int (*check_for_ack)(struct ixgbe_hw *, u16); + int (*check_for_rst)(struct ixgbe_hw *, u16); }; struct ixgbe_mbx_stats { @@ -3656,7 +3656,7 @@ struct ixgbe_hw { struct ixgbe_info { enum ixgbe_mac_type mac; - s32 (*get_invariants)(struct ixgbe_hw *); + int (*get_invariants)(struct ixgbe_hw *); const struct ixgbe_mac_operations *mac_ops; const struct 
ixgbe_eeprom_operations *eeprom_ops; const struct ixgbe_phy_operations *phy_ops; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 57a912e4653f..f1ffa398f6df 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -16,9 +16,9 @@ #define IXGBE_X540_VFT_TBL_SIZE 128 #define IXGBE_X540_RX_PB_SIZE 384 -static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); -static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); -static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +static int ixgbe_update_flash_X540(struct ixgbe_hw *hw); +static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) @@ -26,7 +26,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) return ixgbe_media_type_copper; } -s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) +int ixgbe_get_invariants_X540(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; @@ -51,7 +51,7 @@ s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) * @speed: new link speed * @autoneg_wait_to_complete: true when waiting for completion is needed **/ -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, +int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete) { return hw->phy.ops.setup_link_speed(hw, speed, @@ -66,11 +66,11 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, * and clears all interrupts, perform a PHY reset, and perform a link (MAC) * reset. **/ -s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +int ixgbe_reset_hw_X540(struct ixgbe_hw *hw) { - s32 status; - u32 ctrl, i; u32 swfw_mask = hw->phy.phy_semaphore_mask; + u32 ctrl, i; + int status; /* Call adapter stop to disable tx/rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); @@ -166,9 +166,9 @@ mac_reset_top: * and the generation start_hw function. * Then performs revision-specific operations, if any. **/ -s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +int ixgbe_start_hw_X540(struct ixgbe_hw *hw) { - s32 ret_val; + int ret_val; ret_val = ixgbe_start_hw_generic(hw); if (ret_val) @@ -184,7 +184,7 @@ s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. **/ -s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; @@ -215,9 +215,9 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) * * Reads a 16 bit word from the EEPROM using the EERD register. **/ -static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +static int ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) { - s32 status; + int status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return -EBUSY; @@ -237,10 +237,10 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) * * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
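Every EERD/EEWR helper above is the same sandwich: try the EEPROM semaphore, fail fast with -EBUSY, do the raw access, always release. A sketch of that acquire/operate/release shape (the semaphore is a mocked flag, the EEPROM word is fabricated):

```c
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool eep_sm_taken;

static int acquire_eep_sm(void)
{
	if (eep_sm_taken)
		return -EBUSY;
	eep_sm_taken = true;
	return 0;
}

static void release_eep_sm(void) { eep_sm_taken = false; }

static int read_eerd_raw(uint16_t offset, uint16_t *data)
{
	*data = (uint16_t)(0xC0DE + offset); /* fake EEPROM word */
	return 0;
}

/* Locked wrapper: bail out early if the semaphore is busy,
 * and release it on every exit path. */
static int read_eerd(uint16_t offset, uint16_t *data)
{
	int status;

	if (acquire_eep_sm())
		return -EBUSY;

	status = read_eerd_raw(offset, data);

	release_eep_sm();
	return status;
}

int main(void)
{
	uint16_t word;

	if (!read_eerd(3, &word))
		printf("eeprom[3] = 0x%04X\n", word);
	return 0;
}
```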
**/ -static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, +static int ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; + int status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return -EBUSY; @@ -259,9 +259,9 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, * * Write a 16 bit word to the EEPROM using the EEWR register. **/ -static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +static int ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) { - s32 status; + int status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return -EBUSY; @@ -281,10 +281,10 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) * * Write a 16 bit word(s) to the EEPROM using the EEWR register. **/ -static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, +static int ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status; + int status; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) return -EBUSY; @@ -303,7 +303,7 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, * * @hw: pointer to hardware structure **/ -static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +static int ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) { u16 i; u16 j; @@ -368,7 +368,7 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return (s32)checksum; + return (int)checksum; } /** @@ -379,12 +379,12 @@ static s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ -static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, +static int ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val) { - s32 status; - u16 checksum; u16 read_checksum = 0; + u16 checksum; + int status; /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every @@ -439,10 +439,10 @@ out: * checksum and updates the EEPROM and instructs the hardware to update * the flash. **/ -static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +static int ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) { - s32 status; u16 checksum; + int status; /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every @@ -484,10 +484,10 @@ out: * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy * EEPROM from shadow RAM to the flash device. **/ -static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +static int ixgbe_update_flash_X540(struct ixgbe_hw *hw) { + int status; u32 flup; - s32 status; status = ixgbe_poll_flash_update_done_X540(hw); if (status == -EIO) { @@ -529,7 +529,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) * Polls the FLUDONE (bit 26) of the EEC Register to determine when the * flash update is done. 
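ixgbe_calc_eeprom_checksum_X540 now ends in return (int)checksum;: a single int carries either a non-negative 16-bit checksum or a negative errno, and callers must test for < 0 before trusting the value. A small sketch of both halves of that contract (the 0xBABA base mirrors the driver's EEPROM sum constant; the rest is invented):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Returns the 16-bit checksum (0..0xFFFF) or a negative errno.
 * Widening u16 -> int is value-preserving, so the two ranges
 * can never collide. */
static int calc_checksum(const uint16_t *words, int n)
{
	uint16_t sum = 0;

	if (!words)
		return -EINVAL;
	for (int i = 0; i < n; i++)
		sum = (uint16_t)(sum + words[i]);
	return (int)(uint16_t)(0xBABA - sum);
}

int main(void)
{
	uint16_t img[3] = { 0x1111, 0x2222, 0x3333 };
	int ret = calc_checksum(img, 3);

	if (ret < 0)
		fprintf(stderr, "checksum failed: %d\n", ret);
	else
		printf("checksum = 0x%04X\n", (uint16_t)ret);
	return 0;
}
```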
**/ -static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +static int ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) { u32 i; u32 reg; @@ -551,7 +551,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) * Acquires the SWFW semaphore through the SW_FW_SYNC register for * the specified function (CSR, PHY0, PHY1, NVM, Flash) **/ -s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) +int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) { u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; @@ -660,7 +660,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) * * Sets the hardware semaphores so SW/FW can gain control of shared resources */ -static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) +static int ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) { u32 timeout = 2000; u32 i; @@ -760,7 +760,7 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) * Devices that implement the version 2 interface: * X540 **/ -s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; @@ -798,7 +798,7 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) * Devices that implement the version 2 interface: * X540 **/ -s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) { u32 macc_reg; u32 ledctl_reg; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index e246c0d2a427..b69a680d3ab5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -3,17 +3,17 @@ #include "ixgbe_type.h" -s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, +int ixgbe_get_invariants_X540(struct ixgbe_hw *hw); +int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); -s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); -s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +int ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +int ixgbe_start_hw_X540(struct ixgbe_hw *hw); enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); -s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, +int ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); -s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +int ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +int ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +int ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); -s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); +int ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 6208923e29a2..2decb0710b6e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -6,13 +6,13 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" -static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
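ixgbe_get_swfw_sync_semaphore above polls the hardware semaphore bits a bounded number of times (timeout = 2000) instead of blocking forever, and reports failure if firmware never releases them. A host-side sketch of that bounded spin; the register is a plain variable and the inter-poll delay is elided:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static volatile bool hw_sem_busy; /* stands in for the SWSM.SMBI bit */

/* Try up to 'timeout' times; a real driver would udelay() between polls. */
static int get_hw_semaphore(unsigned int timeout)
{
	for (unsigned int i = 0; i < timeout; i++) {
		if (!hw_sem_busy) {
			hw_sem_busy = true; /* claimed */
			return 0;
		}
		/* udelay(50) would go here */
	}
	return -EBUSY; /* firmware never released it */
}

static void put_hw_semaphore(void) { hw_sem_busy = false; }

int main(void)
{
	if (get_hw_semaphore(2000) == 0) {
		puts("semaphore acquired");
		put_hw_semaphore();
	}
	return 0;
}
```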
-static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); +static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); +static int ixgbe_setup_fc_x550em(struct ixgbe_hw *); static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *); static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *); -static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); +static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *); -static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) +static int ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; @@ -29,7 +29,7 @@ static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) return 0; } -static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) +static int ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; @@ -41,7 +41,7 @@ static s32 ixgbe_get_invariants_X550_x_fw(struct ixgbe_hw *hw) return 0; } -static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) +static int ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; @@ -55,7 +55,7 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw) return 0; } -static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) +static int ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; @@ -91,7 +91,7 @@ static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) * * Returns status code */ -static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) +static int ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) { return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); } @@ -104,7 +104,7 @@ static s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) * * Returns status code */ -static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) +static int ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) { return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); } @@ -117,9 +117,9 @@ static s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) * * Returns status code */ -static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) +static int ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) { - s32 status; + int status; status = ixgbe_read_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value); if (status) @@ -135,9 +135,9 @@ static s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) * * Returns status code */ -static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) +static int ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) { - s32 status; + int status; status = ixgbe_write_i2c_byte_generic_unlocked(hw, reg, IXGBE_PE, value); @@ -153,9 +153,9 @@ static s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) * This function assumes that the caller has acquired the proper semaphore. 
* Returns error code */ -static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) +static int ixgbe_reset_cs4227(struct ixgbe_hw *hw) { - s32 status; + int status; u32 retry; u16 value; u8 reg; @@ -225,7 +225,7 @@ static s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) static void ixgbe_check_cs4227(struct ixgbe_hw *hw) { u32 swfw_mask = hw->phy.phy_semaphore_mask; - s32 status; + int status; u16 value; u8 retry; @@ -292,7 +292,7 @@ out: * * Returns error code */ -static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) +static int ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: @@ -347,13 +347,13 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) return 0; } -static s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, +static int ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { return -EOPNOTSUPP; } -static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, +static int ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { return -EOPNOTSUPP; @@ -368,7 +368,7 @@ static s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, * * Returns an error code on error. **/ -static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, +static int ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); @@ -383,7 +383,7 @@ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, * * Returns an error code on error. **/ -static s32 +static int ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { @@ -399,7 +399,7 @@ ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, * * Returns an error code on error. **/ -static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, +static int ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); @@ -414,7 +414,7 @@ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, * * Returns an error code on error. 
**/ -static s32 +static int ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { @@ -427,7 +427,7 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, * @activity: activity to perform * @data: Pointer to 4 32-bit words of data */ -s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, +int ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, u32 (*data)[FW_PHY_ACT_DATA_COUNT]) { union { @@ -435,7 +435,7 @@ s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, struct ixgbe_hic_phy_activity_resp rsp; } hic; u16 retries = FW_PHY_ACT_RETRIES; - s32 rc; + int rc; u32 i; do { @@ -484,12 +484,12 @@ static const struct { * * Returns error code */ -static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) +static int ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) { u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; u16 phy_speeds; u16 phy_id_lo; - s32 rc; + int rc; u16 i; if (hw->phy.id) @@ -526,7 +526,7 @@ static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) * * Returns error code */ -static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) +static int ixgbe_identify_phy_fw(struct ixgbe_hw *hw) { if (hw->bus.lan_id) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; @@ -545,7 +545,7 @@ static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) * * Returns error code */ -static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) +static int ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) { u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; @@ -557,10 +557,10 @@ static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) * ixgbe_setup_fw_link - Setup firmware-controlled PHYs * @hw: pointer to hardware structure */ -static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) +static int ixgbe_setup_fw_link(struct ixgbe_hw *hw) { u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; - s32 rc; + int rc; u16 i; if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) @@ -613,7 +613,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) * * Called at init time to set up flow control. */ -static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) +static int ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) { if (hw->fc.requested_mode == ixgbe_fc_default) hw->fc.requested_mode = ixgbe_fc_full; @@ -627,7 +627,7 @@ static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) * Initializes the EEPROM parameters ixgbe_eeprom_info within the * ixgbe_hw struct in order to set up EEPROM access. 
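ixgbe_fw_phy_activity stages its request and response in a union over the same buffer and resends a fixed number of times before giving up. A simplified model of that buffer reuse and retry; the command ID, transport, and reply value are invented for the sketch:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct req { uint8_t cmd; uint32_t data[4]; };
struct rsp { uint8_t status; uint32_t data[4]; };

/* Fake transport: succeeds on the second attempt. */
static int send_cmd(void *buf, int attempt)
{
	struct rsp *r = buf;

	if (attempt == 0)
		return -EIO;		/* pretend the mailbox was busy */
	r->status = 0;
	r->data[0] = 0x15048000;	/* pretend firmware answer */
	return 0;
}

static int fw_activity(uint8_t cmd, uint32_t (*data)[4])
{
	union { struct req req; struct rsp rsp; } hic; /* shared buffer */
	int retries = 3;

	do {
		memset(&hic, 0, sizeof(hic));
		hic.req.cmd = cmd;
		memcpy(hic.req.data, *data, sizeof(*data));
		if (send_cmd(&hic, 3 - retries) == 0) {
			memcpy(*data, hic.rsp.data, sizeof(*data));
			return 0;
		}
	} while (--retries);

	return -EIO;
}

int main(void)
{
	uint32_t data[4] = { 0 };

	if (fw_activity(0x1, &data) == 0)
		printf("fw replied 0x%08X\n", data[0]);
	return 0;
}
```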
**/ -static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) +static int ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; @@ -659,7 +659,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) * * Note: ctrl can be NULL if the IOSF control register value is not needed */ -static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) +static int ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) { u32 i, command; @@ -690,12 +690,12 @@ static s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) * @device_type: 3 bit device type * @phy_data: Pointer to read data from the register **/ -static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, +static int ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 *data) { u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; u32 command, error; - s32 ret; + int ret; ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); if (ret) @@ -716,7 +716,8 @@ static s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { error = FIELD_GET(IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK, command); hw_dbg(hw, "Failed to read, error %x\n", error); - return -EIO; + ret = -EIO; + goto out; } if (!ret) @@ -731,10 +732,10 @@ out: * ixgbe_get_phy_token - Get the token for shared PHY access * @hw: Pointer to hardware structure */ -static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) +static int ixgbe_get_phy_token(struct ixgbe_hw *hw) { struct ixgbe_hic_phy_token_req token_cmd; - s32 status; + int status; token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; @@ -760,10 +761,10 @@ static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) * ixgbe_put_phy_token - Put the token for shared PHY access * @hw: Pointer to hardware structure */ -static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) +static int ixgbe_put_phy_token(struct ixgbe_hw *hw) { struct ixgbe_hic_phy_token_req token_cmd; - s32 status; + int status; token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; @@ -789,7 +790,7 @@ static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) * @device_type: 3 bit device type * @data: Data to write to the register **/ -static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, +static int ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, __always_unused u32 device_type, u32 data) { @@ -815,7 +816,7 @@ static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, * @device_type: 3 bit device type * @data: Pointer to read data from the register **/ -static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, +static int ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, __always_unused u32 device_type, u32 *data) { @@ -823,7 +824,7 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, struct ixgbe_hic_internal_phy_req cmd; struct ixgbe_hic_internal_phy_resp rsp; } hic; - s32 status; + int status; memset(&hic, 0, sizeof(hic)); hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; @@ -850,14 +851,14 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, * * Reads a 16 bit word(s) from the EEPROM using the hostif. 
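Note the behavioral fix inside the ixgbe_read_iosf_sb_reg_x550 hunk above: the error leg used to return -EIO directly, skipping the release at the out: label and leaving the PHY semaphore held, and now sets ret and falls through the common exit. A distilled version of that single-exit pattern, with the lock mocked:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool lock_taken;
static int  acquire(void) { lock_taken = true;  return 0; }
static void release(void) { lock_taken = false; }

/* Single-exit style: every failure path funnels through 'out'
 * so the semaphore taken at the top is always dropped. */
static int iosf_read(unsigned int reg, unsigned int *data, bool fail)
{
	int ret;

	ret = acquire();
	if (ret)
		return ret;		/* nothing to undo yet */

	if (fail) {
		ret = -EIO;		/* was: return -EIO; -- leaked the lock */
		goto out;
	}

	*data = reg ^ 0xFFFFu;		/* pretend register read */
	ret = 0;
out:
	release();
	return ret;
}

int main(void)
{
	unsigned int v;

	iosf_read(0x10, &v, true);
	printf("lock still held after error? %s\n",
	       lock_taken ? "yes (bug)" : "no (fixed)");
	return 0;
}
```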
**/ -static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, +static int ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; struct ixgbe_hic_read_shadow_ram buffer; u32 current_word = 0; u16 words_to_read; - s32 status; + int status; u32 i; /* Take semaphore for the entire operation. */ @@ -922,14 +923,14 @@ out: * * Returns error status for any failure **/ -static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, +static int ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, u16 size, u16 *csum, u16 *buffer, u32 buffer_size) { - u16 buf[256]; - s32 status; u16 length, bufsz, i, start; u16 *local_buffer; + u16 buf[256]; + int status; bufsz = ARRAY_SIZE(buf); @@ -990,14 +991,14 @@ static s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, * * Returns a negative error code on error, or the 16-bit checksum **/ -static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, +static int ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) { u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; + u16 pointer, i, size; u16 *local_buffer; - s32 status; u16 checksum = 0; - u16 pointer, i, size; + int status; hw->eeprom.ops.init_params(hw); @@ -1059,7 +1060,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, checksum = (u16)IXGBE_EEPROM_SUM - checksum; - return (s32)checksum; + return (int)checksum; } /** ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum @@ -1067,7 +1068,7 @@ static s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, * * Returns a negative error code on error, or the 16-bit checksum **/ -static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) +static int ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) { return ixgbe_calc_checksum_X550(hw, NULL, 0); } @@ -1079,11 +1080,11 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) * * Reads a 16 bit word from the EEPROM using the hostif. **/ -static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) +static int ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) { const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; struct ixgbe_hic_read_shadow_ram buffer; - s32 status; + int status; buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; @@ -1117,12 +1118,12 @@ static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) * Performs checksum calculation and validates the EEPROM checksum. If the * caller does not need checksum_val, the value can be NULL. **/ -static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, +static int ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) { - s32 status; - u16 checksum; u16 read_checksum = 0; + u16 checksum; + int status; /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every @@ -1167,11 +1168,11 @@ static s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, * * Write a 16 bit word to the EEPROM using the hostif. 
**/ -static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, +static int ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, u16 data) { - s32 status; struct ixgbe_hic_write_shadow_ram buffer; + int status; buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; buffer.hdr.req.buf_lenh = 0; @@ -1195,9 +1196,9 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, * * Write a 16 bit word to the EEPROM using the hostif. **/ -static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) +static int ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) { - s32 status = 0; + int status = 0; if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); @@ -1215,10 +1216,10 @@ static s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 data) * * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. **/ -static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) +static int ixgbe_update_flash_X550(struct ixgbe_hw *hw) { - s32 status = 0; union ixgbe_hic_hdr2 buffer; + int status = 0; buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; buffer.req.buf_lenh = 0; @@ -1237,7 +1238,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) * Sets bus link width and speed to unknown because X550em is * not a PCI device. **/ -static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) +static int ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) { hw->bus.type = ixgbe_bus_type_internal; hw->bus.width = ixgbe_bus_width_unknown; @@ -1268,9 +1269,9 @@ static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw) **/ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) { - u32 rxctrl, pfdtxgswc; - s32 status; struct ixgbe_hic_disable_rxen fw_cmd; + u32 rxctrl, pfdtxgswc; + int status; rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); if (rxctrl & IXGBE_RXCTRL_RXEN) { @@ -1310,10 +1311,10 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) * checksum and updates the EEPROM and instructs the hardware to update * the flash. **/ -static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) +static int ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) { - s32 status; u16 checksum = 0; + int status; /* Read the first word from the EEPROM. If this times out or fails, do * not continue or we could be in for a very long wait while every @@ -1350,11 +1351,11 @@ static s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) * * Write a 16 bit word(s) to the EEPROM using the hostif. **/ -static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, +static int ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { - s32 status = 0; + int status = 0; u32 i = 0; /* Take semaphore for the entire operation. */ @@ -1386,12 +1387,12 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, * @device_type: 3 bit device type * @data: Data to write to the register **/ -static s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, +static int ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u32 data) { u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; u32 command, error; - s32 ret; + int ret; ret = hw->mac.ops.acquire_swfw_sync(hw, gssr); if (ret) @@ -1429,10 +1430,10 @@ out: * * iXfI configuration needed for ixgbe_mac_X550EM_x devices. 
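ixgbe_disable_rx_x550 above is a classic read-modify-write: read RXCTRL, and only if RXEN is set clear it, remembering enough state for the matching enable to restore things. A generic sketch over a fake register; the bit name and saved flag are placeholders:

```c
#include <stdint.h>
#include <stdio.h>

#define RXEN 0x1u

static uint32_t rxctrl_reg = RXEN | 0x30; /* fake device register */

static uint32_t rd(void)       { return rxctrl_reg; }
static void     wr(uint32_t v) { rxctrl_reg = v; }

static uint32_t saved_rxen;

/* Clear RXEN only when it is set, and remember that we did. */
static void disable_rx(void)
{
	uint32_t rxctrl = rd();

	if (rxctrl & RXEN) {
		saved_rxen = 1;
		wr(rxctrl & ~RXEN);
	}
}

static void enable_rx(void)
{
	if (saved_rxen)
		wr(rd() | RXEN);
}

int main(void)
{
	disable_rx();
	printf("after disable: 0x%X\n", rd());
	enable_rx();
	printf("after enable:  0x%X\n", rd());
	return 0;
}
```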
**/ -static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) +static int ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) { - s32 status; u32 reg_val; + int status; /* Disable training protocol FSM. */ status = ixgbe_read_iosf_sb_reg_x550(hw, @@ -1501,10 +1502,10 @@ static s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) * internal PHY * @hw: pointer to hardware structure **/ -static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) +static int ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) { - s32 status; u32 link_ctrl; + int status; /* Restart auto-negotiation. */ status = hw->mac.ops.read_iosf_sb_reg(hw, @@ -1550,11 +1551,11 @@ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) * Configures the integrated KR PHY to use iXFI mode. Used to connect an * internal and external PHY at a specific speed, without autonegotiation. **/ -static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +static int ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { struct ixgbe_mac_info *mac = &hw->mac; - s32 status; u32 reg_val; + int status; /* iXFI is only supported with X552 */ if (mac->type != ixgbe_mac_X550EM_x) @@ -1607,7 +1608,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) * @hw: pointer to hardware structure * @linear: true if SFP module is linear */ -static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) +static int ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) { switch (hw->phy.sfp_type) { case ixgbe_sfp_type_not_present: @@ -1644,14 +1645,14 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) * * Configures the external PHY and the integrated KR PHY for SFP support. */ -static s32 +static int ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, ixgbe_link_speed speed, __always_unused bool autoneg_wait_to_complete) { - s32 status; - u16 reg_slice, reg_val; bool setup_linear = false; + u16 reg_slice, reg_val; + int status; /* Check if SFP module is supported and linear */ status = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); @@ -1690,11 +1691,11 @@ ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, * Configures the integrated PHY for native SFI mode. Used to connect the * internal PHY directly to an SFP cage, without autonegotiation. **/ -static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) +static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { struct ixgbe_mac_info *mac = &hw->mac; - s32 status; u32 reg_val; + int status; /* Disable all AN and force speed to 10G Serial. */ status = mac->ops.read_iosf_sb_reg(hw, @@ -1789,13 +1790,13 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) * * Configure the integrated PHY for native SFP support. */ -static s32 +static int ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, __always_unused bool autoneg_wait_to_complete) { bool setup_linear = false; u32 reg_phy_int; - s32 ret_val; + int ret_val; /* Check if SFP module is supported and linear */ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); @@ -1838,14 +1839,14 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, * * Configure the integrated PHY for SFP support.
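Nearly every PHY setup helper in this hunk follows one shape: read an IOSF sideband CSR, clear a field, set the new bits, write the value back. A minimal sketch of that read-modify-write, with hypothetical accessors standing in for the driver's read_iosf_sb_reg/write_iosf_sb_reg ops:

#include <stdint.h>

extern int read_iosf(uint32_t addr, uint32_t *val);
extern int write_iosf(uint32_t addr, uint32_t val);

int iosf_rmw(uint32_t addr, uint32_t clear_mask, uint32_t set_bits)
{
	uint32_t val;
	int status = read_iosf(addr, &val);

	if (status)
		return status;
	val &= ~clear_mask;	/* clear the field being reprogrammed */
	val |= set_bits;	/* install the new configuration */
	return write_iosf(addr, val);
}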
*/ -static s32 +static int ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, __always_unused bool autoneg_wait_to_complete) { u32 reg_slice, slice_offset; bool setup_linear = false; u16 reg_phy_ext; - s32 ret_val; + int ret_val; /* Check if SFP module is supported and linear */ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); @@ -1917,12 +1918,12 @@ ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, * * Returns error status for any failure **/ -static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, +static int ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait) { - s32 status; ixgbe_link_speed force_speed; + int status; /* Setup internal/external PHY link speed to iXFI (10G), unless * only 1G is auto advertised then setup KX link. @@ -1953,7 +1954,7 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, * * Check that both the MAC and X557 external PHY have link. **/ -static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, +static int ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up, bool link_up_wait_to_complete) @@ -1997,13 +1998,13 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, * @speed: unused * @autoneg_wait_to_complete: unused */ -static s32 +static int ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, __always_unused bool autoneg_wait_to_complete) { struct ixgbe_mac_info *mac = &hw->mac; u32 lval, sval, flx_val; - s32 rc; + int rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), @@ -2070,12 +2071,12 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, * @speed: the link speed to force * @autoneg_wait: true when waiting for completion is needed */ -static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, +static int ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait) { struct ixgbe_mac_info *mac = &hw->mac; u32 lval, sval, flx_val; - s32 rc; + int rc; rc = mac->ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), @@ -2147,7 +2148,7 @@ static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) { u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ixgbe_link_speed speed; - s32 status = -EIO; + int status = -EIO; bool link_up; /* AN should have completed when the cable was plugged in. @@ -2275,10 +2276,10 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) /** ixgbe_setup_sfp_modules_X550em - Setup SFP module * @hw: pointer to hardware structure */ -static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +static int ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) { - s32 status; bool linear; + int status; /* Check if SFP module is supported */ status = ixgbe_supported_sfp_modules_X550em(hw, &linear); @@ -2296,7 +2297,7 @@ static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) * @speed: pointer to link speed * @autoneg: true when autoneg or autotry is enabled **/ -static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, +static int ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *autoneg) { @@ -2374,7 +2375,7 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, * Determine if external Base T PHY interrupt cause is high temperature * failure alarm or link status change.
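The fc_autoneg helpers that begin above resolve flow control from the PAUSE and ASM_DIR advertisement bits of both link ends. A compilable model of the standard IEEE 802.3 Annex 28B resolution table that such helpers implement variants of; this is a sketch of the textbook logic, not the driver's exact code:

#include <stdbool.h>

void resolve_pause(bool pause, bool asm_dir, bool lp_pause, bool lp_asm_dir,
		   bool *tx_pause, bool *rx_pause)
{
	/* Symmetric when both ends advertise PAUSE; otherwise asymmetric
	 * pause is possible only if both ends advertise ASM_DIR.
	 */
	*tx_pause = (pause && lp_pause) ||
		    (!pause && asm_dir && lp_pause && lp_asm_dir);
	*rx_pause = (pause && lp_pause) ||
		    (pause && asm_dir && !lp_pause && lp_asm_dir);
}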
**/ -static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc, +static int ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc, bool *is_overtemp) { u32 status; @@ -2462,7 +2463,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc, * * Returns PHY access status **/ -static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) +static int ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) { bool lsc, overtemp; u32 status; @@ -2554,7 +2555,7 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) * failure alarm then return error, else if link status change * then setup internal/external PHY link **/ -static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw, +static int ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *is_overtemp) { struct ixgbe_phy_info *phy = &hw->phy; @@ -2578,11 +2579,11 @@ static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw, * * Configures the integrated KR PHY. **/ -static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, +static int ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, ixgbe_link_speed speed) { - s32 status; u32 reg_val; + int status; status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), @@ -2633,7 +2634,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, * ixgbe_setup_kr_x550em - Configure the KR PHY * @hw: pointer to hardware structure **/ -static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) +static int ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { /* leave link alone for 2.5G */ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) @@ -2651,7 +2652,7 @@ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) * * Returns error code if unable to get link status. **/ -static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) +static int ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) { u32 ret; u16 autoneg_status; @@ -2685,7 +2686,7 @@ static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) * A return of a non-zero value indicates an error, and the base driver should * not report link up. **/ -static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) +static int ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) { ixgbe_link_speed force_speed; bool link_up; @@ -2745,9 +2746,9 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) /** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI * @hw: pointer to hardware structure **/ -static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +static int ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) { - s32 status; + int status; status = ixgbe_reset_phy_generic(hw); @@ -2763,7 +2764,7 @@ static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) * @hw: pointer to hardware structure * @led_idx: led number to turn on **/ -static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) +static int ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) { u16 phy_data; @@ -2785,7 +2786,7 @@ static s32 ixgbe_led_on_t_x550em(struct ixgbe_hw *hw, u32 led_idx) * @hw: pointer to hardware structure * @led_idx: led number to turn off **/ -static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) +static int ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) { u16 phy_data; @@ -2818,12 +2819,12 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx) * semaphore, -EIO when command fails or -EINVAL when incorrect * params passed.
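The LED on/off helpers above are a single read-modify-write of a per-LED PHY provisioning register. A sketch of that pattern; the register address and bit name here are hypothetical stand-ins, the real offsets live in the ixgbe headers:

#include <stdint.h>
#include <stdbool.h>

#define LED_PROV_REG_BASE 0xC430u	/* hypothetical MDIO register base */
#define LED_MANUAL_ON     (1u << 8)	/* hypothetical force-on bit */

extern int phy_read(uint32_t reg, uint16_t *val);
extern int phy_write(uint32_t reg, uint16_t val);

int led_set(uint32_t led_idx, bool on)
{
	uint16_t phy_data;
	int status = phy_read(LED_PROV_REG_BASE + led_idx, &phy_data);

	if (status)
		return status;
	if (on)
		phy_data |= LED_MANUAL_ON;
	else
		phy_data &= (uint16_t)~LED_MANUAL_ON;
	return phy_write(LED_PROV_REG_BASE + led_idx, phy_data);
}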
**/ -static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, +static int ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 sub, u16 len, const char *driver_ver) { struct ixgbe_hic_drv_info2 fw_cmd; - s32 ret_val; + int ret_val; int i; if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string))) @@ -2865,12 +2866,12 @@ static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, * * Determine lowest common link speed with link partner. **/ -static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, +static int ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) { - u16 an_lp_status; - s32 status; u16 word = hw->eeprom.ctrl_word_3; + u16 an_lp_status; + int status; *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; @@ -2883,28 +2884,28 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, /* If link partner advertised 1G, return 1G */ if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; - return status; + return 0; } /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) - return status; + return 0; /* Link partner not capable of lower speeds, return 10G */ *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; - return status; + return 0; } /** * ixgbe_setup_fc_x550em - Set up flow control * @hw: pointer to hardware structure */ -static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) +static int ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) { bool pause, asm_dir; u32 reg_val; - s32 rc = 0; + int rc = 0; /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { @@ -2989,7 +2990,7 @@ static void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) { u32 link_s1, lp_an_page_low, an_cntl_1; ixgbe_link_speed speed; - s32 status = -EIO; + int status = -EIO; bool link_up; /* AN should have completed when the cable was plugged in. @@ -3072,13 +3073,13 @@ static void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting * the X557 PHY immediately prior to entering LPLU. 
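The LCD (lowest common denominator) logic visible in ixgbe_get_lcd_t_x550em reduces to a small pure function: prefer 1G if the link partner advertises it, report no valid LCD when NVM disables 10G for LPLU on this port, otherwise fall back to 10G. A standalone model; the speed constants are illustrative:

#include <stdbool.h>

enum { SPEED_UNKNOWN = 0, SPEED_1G = 1, SPEED_10G = 10 };

int pick_lcd_speed(bool lp_advertised_1g, bool d10gmp_disabled_for_port)
{
	if (lp_advertised_1g)
		return SPEED_1G;	/* partner can do 1G: lowest common */
	if (d10gmp_disabled_for_port)
		return SPEED_UNKNOWN;	/* 10G blocked by NVM for LPLU */
	return SPEED_10G;		/* partner is not capable of lower speeds */
}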
**/ -static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) +static int ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) { u16 an_10g_cntl_reg, autoneg_reg, speed; - s32 status; ixgbe_link_speed lcd_speed; u32 save_autoneg; bool link_up; + int status; /* If blocked by MNG FW, then don't restart AN */ if (ixgbe_check_reset_blocked(hw)) @@ -3129,7 +3130,7 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) - return status; + return 0; /* Clear AN completed indication */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, @@ -3166,10 +3167,10 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs * @hw: pointer to hardware structure */ -static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) +static int ixgbe_reset_phy_fw(struct ixgbe_hw *hw) { u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; - s32 rc; + int rc; if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) return 0; @@ -3195,7 +3196,7 @@ static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) static bool ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) { u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; - s32 rc; + int rc; rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); if (rc) @@ -3238,10 +3239,10 @@ static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) * set during init_shared_code because the PHY/SFP type was * not known. Perform the SFP init if necessary. **/ -static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) +static int ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; - s32 ret_val; + int ret_val; hw->mac.ops.set_lan_id(hw); @@ -3366,9 +3367,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) /** ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. ** @hw: pointer to hardware structure **/ -static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) +static int ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) { - s32 status; + int status; u16 reg; status = hw->phy.ops.read_reg(hw, @@ -3440,14 +3441,14 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) ** and clears all interrupts, perform a PHY reset, and perform a link (MAC) ** reset. **/ -static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) +static int ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) { + u32 swfw_mask = hw->phy.phy_semaphore_mask; ixgbe_link_speed link_speed; - s32 status; + bool link_up = false; u32 ctrl = 0; + int status; u32 i; - bool link_up = false; - u32 swfw_mask = hw->phy.phy_semaphore_mask; /* Call adapter stop to disable Tx/Rx and clear interrupts */ status = hw->mac.ops.stop_adapter(hw); @@ -3608,10 +3609,10 @@ static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, * * Called at init time to set up flow control. 
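ixgbe_reset_hw_X550em above is careful about ordering: quiesce the adapter first, then reset the PHY, then perform the link (MAC) reset. A sketch of that sequence with hypothetical helpers standing in for the driver's ops:

extern int stop_adapter(void);	/* disable Tx/Rx and clear interrupts */
extern int reset_phy(void);	/* caller holds the PHY semaphore */
extern int reset_mac(void);	/* link/MAC reset comes last */

int reset_hw(void)
{
	int status = stop_adapter();

	if (status)
		return status;
	status = reset_phy();
	if (status)
		return status;
	return reset_mac();
}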
**/ -static s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) +static int ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) { - s32 status = 0; u32 an_cntl = 0; + int status = 0; /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { @@ -3713,9 +3714,9 @@ static void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) * * Acquires the SWFW semaphore and sets the I2C MUX */ -static s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) +static int ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) { - s32 status; + int status; status = ixgbe_acquire_swfw_sync_X540(hw, mask); if (status) @@ -3749,11 +3750,11 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) * * Acquires the SWFW semaphore and get the shared PHY token as needed */ -static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +static int ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; int retries = FW_PHY_TOKEN_RETRIES; - s32 status; + int status; while (--retries) { status = 0; @@ -3806,11 +3807,11 @@ static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) * Token. The PHY Token is needed since the MDIO is shared between two MAC * instances. */ -static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, +static int ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 *phy_data) { u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; - s32 status; + int status; if (hw->mac.ops.acquire_swfw_sync(hw, mask)) return -EBUSY; @@ -3832,11 +3833,11 @@ static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, * Writes a value to specified PHY register using the SWFW lock and PHY Token. * The PHY Token is needed since the MDIO is shared between two MAC instances.
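The x550em_a acquire path layers the shared PHY token on top of the host SWFW semaphore and backs off on contention so the other MAC instance can make progress. A sketch of that retry loop; the retry budget and helper names are assumptions, not the driver's actual values:

#include <errno.h>

#define TOKEN_RETRIES 70	/* assumed stand-in for FW_PHY_TOKEN_RETRIES */

extern int take_semaphore(void);
extern void drop_semaphore(void);
extern int take_phy_token(void);	/* 0, or -EAGAIN if the peer holds it */

int acquire_swfw_and_token(void)
{
	int retries = TOKEN_RETRIES;
	int status;

	while (--retries) {
		status = take_semaphore();
		if (status)
			return status;
		status = take_phy_token();
		if (!status)
			return 0;	/* hold both on success */
		drop_semaphore();	/* back off before retrying */
		if (status != -EAGAIN)
			return status;
	}
	return -EBUSY;
}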
*/ -static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, +static int ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, u16 phy_data) { u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; - s32 status; + int status; if (hw->mac.ops.acquire_swfw_sync(hw, mask)) return -EBUSY; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 884d64114bff..837295fecd17 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -180,6 +180,7 @@ config SKY2_DEBUG source "drivers/net/ethernet/marvell/octeontx2/Kconfig" source "drivers/net/ethernet/marvell/octeon_ep/Kconfig" +source "drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig" source "drivers/net/ethernet/marvell/prestera/Kconfig" endif # NET_VENDOR_MARVELL diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index ceba4aa4f026..a399defe25fd 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -12,5 +12,6 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o obj-$(CONFIG_SKGE) += skge.o obj-$(CONFIG_SKY2) += sky2.o obj-y += octeon_ep/ +obj-y += octeon_ep_vf/ obj-y += octeontx2/ obj-y += prestera/ diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index a641b3534ca3..40a5f1431e4e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -5097,7 +5097,7 @@ static int mvneta_ethtool_set_wol(struct net_device *dev, } static int mvneta_ethtool_get_eee(struct net_device *dev, - struct ethtool_eee *eee) + struct ethtool_keee *eee) { struct mvneta_port *pp = netdev_priv(dev); u32 lpi_ctl0; @@ -5113,7 +5113,7 @@ static int mvneta_ethtool_get_eee(struct net_device *dev, } static int mvneta_ethtool_set_eee(struct net_device *dev, - struct ethtool_eee *eee) + struct ethtool_keee *eee) { struct mvneta_port *pp = netdev_priv(dev); u32 lpi_ctl0; diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig new file mode 100644 index 000000000000..e371a3ef0c49 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Marvell's Octeon PCI Endpoint NIC VF Driver Configuration +# + +config OCTEON_EP_VF + tristate "Marvell Octeon PCI Endpoint NIC VF Driver" + depends on 64BIT + depends on PCI + help + This driver supports the networking functionality of Marvell's + Octeon PCI Endpoint NIC VF. + + To know the list of devices supported by this driver, refer to the + documentation in + <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst>. + + To compile this driver as a module, choose M here. + The name of the module will be octeon_ep_vf. 
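For orientation, the module this Kconfig entry builds, octeon_ep_vf.ko, is a conventional PCI driver underneath. A generic, compilable sketch of how such a VF NIC module hooks into the PCI core; the device ID is a placeholder and none of this is the driver's actual probe code:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id sketch_id_table[] = {
	{ PCI_DEVICE(0x177d /* Cavium/Marvell */, 0xffff /* placeholder ID */) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, sketch_id_table);

static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	pci_set_master(pdev);	/* the VF does DMA to host memory */
	return 0;		/* real driver maps BARs, allocs a netdev, ... */
}

static void sketch_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver sketch_driver = {
	.name = "octeon_ep_vf_sketch",
	.id_table = sketch_id_table,
	.probe = sketch_probe,
	.remove = sketch_remove,
};
module_pci_driver(sketch_driver);
MODULE_LICENSE("GPL");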
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile new file mode 100644 index 000000000000..4a5f9fcb0b40 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Network driver for Marvell's Octeon PCI Endpoint NIC VF +# + +obj-$(CONFIG_OCTEON_EP_VF) += octeon_ep_vf.o + +octeon_ep_vf-y := octep_vf_main.o octep_vf_cn9k.o octep_vf_cnxk.o \ + octep_vf_tx.o octep_vf_rx.o octep_vf_mbox.o \ + octep_vf_ethtool.o diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c new file mode 100644 index 000000000000..88937fce75f1 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cn9k.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "octep_vf_config.h" +#include "octep_vf_main.h" +#include "octep_vf_regs_cn9k.h" + +/* Dump useful hardware IQ/OQ CSRs for debug purpose */ +static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno) +{ + struct device *dev = &oct->pdev->dev; + + dev_info(dev, "IQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno))); + dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_IN_CONTROL(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno))); + dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_IN_ENABLE(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno))); + dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno))); + dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno))); + dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_IN_CNTS(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(qno))); + dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_IN_INT_LEVELS(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_IN_PKT_CNT(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(qno))); + dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_IN_BYTE_CNT(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(qno))); + + dev_info(dev, "OQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno))); + dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_OUT_CONTROL(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(qno))); + dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_OUT_ENABLE(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 
0x%016llx\n", + qno, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno))); + dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_OUT_CNTS(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CNTS(qno))); + dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_OUT_INT_LEVELS(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_OUT_PKT_CNT(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(qno))); + dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_VF_SDP_R_OUT_BYTE_CNT(qno), + octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_BYTE_CNT(qno))); +} + +/* Reset Hardware Tx queue */ +static void cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no) +{ + u64 val = ULL(0); + + dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no); + + /* Disable the Tx/Instruction Ring */ + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val); + + /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val); + + val = GENMASK_ULL(31, 0); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val); + + val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no)); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no), + val & GENMASK_ULL(31, 0)); +} + +/* Reset Hardware Rx queue */ +static void cn93_vf_reset_oq(struct octep_vf_device *oct, int q_no) +{ + u64 val = ULL(0); + + /* Disable Output (Rx) Ring */ + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(q_no), val); + + /* Clear count CSRs */ + val = octep_vf_read_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no)); + octep_vf_write_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no), val); + + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0)); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0)); +} + +/* Reset all hardware Tx/Rx queues */ +static void octep_vf_reset_io_queues_cn93(struct octep_vf_device *oct) +{ + struct pci_dev *pdev = oct->pdev; + int q; + + dev_dbg(&pdev->dev, "Reset OCTEP_CN93 VF IO Queues\n"); + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + cn93_vf_reset_iq(oct, q); + cn93_vf_reset_oq(oct, q); + } +} + +/* Initialize configuration limits and initial active config */ +static void octep_vf_init_config_cn93_vf(struct octep_vf_device *oct) +{ + struct octep_vf_config *conf = oct->conf; + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(0)); + conf->ring_cfg.max_io_rings = (reg_val >> CN93_VF_R_IN_CTL_RPVF_POS) & + CN93_VF_R_IN_CTL_RPVF_MASK; + conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings; + + conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS; + conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR; + conf->iq.db_min = OCTEP_VF_DB_MIN; + conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD; + + conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS; + conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE; + conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD; + conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD; + conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD; + + conf->msix_cfg.ioq_msix = 
conf->ring_cfg.active_io_rings; +} + +/* Setup registers for a hardware Tx Queue */ +static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no) +{ + struct octep_vf_iq *iq = oct->iq[iq_no]; + u32 reset_instr_cnt; + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CN93_VF_R_IN_CTL_IDLE)) { + do { + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no)); + } while (!(reg_val & CN93_VF_R_IN_CTL_IDLE)); + } + reg_val |= CN93_VF_R_IN_CTL_RDSIZE; + reg_val |= CN93_VF_R_IN_CTL_IS_64B; + reg_val |= CN93_VF_R_IN_CTL_ESR; + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no), reg_val); + + /* Write the start of the input queue's ring and its size */ + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count); + + /* Remember the doorbell & instruction count register addr for this queue */ + iq->doorbell_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no); + iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no); + iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no); + + /* Store the current instruction counter (used in flush_iq calculation) */ + reset_instr_cnt = readl(iq->inst_cnt_reg); + writel(reset_instr_cnt, iq->inst_cnt_reg); + + /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ + reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val); +} + +/* Setup registers for a hardware Rx Queue */ +static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no) +{ + struct octep_vf_oq *oq = oct->oq[oq_no]; + u32 time_threshold = 0; + u64 oq_ctl = ULL(0); + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)) { + do { + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no)); + } while (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)); + } + + reg_val &= ~(CN93_VF_R_OUT_CTL_IMODE); + reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_P); + reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_P); + reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_I); + reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_I); + reg_val &= ~(CN93_VF_R_OUT_CTL_ES_I); + reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_D); + reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_D); + reg_val &= ~(CN93_VF_R_OUT_CTL_ES_D); + reg_val |= (CN93_VF_R_OUT_CTL_ES_P); + + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), reg_val); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count); + + oq_ctl = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no)); + oq_ctl &= ~GENMASK_ULL(22, 0); //clear the ISIZE and BSIZE (22-0) + oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0)); //populate the BSIZE (15-0) + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no); + oq->pkts_credit_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no); + + time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); + reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); +} + +/* Setup 
registers for a VF mailbox */ +static void octep_vf_setup_mbox_regs_cn93(struct octep_vf_device *oct, int q_no) +{ + struct octep_vf_mbox *mbox = oct->mbox; + + /* PF to VF DATA reg. VF reads from this reg */ + mbox->mbox_read_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_DATA(q_no); + + /* VF mbox interrupt reg */ + mbox->mbox_int_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_INT(q_no); + + /* VF to PF DATA reg. VF writes into this reg */ + mbox->mbox_write_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_VF_PF_DATA(q_no); +} + +/* Mailbox Interrupt handler */ +static void cn93_handle_vf_mbox_intr(struct octep_vf_device *oct) +{ + if (oct->mbox) + schedule_work(&oct->mbox->wk.work); + else + dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n"); +} + +/* Tx/Rx queue interrupt handler */ +static irqreturn_t octep_vf_ioq_intr_handler_cn93(void *data) +{ + struct octep_vf_ioq_vector *vector = data; + struct octep_vf_device *oct; + struct octep_vf_oq *oq; + u64 reg_val; + + oct = vector->octep_vf_dev; + oq = vector->oq; + /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */ + if (oq->q_no == 0) { + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0)); + if (reg_val & CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS) { + cn93_handle_vf_mbox_intr(oct); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val); + } + } + napi_schedule_irqoff(oq->napi); + return IRQ_HANDLED; +} + +/* Re-initialize Octeon hardware registers */ +static void octep_vf_reinit_regs_cn93(struct octep_vf_device *oct) +{ + u32 i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_iq_regs(oct, i); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_oq_regs(oct, i); + + oct->hw_ops.enable_interrupts(oct); + oct->hw_ops.enable_io_queues(oct); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); +} + +/* Enable all interrupts */ +static void octep_vf_enable_interrupts_cn93(struct octep_vf_device *oct) +{ + int num_rings, q; + u64 reg_val; + + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + for (q = 0; q < num_rings; q++) { + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q)); + reg_val |= BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val); + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q)); + reg_val |= BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val); + } + /* Enable PF to VF mbox interrupt by setting 2nd bit*/ + octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), + CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB); +} + +/* Disable all interrupts */ +static void octep_vf_disable_interrupts_cn93(struct octep_vf_device *oct) +{ + int num_rings, q; + u64 reg_val; + + /* Disable PF to VF mbox interrupt by setting 2nd bit*/ + if (oct->mbox) + octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0); + + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + for (q = 0; q < num_rings; q++) { + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q)); + reg_val &= ~BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val); + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q)); + reg_val &= ~BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val); + } +} + +/* Get new Octeon Read Index: index of descriptor that Octeon reads next. 
*/ +static u32 octep_vf_update_iq_read_index_cn93(struct octep_vf_iq *iq) +{ + u32 pkt_in_done = readl(iq->inst_cnt_reg); + u32 last_done, new_idx; + + last_done = pkt_in_done - iq->pkt_in_done; + iq->pkt_in_done = pkt_in_done; + + new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count; + + return new_idx; +} + +/* Enable a hardware Tx Queue */ +static void octep_vf_enable_iq_cn93(struct octep_vf_device *oct, int iq_no) +{ + u64 loop = HZ; + u64 reg_val; + + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0)); + + while (octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no)) && + loop--) { + schedule_timeout_interruptible(1); + } + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no)); + reg_val |= BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val); + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no)); + reg_val |= ULL(1); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val); +} + +/* Enable a hardware Rx Queue */ +static void octep_vf_enable_oq_cn93(struct octep_vf_device *oct, int oq_no) +{ + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no)); + reg_val |= BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); + + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0)); + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no)); + reg_val |= ULL(1); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val); +} + +/* Enable all hardware Tx/Rx Queues assigned to VF */ +static void octep_vf_enable_io_queues_cn93(struct octep_vf_device *oct) +{ + u8 q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_vf_enable_iq_cn93(oct, q); + octep_vf_enable_oq_cn93(oct, q); + } +} + +/* Disable a hardware Tx Queue assigned to VF */ +static void octep_vf_disable_iq_cn93(struct octep_vf_device *oct, int iq_no) +{ + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no)); + reg_val &= ~ULL(1); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val); +} + +/* Disable a hardware Rx Queue assigned to VF */ +static void octep_vf_disable_oq_cn93(struct octep_vf_device *oct, int oq_no) +{ + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no)); + reg_val &= ~ULL(1); + octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val); +} + +/* Disable all hardware Tx/Rx Queues assigned to VF */ +static void octep_vf_disable_io_queues_cn93(struct octep_vf_device *oct) +{ + int q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_vf_disable_iq_cn93(oct, q); + octep_vf_disable_oq_cn93(oct, q); + } +} + +/* Dump hardware registers (including Tx/Rx queues) for debugging. */ +static void octep_vf_dump_registers_cn93(struct octep_vf_device *oct) +{ + u8 num_rings, q; + + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + for (q = 0; q < num_rings; q++) + cn93_vf_dump_q_regs(oct, q); +} + +/** + * octep_vf_device_setup_cn93() - Setup Octeon device. + * + * @oct: Octeon device private data structure. + * + * - initialize hardware operations. + * - get target side pcie port number for the device. + * - set initial configuration and max limits. 
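The read-index update above relies on unsigned 32-bit arithmetic: the hardware instruction counter free-runs, so subtracting the cached value yields the number of newly consumed commands even across a counter wrap. A standalone model of the same computation:

#include <stdint.h>

uint32_t update_read_index(uint32_t hw_cnt, uint32_t *cached_cnt,
			   uint32_t read_idx, uint32_t ring_size)
{
	uint32_t newly_done = hw_cnt - *cached_cnt;	/* mod-2^32, wrap-safe */

	*cached_cnt = hw_cnt;
	return (read_idx + newly_done) % ring_size;
}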
+ */ +void octep_vf_device_setup_cn93(struct octep_vf_device *oct) +{ + oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cn93; + oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cn93; + oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cn93; + + oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cn93; + oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cn93; + + oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cn93; + oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cn93; + + oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cn93; + + oct->hw_ops.enable_iq = octep_vf_enable_iq_cn93; + oct->hw_ops.enable_oq = octep_vf_enable_oq_cn93; + oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cn93; + + oct->hw_ops.disable_iq = octep_vf_disable_iq_cn93; + oct->hw_ops.disable_oq = octep_vf_disable_oq_cn93; + oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cn93; + oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cn93; + + oct->hw_ops.dump_registers = octep_vf_dump_registers_cn93; + octep_vf_init_config_cn93_vf(oct); +} diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c new file mode 100644 index 000000000000..1f79dfad42c6 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_cnxk.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "octep_vf_config.h" +#include "octep_vf_main.h" +#include "octep_vf_regs_cnxk.h" + +/* Dump useful hardware IQ/OQ CSRs for debug purpose */ +static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno) +{ + struct device *dev = &oct->pdev->dev; + + dev_info(dev, "IQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno))); + dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_IN_CONTROL(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno))); + dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_IN_ENABLE(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno))); + dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno))); + dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno))); + dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_IN_CNTS(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(qno))); + dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_IN_INT_LEVELS(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_IN_PKT_CNT(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(qno))); + dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_IN_BYTE_CNT(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(qno))); + + dev_info(dev, "OQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno), + octep_vf_read_csr64(oct, 
CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno))); + dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_OUT_CONTROL(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(qno))); + dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_OUT_ENABLE(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno))); + dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_OUT_CNTS(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(qno))); + dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_OUT_PKT_CNT(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(qno))); + dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno))); + dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n", + qno, CNXK_VF_SDP_R_ERR_TYPE(qno), + octep_vf_read_csr64(oct, CNXK_VF_SDP_R_ERR_TYPE(qno))); +} + +/* Reset Hardware Tx queue */ +static void cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no) +{ + u64 val = ULL(0); + + dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no); + + /* Disable the Tx/Instruction Ring */ + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val); + + /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val); + + val = GENMASK_ULL(31, 0); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val); + + val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no)); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no), val & GENMASK_ULL(31, 0)); +} + +/* Reset Hardware Rx queue */ +static void cnxk_vf_reset_oq(struct octep_vf_device *oct, int q_no) +{ + u64 val = ULL(0); + + /* Disable Output (Rx) Ring */ + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q_no), val); + + /* Clear count CSRs */ + val = octep_vf_read_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no)); + octep_vf_write_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no), val); + + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0)); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0)); +} + +/* Reset all hardware Tx/Rx queues */ +static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct) +{ + struct pci_dev *pdev = oct->pdev; + int q; + + dev_dbg(&pdev->dev, "Reset OCTEP_CNXK VF IO Queues\n"); + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + cnxk_vf_reset_iq(oct, q); + cnxk_vf_reset_oq(oct, q); + } +} + +/* Initialize configuration limits and initial active config */ +static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct) +{ + struct octep_vf_config *conf = oct->conf; + u64 reg_val; + + reg_val = 
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0)); + conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) & + CNXK_VF_R_IN_CTL_RPVF_MASK; + conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings; + + conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS; + conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR; + conf->iq.db_min = OCTEP_VF_DB_MIN; + conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD; + + conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS; + conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE; + conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD; + conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD; + conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD; + conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN; + + conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings; +} + +/* Setup registers for a hardware Tx Queue */ +static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no) +{ + struct octep_vf_iq *iq = oct->iq[iq_no]; + u32 reset_instr_cnt; + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)) { + do { + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no)); + } while (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)); + } + reg_val |= CNXK_VF_R_IN_CTL_RDSIZE; + reg_val |= CNXK_VF_R_IN_CTL_IS_64B; + reg_val |= CNXK_VF_R_IN_CTL_ESR; + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no), reg_val); + + /* Write the start of the input queue's ring and its size */ + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count); + + /* Remember the doorbell & instruction count register addr for this queue */ + iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no); + iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no); + iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no); + + /* Store the current instruction counter (used in flush_iq calculation) */ + reset_instr_cnt = readl(iq->inst_cnt_reg); + writel(reset_instr_cnt, iq->inst_cnt_reg); + + /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ + reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val); +} + +/* Setup registers for a hardware Rx Queue */ +static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no) +{ + struct octep_vf_oq *oq = oct->oq[oq_no]; + u32 time_threshold = 0; + u64 oq_ctl = ULL(0); + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)) { + do { + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no)); + } while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)); + } + + reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE); + reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P); + reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_P); + reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_I); + reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_I); + reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_I); + reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_D); + reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_D); + reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_D); + reg_val |= (CNXK_VF_R_OUT_CTL_ES_P); + + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma); + octep_vf_write_csr64(oct, 
CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count); + + oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no)); + /* Clear the ISIZE and BSIZE (22-0) */ + oq_ctl &= ~GENMASK_ULL(22, 0); + /* Populate the BSIZE (15-0) */ + oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0)); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + oq->pkts_sent_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_CNTS(oq_no); + oq->pkts_credit_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no); + + time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); + reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); + + /* set watermark for backpressure */ + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no)); + reg_val &= ~GENMASK_ULL(31, 0); + reg_val |= CFG_GET_OQ_WMARK(oct->conf); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val); +} + +/* Setup registers for a VF mailbox */ +static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no) +{ + struct octep_vf_mbox *mbox = oct->mbox; + + /* PF to VF DATA reg. VF reads from this reg */ + mbox->mbox_read_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_DATA(q_no); + + /* VF mbox interrupt reg */ + mbox->mbox_int_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_INT(q_no); + + /* VF to PF DATA reg. VF writes into this reg */ + mbox->mbox_write_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_VF_PF_DATA(q_no); +} + +/* Mailbox Interrupt handler */ +static void cnxk_handle_vf_mbox_intr(struct octep_vf_device *oct) +{ + if (oct->mbox) + schedule_work(&oct->mbox->wk.work); + else + dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n"); +} + +/* Tx/Rx queue interrupt handler */ +static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data) +{ + struct octep_vf_ioq_vector *vector = data; + struct octep_vf_device *oct; + struct octep_vf_oq *oq; + u64 reg_val; + + oct = vector->octep_vf_dev; + oq = vector->oq; + /* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */ + if (oq->q_no == 0) { + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0)); + if (reg_val & CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS) { + cnxk_handle_vf_mbox_intr(oct); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val); + } + } + napi_schedule_irqoff(oq->napi); + return IRQ_HANDLED; +} + +/* Re-initialize Octeon hardware registers */ +static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct) +{ + u32 i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_iq_regs(oct, i); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_oq_regs(oct, i); + + oct->hw_ops.enable_interrupts(oct); + oct->hw_ops.enable_io_queues(oct); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); +} + +/* Enable all interrupts */ +static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct) +{ + int num_rings, q; + u64 reg_val; + + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + for (q = 0; q < num_rings; q++) { + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q)); + reg_val |= BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val); + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q)); + reg_val |= 
BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val); + } + /* Enable PF to VF mbox interrupt by setting 2nd bit*/ + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), + CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB); +} + +/* Disable all interrupts */ +static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct) +{ + int num_rings, q; + u64 reg_val; + + /* Disable PF to VF mbox interrupt by setting 2nd bit*/ + if (oct->mbox) + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0); + + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + for (q = 0; q < num_rings; q++) { + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q)); + reg_val &= ~BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val); + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q)); + reg_val &= ~BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val); + } +} + +/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */ +static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq) +{ + u32 pkt_in_done = readl(iq->inst_cnt_reg); + u32 last_done, new_idx; + + last_done = pkt_in_done - iq->pkt_in_done; + iq->pkt_in_done = pkt_in_done; + + new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count; + + return new_idx; +} + +/* Enable a hardware Tx Queue */ +static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no) +{ + u64 loop = HZ; + u64 reg_val; + + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0)); + + while (octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no)) && + loop--) { + schedule_timeout_interruptible(1); + } + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no)); + reg_val |= BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val); + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no)); + reg_val |= ULL(1); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val); +} + +/* Enable a hardware Rx Queue */ +static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no) +{ + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no)); + reg_val |= BIT_ULL_MASK(62); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); + + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0)); + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no)); + reg_val |= ULL(1); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val); +} + +/* Enable all hardware Tx/Rx Queues assigned to VF */ +static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct) +{ + u8 q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_vf_enable_iq_cnxk(oct, q); + octep_vf_enable_oq_cnxk(oct, q); + } +} + +/* Disable a hardware Tx Queue assigned to VF */ +static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no) +{ + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no)); + reg_val &= ~ULL(1); + octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val); +} + +/* Disable a hardware Rx Queue assigned to VF */ +static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no) +{ + u64 reg_val; + + reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no)); + reg_val &= ~ULL(1); + octep_vf_write_csr64(oct, 
CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val); +} + +/* Disable all hardware Tx/Rx Queues assigned to VF */ +static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct) +{ + int q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_vf_disable_iq_cnxk(oct, q); + octep_vf_disable_oq_cnxk(oct, q); + } +} + +/* Dump hardware registers (including Tx/Rx queues) for debugging. */ +static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct) +{ + u8 num_rings, q; + + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + for (q = 0; q < num_rings; q++) + cnxk_vf_dump_q_regs(oct, q); +} + +/** + * octep_vf_device_setup_cnxk() - Setup Octeon device. + * + * @oct: Octeon device private data structure. + * + * - initialize hardware operations. + * - get target side pcie port number for the device. + * - set initial configuration and max limits. + */ +void octep_vf_device_setup_cnxk(struct octep_vf_device *oct) +{ + oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk; + oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk; + oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk; + + oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk; + oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk; + + oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk; + oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk; + + oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk; + + oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk; + oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk; + oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk; + + oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk; + oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk; + oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk; + oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk; + + oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk; + octep_vf_init_config_cnxk_vf(oct); +} diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h new file mode 100644 index 000000000000..e03a647b0110 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_VF_CONFIG_H_ +#define _OCTEP_VF_CONFIG_H_ + +/* Tx instruction types by length */ +#define OCTEP_VF_32BYTE_INSTR 32 +#define OCTEP_VF_64BYTE_INSTR 64 + +/* Tx Queue: maximum descriptors per ring */ +#define OCTEP_VF_IQ_MAX_DESCRIPTORS 1024 +/* Minimum input (Tx) requests to be enqueued to ring doorbell */ +#define OCTEP_VF_DB_MIN 8 +/* Packet threshold for Tx queue interrupt */ +#define OCTEP_VF_IQ_INTR_THRESHOLD 0x0 + +/* Minimum watermark for backpressure */ +#define OCTEP_VF_OQ_WMARK_MIN 256 + +/* Rx Queue: maximum descriptors per ring */ +#define OCTEP_VF_OQ_MAX_DESCRIPTORS 1024 + +/* Rx buffer size: Use page size buffers. + * Build skb from allocated page buffer once the packet is received. + * When a gathered packet is received, make head page as skb head and + * page buffers in consecutive Rx descriptors as fragments. 
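OCTEP_VF_OQ_BUF_SIZE just below is SKB_WITH_OVERHEAD(PAGE_SIZE): the driver posts whole pages to hardware but must reserve tail room for the skb_shared_info footer that build_skb() appends. A userspace model of that arithmetic; the footer size shown is illustrative and varies with kernel config and architecture:

#include <stdio.h>

#define MODEL_PAGE_SIZE   4096u
#define MODEL_SHINFO_SIZE  320u	/* stand-in for SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

int main(void)
{
	unsigned int buf_size = MODEL_PAGE_SIZE - MODEL_SHINFO_SIZE;

	printf("usable Rx payload per page: %u bytes\n", buf_size);
	return 0;
}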
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h new file mode 100644 index 000000000000..e03a647b0110 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_config.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_VF_CONFIG_H_ +#define _OCTEP_VF_CONFIG_H_ + +/* Tx instruction types by length */ +#define OCTEP_VF_32BYTE_INSTR 32 +#define OCTEP_VF_64BYTE_INSTR 64 + +/* Tx Queue: maximum descriptors per ring */ +#define OCTEP_VF_IQ_MAX_DESCRIPTORS 1024 +/* Minimum input (Tx) requests to be enqueued to ring doorbell */ +#define OCTEP_VF_DB_MIN 8 +/* Packet threshold for Tx queue interrupt */ +#define OCTEP_VF_IQ_INTR_THRESHOLD 0x0 + +/* Minimum watermark for backpressure */ +#define OCTEP_VF_OQ_WMARK_MIN 256 + +/* Rx Queue: maximum descriptors per ring */ +#define OCTEP_VF_OQ_MAX_DESCRIPTORS 1024 + +/* Rx buffer size: Use page size buffers. + * Build the skb from the allocated page buffer once the packet is received. + * When a gathered packet is received, use the head page as the skb head and + * the page buffers from consecutive Rx descriptors as its fragments. + */ +#define OCTEP_VF_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE)) +#define OCTEP_VF_OQ_PKTS_PER_INTR 128 +#define OCTEP_VF_OQ_REFILL_THRESHOLD (OCTEP_VF_OQ_MAX_DESCRIPTORS / 4) + +#define OCTEP_VF_OQ_INTR_PKT_THRESHOLD 1 +#define OCTEP_VF_OQ_INTR_TIME_THRESHOLD 10 + +#define OCTEP_VF_MSIX_NAME_SIZE (IFNAMSIZ + 32) + +/* Tx Queue wake threshold: + * wake up a stopped Tx queue if at least 2 descriptors are available. + * Even an skb with fragments consumes only one Tx queue descriptor entry. + */ +#define OCTEP_VF_WAKE_QUEUE_THRESHOLD 2 + +/* Minimum MTU supported by Octeon network interface */ +#define OCTEP_VF_MIN_MTU ETH_MIN_MTU +/* Maximum MTU supported by Octeon interface */ +#define OCTEP_VF_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN)) +/* Default MTU */ +#define OCTEP_VF_DEFAULT_MTU 1500 + +/* Macros to get octeon config params */ +#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq) +#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs) +#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type) +#define CFG_GET_IQ_INSTR_SIZE(cfg) (64) +#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min) +#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold) + +#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs) +#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size) +#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold) +#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt) +#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time) +#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark) + +#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->ring_cfg.active_io_rings) +#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->ring_cfg.max_io_rings) + +#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us) +#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us) + +#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
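The CFG_GET_* accessors above keep callers independent of the config struct layout. A tiny sketch of a caller sizing a ring through such an accessor (plain C; the 64-byte instruction size mirrors OCTEP_VF_64BYTE_INSTR above, other values are stand-ins):

#include <stdio.h>

struct iq_cfg { unsigned short num_descs; };
struct cfg    { struct iq_cfg iq; };

/* Same accessor style as CFG_GET_IQ_NUM_DESC() in octep_vf_config.h */
#define GET_IQ_NUM_DESC(c) ((c)->iq.num_descs)

int main(void)
{
    struct cfg c = { .iq = { .num_descs = 1024 } };

    /* ring memory = descriptors * 64-byte instructions */
    printf("ring bytes = %u\n", GET_IQ_NUM_DESC(&c) * 64u);
    return 0;
}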
+ +/* Hardware Tx Queue configuration. */ +struct octep_vf_iq_config { + /* Size of the Input queue (number of commands) */ + u16 num_descs; + + /* Command size - 32 or 64 bytes */ + u16 instr_type; + + /* Minimum number of commands pending to be posted to Octeon before driver + * hits the Input queue doorbell. + */ + u16 db_min; + + /* Trigger the IQ interrupt when processed cmd count reaches + * this level. + */ + u32 intr_threshold; +}; + +/* Hardware Rx Queue configuration. */ +struct octep_vf_oq_config { + /* Size of Output queue (number of descriptors) */ + u16 num_descs; + + /* Size of buffer in this Output queue. */ + u16 buf_size; + + /* The number of buffers that were consumed during packet processing + * by the driver on this Output queue before the driver attempts to + * replenish the descriptor ring with new buffers. + */ + u16 refill_threshold; + + /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host + * only if it sent as many packets as specified by this field. + * The driver usually does not use packet count interrupt coalescing. + */ + u32 oq_intr_pkt; + + /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host + * if at least one packet was sent in the time interval specified by + * this field. The driver uses time interval interrupt coalescing by + * default. The time is specified in microseconds. + */ + u32 oq_intr_time; + + /* Watermark for backpressure. + * Output queue sends backpressure signal to source when + * free buffer count falls below wmark. + */ + u32 wmark; +}; + +/* Tx/Rx configuration */ +struct octep_vf_ring_config { + /* Max number of IOQs */ + u16 max_io_rings; + + /* Number of active IOQs */ + u16 active_io_rings; +}; + +/* Octeon MSI-x config. */ +struct octep_vf_msix_config { + /* Number of IOQ interrupts */ + u16 ioq_msix; +}; + +/* Data Structure to hold configuration limits and active config */ +struct octep_vf_config { + /* Input Queue attributes. */ + struct octep_vf_iq_config iq; + + /* Output Queue attributes. */ + struct octep_vf_oq_config oq; + + /* MSI-X interrupt config */ + struct octep_vf_msix_config msix_cfg; + + /* NIC VF ring Configuration */ + struct octep_vf_ring_config ring_cfg; +}; +#endif /* _OCTEP_VF_CONFIG_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c new file mode 100644 index 000000000000..a1979b45e355 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#include "octep_vf_config.h" +#include "octep_vf_main.h" + +static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = { + "rx_alloc_errors", + "tx_busy_errors", + "tx_hw_pkts", + "tx_hw_octs", + "tx_hw_bcast", + "tx_hw_mcast", + "rx_hw_pkts", + "rx_hw_bytes", + "rx_hw_bcast", + "rx_dropped_bytes_fifo_full", +}; + +#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN) + +static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = { + "tx_packets_posted[Q-%u]", + "tx_packets_completed[Q-%u]", + "tx_bytes[Q-%u]", + "tx_busy[Q-%u]", +}; + +#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN) + +static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = { + "rx_packets[Q-%u]", + "rx_bytes[Q-%u]", + "rx_alloc_errors[Q-%u]", +}; + +#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN) + +static void octep_vf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + + strscpy(info->driver, OCTEP_VF_DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info)); +} + +static void octep_vf_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + char *strings = (char *)data; + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) { + snprintf(strings, ETH_GSTRING_LEN, "%s", + octep_vf_gstrings_global_stats[i]); + strings += ETH_GSTRING_LEN; + } + + for (i = 0; i < num_queues; i++) { + for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) { + snprintf(strings, ETH_GSTRING_LEN, + octep_vf_gstrings_tx_q_stats[j], i); + strings += ETH_GSTRING_LEN; + } + } + + for (i = 0; i < num_queues; i++) { + for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) { + snprintf(strings, ETH_GSTRING_LEN, + octep_vf_gstrings_rx_q_stats[j], i); + strings += ETH_GSTRING_LEN; + } + } + break; + default: + break; + } +}
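octep_vf_get_strings() above expands the per-queue templates ("tx_bytes[Q-%u]", ...) once per active ring, so the string table must be emitted in exactly the order the stats callback later fills its data array. A standalone sketch of the expansion (plain C; GSTRING_LEN stands in for ETH_GSTRING_LEN, and the templates here deliberately contain the %u conversion):

#include <stdio.h>

#define GSTRING_LEN 32  /* stand-in for ETH_GSTRING_LEN */

static const char tx_q_tmpl[][GSTRING_LEN] = {
    "tx_packets_posted[Q-%u]",
    "tx_bytes[Q-%u]",
};

int main(void)
{
    char name[GSTRING_LEN];
    unsigned int q, i;

    for (q = 0; q < 2; q++) {          /* number of active rings */
        for (i = 0; i < 2; i++) {
            snprintf(name, sizeof(name), tx_q_tmpl[i], q);
            printf("%s\n", name);      /* e.g. "tx_bytes[Q-1]" */
        }
    }
    return 0;
}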
+ +static int octep_vf_get_sset_count(struct net_device *netdev, int sset) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + switch (sset) { + case ETH_SS_STATS: + return OCTEP_VF_GLOBAL_STATS_CNT + (num_queues * + (OCTEP_VF_TX_Q_STATS_CNT + OCTEP_VF_RX_Q_STATS_CNT)); + default: + return -EOPNOTSUPP; + } +} + +static void octep_vf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + struct octep_vf_iface_tx_stats *iface_tx_stats; + struct octep_vf_iface_rx_stats *iface_rx_stats; + u64 rx_alloc_errors, tx_busy_errors; + int q, i; + + rx_alloc_errors = 0; + tx_busy_errors = 0; + + octep_vf_get_if_stats(oct); + iface_tx_stats = &oct->iface_tx_stats; + iface_rx_stats = &oct->iface_rx_stats; + + for (q = 0; q < oct->num_oqs; q++) { + struct octep_vf_iq *iq = oct->iq[q]; + struct octep_vf_oq *oq = oct->oq[q]; + + tx_busy_errors += iq->stats.tx_busy; + rx_alloc_errors += oq->stats.alloc_failures; + } + i = 0; + data[i++] = rx_alloc_errors; + data[i++] = tx_busy_errors; + data[i++] = iface_tx_stats->pkts; + data[i++] = iface_tx_stats->octs; + data[i++] = iface_tx_stats->bcst; + data[i++] = iface_tx_stats->mcst; + data[i++] = iface_rx_stats->pkts; + data[i++] = iface_rx_stats->octets; + data[i++] = iface_rx_stats->bcast_pkts; + data[i++] = iface_rx_stats->dropped_octets_fifo_full; + + /* Per Tx Queue stats */ + for (q = 0; q < oct->num_iqs; q++) { + struct octep_vf_iq *iq = oct->iq[q]; + + data[i++] = iq->stats.instr_posted; + data[i++] = iq->stats.instr_completed; + data[i++] = iq->stats.bytes_sent; + data[i++] = iq->stats.tx_busy; + } + + /* Per Rx Queue stats */ + for (q = 0; q < oct->num_oqs; q++) { + struct octep_vf_oq *oq = oct->oq[q]; + + data[i++] = oq->stats.packets; + data[i++] = oq->stats.bytes; + data[i++] = oq->stats.alloc_failures; + } +} + +#define OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(octep_vf_speeds, ksettings, name) \ +{ \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_T)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_R)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_LR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_CR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_KR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_LR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \ + if ((octep_vf_speeds) &
BIT(OCTEP_VF_LINK_MODE_40GBASE_SR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_LR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_CR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_KR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_LR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \ + if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_SR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \ +} + +static int octep_vf_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + struct octep_vf_iface_link_info *link_info; + u32 advertised_modes, supported_modes; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + octep_vf_get_link_info(oct); + + advertised_modes = oct->link_info.advertised_modes; + supported_modes = oct->link_info.supported_modes; + link_info = &oct->link_info; + + OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported); + OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising); + + if (link_info->autoneg) { + if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED) + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = AUTONEG_ENABLE; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + cmd->base.port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + if (netif_carrier_ok(netdev)) { + cmd->base.speed = link_info->speed; + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + return 0; +} + +static const struct ethtool_ops octep_vf_ethtool_ops = { + .get_drvinfo = octep_vf_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = octep_vf_get_strings, + .get_sset_count = octep_vf_get_sset_count, + .get_ethtool_stats = octep_vf_get_ethtool_stats, + .get_link_ksettings = octep_vf_get_link_ksettings, +}; + +void 
octep_vf_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &octep_vf_ethtool_ops; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c new file mode 100644 index 000000000000..dd49d0b8b494 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -0,0 +1,1231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/vmalloc.h> +#include <net/netdev_queues.h> + +#include "octep_vf_config.h" +#include "octep_vf_main.h" + +struct workqueue_struct *octep_vf_wq; + +/* Supported Devices */ +static const struct pci_device_id octep_vf_pci_id_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)}, + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_VF)}, + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_VF)}, + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_VF)}, + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_VF)}, + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_VF)}, + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_VF)}, + {0, }, +}; +MODULE_DEVICE_TABLE(pci, octep_vf_pci_id_tbl); + +MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>"); +MODULE_DESCRIPTION(OCTEP_VF_DRV_STRING); +MODULE_LICENSE("GPL"); + +/** + * octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. + * + * @oct: Octeon device private data structure. + * + * Allocate resources to hold per Tx/Rx queue interrupt info. + * This is the information passed to interrupt handler, from which napi poll + * is scheduled and includes quick access to private data of Tx/Rx queue + * corresponding to the interrupt being handled. + * + * Return: 0, on successful allocation of resources for all queue interrupts. + * -1, if failed to allocate any resource. + */ +static int octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct) +{ + struct octep_vf_ioq_vector *ioq_vector; + int i; + + for (i = 0; i < oct->num_oqs; i++) { + oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i])); + if (!oct->ioq_vector[i]) + goto free_ioq_vector; + + ioq_vector = oct->ioq_vector[i]; + ioq_vector->iq = oct->iq[i]; + ioq_vector->oq = oct->oq[i]; + ioq_vector->octep_vf_dev = oct; + } + + dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs); + return 0; + +free_ioq_vector: + while (i) { + i--; + vfree(oct->ioq_vector[i]); + oct->ioq_vector[i] = NULL; + } + return -1; +} + +/** + * octep_vf_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info. + * + * @oct: Octeon device private data structure. + */ +static void octep_vf_free_ioq_vectors(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + if (oct->ioq_vector[i]) { + vfree(oct->ioq_vector[i]); + oct->ioq_vector[i] = NULL; + } + } + netdev_info(oct->netdev, "Freed IOQ Vectors\n"); +} + +/** + * octep_vf_enable_msix_range() - enable MSI-x interrupts. + * + * @oct: Octeon device private data structure. + * + * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts) + * for the Octeon device. + * + * Return: 0, on successfully enabling all MSI-x interrupts. + * -1, if failed to enable any MSI-x interrupt. 
+ */ +static int octep_vf_enable_msix_range(struct octep_vf_device *oct) +{ + int num_msix, msix_allocated; + int i; + + /* One MSI-X vector per Tx/Rx queue pair; the VF uses no non-queue vectors */ + num_msix = oct->num_oqs; + oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL); + if (!oct->msix_entries) + goto msix_alloc_err; + + for (i = 0; i < num_msix; i++) + oct->msix_entries[i].entry = i; + + msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries, + num_msix, num_msix); + if (msix_allocated != num_msix) { + dev_err(&oct->pdev->dev, + "Failed to enable %d msix irqs; got only %d\n", + num_msix, msix_allocated); + goto enable_msix_err; + } + oct->num_irqs = msix_allocated; + dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n"); + + return 0; + +enable_msix_err: + if (msix_allocated > 0) + pci_disable_msix(oct->pdev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; +msix_alloc_err: + return -1; +} + +/** + * octep_vf_disable_msix() - disable MSI-x interrupts. + * + * @oct: Octeon device private data structure. + * + * Disable MSI-x on the Octeon device. + */ +static void octep_vf_disable_msix(struct octep_vf_device *oct) +{ + pci_disable_msix(oct->pdev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + dev_info(&oct->pdev->dev, "Disabled MSI-X\n"); +} + +/** + * octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data contains pointers to Tx/Rx queue private data + * and corresponding NAPI context. + * + * This is the common handler for all Tx/Rx queue interrupts; it dispatches + * to the chip-specific IOQ interrupt handler. + */ +static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data) +{ + struct octep_vf_ioq_vector *ioq_vector = data; + struct octep_vf_device *oct = ioq_vector->octep_vf_dev; + + return oct->hw_ops.ioq_intr_handler(ioq_vector); +} + +/** + * octep_vf_request_irqs() - Register interrupt handlers. + * + * @oct: Octeon device private data structure. + * + * Register handlers for all queue and non-queue interrupts. + * + * Return: 0, on successful registration of all interrupt handlers. + * -1, on any error. + */ +static int octep_vf_request_irqs(struct octep_vf_device *oct) +{ + struct net_device *netdev = oct->netdev; + struct octep_vf_ioq_vector *ioq_vector; + struct msix_entry *msix_entry; + int ret, i; + + /* Request IRQs for Tx/Rx queues */ + for (i = 0; i < oct->num_oqs; i++) { + ioq_vector = oct->ioq_vector[i]; + msix_entry = &oct->msix_entries[i]; + + snprintf(ioq_vector->name, sizeof(ioq_vector->name), + "%s-q%d", netdev->name, i); + ret = request_irq(msix_entry->vector, + octep_vf_ioq_intr_handler, 0, + ioq_vector->name, ioq_vector); + if (ret) { + netdev_err(netdev, + "request_irq failed for Q-%d; err=%d", + i, ret); + goto ioq_irq_err; + } + + cpumask_set_cpu(i % num_online_cpus(), + &ioq_vector->affinity_mask); + irq_set_affinity_hint(msix_entry->vector, + &ioq_vector->affinity_mask); + } + + return 0; +ioq_irq_err: + while (i) { + --i; + free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]); + } + return -1; +}
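octep_vf_request_irqs() above spreads queue vectors over CPUs with a plain modulo before setting each vector's affinity hint. The mapping, isolated (plain C, no kernel APIs; names are illustrative):

#include <stdio.h>

/* Same spreading rule as "i % num_online_cpus()" in octep_vf_request_irqs() */
static unsigned int vector_cpu(unsigned int vec, unsigned int ncpus)
{
    return vec % ncpus;
}

int main(void)
{
    unsigned int q, ncpus = 4;

    for (q = 0; q < 8; q++)  /* 8 queue vectors round-robined on 4 CPUs */
        printf("irq for Q-%u -> cpu %u\n", q, vector_cpu(q, ncpus));
    return 0;
}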
+ +/** + * octep_vf_free_irqs() - free all registered interrupts. + * + * @oct: Octeon device private data structure. + * + * Free all queue and non-queue interrupts of the Octeon device. + */ +static void octep_vf_free_irqs(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < oct->num_irqs; i++) { + irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); + free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]); + } + netdev_info(oct->netdev, "IRQs freed\n"); +} + +/** + * octep_vf_setup_irqs() - setup interrupts for the Octeon device. + * + * @oct: Octeon device private data structure. + * + * Allocate data structures to hold per interrupt information, allocate/enable + * MSI-x interrupt and register interrupt handlers. + * + * Return: 0, on successful allocation and registration of all interrupts. + * -1, on any error. + */ +static int octep_vf_setup_irqs(struct octep_vf_device *oct) +{ + if (octep_vf_alloc_ioq_vectors(oct)) + goto ioq_vector_err; + + if (octep_vf_enable_msix_range(oct)) + goto enable_msix_err; + + if (octep_vf_request_irqs(oct)) + goto request_irq_err; + + return 0; + +request_irq_err: + octep_vf_disable_msix(oct); +enable_msix_err: + octep_vf_free_ioq_vectors(oct); +ioq_vector_err: + return -1; +} + +/** + * octep_vf_clean_irqs() - free all interrupts and their resources. + * + * @oct: Octeon device private data structure. + */ +static void octep_vf_clean_irqs(struct octep_vf_device *oct) +{ + octep_vf_free_irqs(oct); + octep_vf_disable_msix(oct); + octep_vf_free_ioq_vectors(oct); +} + +/** + * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue. + * + * @iq: Octeon Tx queue data structure. + * @oq: Octeon Rx queue data structure. + */ +static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq) +{ + u32 pkts_pend = oq->pkts_pending; + + netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); + if (iq->pkts_processed) { + writel(iq->pkts_processed, iq->inst_cnt_reg); + iq->pkt_in_done -= iq->pkts_processed; + iq->pkts_processed = 0; + } + if (oq->last_pkt_count - pkts_pend) { + writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg); + oq->last_pkt_count = pkts_pend; + } + + /* Flush the previous writes before writing to RESEND bit */ + smp_wmb(); + writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg); + writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg); +} + +/** + * octep_vf_napi_poll() - NAPI poll function for Tx/Rx. + * + * @napi: pointer to napi context. + * @budget: max number of packets to be processed in single invocation. + */ +static int octep_vf_napi_poll(struct napi_struct *napi, int budget) +{ + struct octep_vf_ioq_vector *ioq_vector = + container_of(napi, struct octep_vf_ioq_vector, napi); + u32 tx_pending, rx_done; + + tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, 64); + rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget); + + /* need more polling if tx completion processing is still pending or + * at least 'budget' number of rx packets were processed. + */ + if (tx_pending || rx_done >= budget) + return budget; + + if (likely(napi_complete_done(napi, rx_done))) + octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq); + + return rx_done; +}
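The poll loop above follows the usual NAPI contract: return the full budget to stay scheduled while Tx completions are pending or Rx consumed the whole budget; otherwise complete and re-arm the queue interrupt. The decision, isolated as a pure function with a few checks (a sketch, not driver code):

#include <assert.h>
#include <stdbool.h>

/* Mirrors the return-value logic of octep_vf_napi_poll() */
static int poll_result(bool tx_pending, int rx_done, int budget)
{
    if (tx_pending || rx_done >= budget)
        return budget;   /* stay in polling mode, IRQ stays off */
    return rx_done;      /* complete; caller re-enables the queue IRQ */
}

int main(void)
{
    assert(poll_result(true,  0, 64) == 64);  /* Tx work left: repoll */
    assert(poll_result(false, 64, 64) == 64); /* budget exhausted: repoll */
    assert(poll_result(false, 17, 64) == 17); /* idle enough: complete */
    return 0;
}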
+ +/** + * octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_vf_napi_add(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i); + netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll); + oct->oq[i]->napi = &oct->ioq_vector[i]->napi; + } +} + +/** + * octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_vf_napi_delete(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i); + netif_napi_del(&oct->ioq_vector[i]->napi); + oct->oq[i]->napi = NULL; + } +} + +/** + * octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_vf_napi_enable(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i); + napi_enable(&oct->ioq_vector[i]->napi); + } +} + +/** + * octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_vf_napi_disable(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i); + napi_disable(&oct->ioq_vector[i]->napi); + } +} + +static void octep_vf_link_up(struct net_device *netdev) +{ + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); +} + +static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up) +{ + int err; + + err = octep_vf_mbox_set_rx_state(oct, up); + if (err) + netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err); +} + +static int octep_vf_get_link_status(struct octep_vf_device *oct) +{ + int err; + + err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up); + if (err) + netdev_err(oct->netdev, "Get link status failed with err:%d\n", err); + return oct->link_info.oper_up; +} + +static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up) +{ + int err; + + err = octep_vf_mbox_set_link_status(oct, up); + if (err) { + netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err); + return; + } + oct->link_info.oper_up = up; +}
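The link helpers above share one shape: issue the mailbox request, log on failure, and cache the new state only on success. A sketch of that wrapper pattern (plain C with illustrative names; the mailbox call is stubbed out):

#include <stdio.h>

struct link { int oper_up; };

static int mbox_set_link(int up) { (void)up; return 0; /* pretend PF acked */ }

/* Same shape as octep_vf_set_link_status(): cache state only on success */
static void set_link_status(struct link *l, int up)
{
    int err = mbox_set_link(up);

    if (err) {
        fprintf(stderr, "set link %d failed: %d\n", up, err);
        return;          /* cached state left untouched on failure */
    }
    l->oper_up = up;
}

int main(void)
{
    struct link l = { 0 };

    set_link_status(&l, 1);
    printf("oper_up=%d\n", l.oper_up);
    return 0;
}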
+ +/** + * octep_vf_open() - start the octeon network device. + * + * @netdev: pointer to kernel network device. + * + * setup Tx/Rx queues, interrupts and enable hardware operation of Tx/Rx queues + * and interrupts. + * + * Return: 0, on successfully setting up the device and bringing it up. + * -1, on any error. + */ +static int octep_vf_open(struct net_device *netdev) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + int err, ret; + + netdev_info(netdev, "Starting netdev ...\n"); + netif_carrier_off(netdev); + + oct->hw_ops.reset_io_queues(oct); + + if (octep_vf_setup_iqs(oct)) + goto setup_iq_err; + if (octep_vf_setup_oqs(oct)) + goto setup_oq_err; + if (octep_vf_setup_irqs(oct)) + goto setup_irq_err; + + err = netif_set_real_num_tx_queues(netdev, oct->num_oqs); + if (err) + goto set_queues_err; + err = netif_set_real_num_rx_queues(netdev, oct->num_iqs); + if (err) + goto set_queues_err; + + octep_vf_napi_add(oct); + octep_vf_napi_enable(oct); + + oct->link_info.admin_up = 1; + octep_vf_set_rx_state(oct, true); + + ret = octep_vf_get_link_status(oct); + if (!ret) + octep_vf_set_link_status(oct, true); + + /* Enable the input and output queues for this Octeon device */ + oct->hw_ops.enable_io_queues(oct); + + /* Enable Octeon device interrupts */ + oct->hw_ops.enable_interrupts(oct); + + octep_vf_oq_dbell_init(oct); + + ret = octep_vf_get_link_status(oct); + if (ret) + octep_vf_link_up(netdev); + + return 0; + +set_queues_err: + octep_vf_napi_disable(oct); + octep_vf_napi_delete(oct); + octep_vf_clean_irqs(oct); +setup_irq_err: + octep_vf_free_oqs(oct); +setup_oq_err: + octep_vf_free_iqs(oct); +setup_iq_err: + return -1; +} + +/** + * octep_vf_stop() - stop the octeon network device. + * + * @netdev: pointer to kernel network device. + * + * stop the device Tx/Rx operations, bring down the link and + * free up all resources allocated for Tx/Rx queues and interrupts. + */ +static int octep_vf_stop(struct net_device *netdev) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + + netdev_info(netdev, "Stopping the device ...\n"); + + /* Stop Tx from stack */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + octep_vf_set_link_status(oct, false); + octep_vf_set_rx_state(oct, false); + + oct->link_info.admin_up = 0; + oct->link_info.oper_up = 0; + + oct->hw_ops.disable_interrupts(oct); + octep_vf_napi_disable(oct); + octep_vf_napi_delete(oct); + + octep_vf_clean_irqs(oct); + octep_vf_clean_iqs(oct); + + oct->hw_ops.disable_io_queues(oct); + oct->hw_ops.reset_io_queues(oct); + octep_vf_free_oqs(oct); + octep_vf_free_iqs(oct); + netdev_info(netdev, "Device stopped !!\n"); + return 0; +} + +/** + * octep_vf_iq_full_check() - check if a Tx queue is full. + * + * @iq: Octeon Tx queue data structure. + * + * Return: 0, if the Tx queue is not full. + * 1, if the Tx queue is full. + */ +static int octep_vf_iq_full_check(struct octep_vf_iq *iq) +{ + int ret; + + ret = netif_subqueue_maybe_stop(iq->netdev, iq->q_no, IQ_INSTR_SPACE(iq), + OCTEP_VF_WAKE_QUEUE_THRESHOLD, + OCTEP_VF_WAKE_QUEUE_THRESHOLD); + switch (ret) { + case 0: /* Stopped the queue, since IQ is full */ + return 1; + case -1: /* Completions on another CPU freed descriptors and + * re-enabled the queue after it was stopped + */ + iq->stats.restart_cnt++; + fallthrough; + case 1: /* Queue left enabled, since IQ is not yet full */ + return 0; + } + + return 1; +}
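octep_vf_iq_full_check() above wraps netif_subqueue_maybe_stop(), whose result is tri-state: 0 when it stopped the queue, 1 when enough descriptors remain, and -1 when a racing completion re-woke a queue it had just stopped. A simplified plain-C model of that hysteresis (a sketch; the real helper additionally orders the recheck against producer/consumer updates):

#include <assert.h>

/* Tri-state modeled on netif_subqueue_maybe_stop():
 *  0 -> stopped (no space),  1 -> left running,
 * -1 -> was stopped, but completions freed space, so re-woken
 */
static int maybe_stop(int space, int stop_thrs, int start_thrs, int *stopped)
{
    if (!*stopped) {
        if (space >= stop_thrs)
            return 1;
        *stopped = 1;
        /* recheck after stopping to close the race with completions */
        if (space < start_thrs)
            return 0;
    }
    if (space < start_thrs)
        return 0;
    *stopped = 0;
    return -1;
}

int main(void)
{
    int stopped = 0;

    assert(maybe_stop(8, 2, 2, &stopped) == 1);  /* plenty of space */
    assert(maybe_stop(1, 2, 2, &stopped) == 0);  /* full: stop queue */
    assert(maybe_stop(5, 2, 2, &stopped) == -1); /* space back: rewake */
    return 0;
}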
+ +/** + * octep_vf_start_xmit() - Enqueue packet to Octeon hardware Tx Queue. + * + * @skb: packet skbuff pointer. + * @netdev: kernel network device. + * + * Return: NETDEV_TX_BUSY, if Tx Queue is full. + * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue. + */ +static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + netdev_features_t feat = netdev->features; + struct octep_vf_tx_sglist_desc *sglist; + struct octep_vf_tx_buffer *tx_buffer; + struct octep_vf_tx_desc_hw *hw_desc; + struct skb_shared_info *shinfo; + struct octep_vf_instr_hdr *ih; + struct octep_vf_iq *iq; + skb_frag_t *frag; + u16 nr_frags, si; + int xmit_more; + u16 q_no, wi; + + if (skb_put_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + q_no = skb_get_queue_mapping(skb); + if (q_no >= oct->num_iqs) { + netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no); + q_no = q_no % oct->num_iqs; + } + + iq = oct->iq[q_no]; + + shinfo = skb_shinfo(skb); + nr_frags = shinfo->nr_frags; + + wi = iq->host_write_index; + hw_desc = &iq->desc_ring[wi]; + hw_desc->ih64 = 0; + + tx_buffer = iq->buff_info + wi; + tx_buffer->skb = skb; + + ih = &hw_desc->ih; + ih->pkind = oct->fw_info.pkind; + ih->fsz = oct->fw_info.fsz; + ih->tlen = skb->len + ih->fsz; + + if (!nr_frags) { + tx_buffer->gather = 0; + tx_buffer->dma = dma_map_single(iq->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(iq->dev, tx_buffer->dma)) + goto dma_map_err; + hw_desc->dptr = tx_buffer->dma; + } else { + /* Scatter/Gather */ + dma_addr_t dma; + u16 len; + + sglist = tx_buffer->sglist; + + ih->gsz = nr_frags + 1; + ih->gather = 1; + tx_buffer->gather = 1; + + len = skb_headlen(skb); + dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(iq->dev, dma)) + goto dma_map_err; + + memset(sglist, 0, OCTEP_VF_SGLIST_SIZE_PER_PKT); + sglist[0].len[3] = len; + sglist[0].dma_ptr[0] = dma; + + si = 1; /* entry 0 is main skb, mapped above */ + frag = &shinfo->frags[0]; + while (nr_frags--) { + len = skb_frag_size(frag); + dma = skb_frag_dma_map(iq->dev, frag, 0, + len, DMA_TO_DEVICE); + if (dma_mapping_error(iq->dev, dma)) + goto dma_map_sg_err; + + sglist[si >> 2].len[3 - (si & 3)] = len; + sglist[si >> 2].dma_ptr[si & 3] = dma; + + frag++; + si++; + } + hw_desc->dptr = tx_buffer->sglist_dma; + }
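/* Annotation (not driver code): the lane mapping implied by the si >> 2 and
 * 3 - (si & 3) arithmetic above. Each sglist element carries four fragments;
 * the four u16 lengths share one 64-bit word and are stored in reverse lane
 * order, presumably so the device reads them in its own 64-bit view of that
 * word, while the four 64-bit addresses use the natural lane order:
 *
 *   si=0 -> sglist[0].len[3], sglist[0].dma_ptr[0]   (skb head)
 *   si=1 -> sglist[0].len[2], sglist[0].dma_ptr[1]   (frag 0)
 *   si=2 -> sglist[0].len[1], sglist[0].dma_ptr[2]   (frag 1)
 *   si=3 -> sglist[0].len[0], sglist[0].dma_ptr[3]   (frag 2)
 *   si=4 -> sglist[1].len[3], sglist[1].dma_ptr[0]   (frag 3), ...
 */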
+ if (oct->fw_info.tx_ol_flags) { + if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) { + hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM; + hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO; + hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size; + hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs; + } else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { + hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM; + } + /* due to ESR txm will be swapped by hw */ + hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]); + } + + xmit_more = netdev_xmit_more(); + + netdev_tx_sent_queue(iq->netdev_q, skb->len); + + skb_tx_timestamp(skb); + iq->fill_cnt++; + wi++; + iq->host_write_index = wi & iq->ring_size_mask; + + /* octep_vf_iq_full_check() stops the queue and returns 1 when the queue + * became full by inserting the current packet; ring the doorbell in that + * case, when the stack will not send more (!xmit_more), or when the + * batching threshold is reached. Otherwise defer the doorbell write. + */ + if (!octep_vf_iq_full_check(iq) && xmit_more && + iq->fill_cnt < iq->fill_threshold) + return NETDEV_TX_OK; + + goto ring_dbell; + +dma_map_sg_err: + if (si > 0) { + dma_unmap_single(iq->dev, sglist[0].dma_ptr[0], + sglist[0].len[3], DMA_TO_DEVICE); + sglist[0].len[3] = 0; + } + while (si > 1) { + dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3], + sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE); + sglist[si >> 2].len[3 - (si & 3)] = 0; + si--; + } + tx_buffer->gather = 0; +dma_map_err: + dev_kfree_skb_any(skb); +ring_dbell: + /* Flush the hw descriptors before writing to doorbell */ + smp_wmb(); + writel(iq->fill_cnt, iq->doorbell_reg); + iq->stats.instr_posted += iq->fill_cnt; + iq->fill_cnt = 0; + return NETDEV_TX_OK; +} + +int octep_vf_get_if_stats(struct octep_vf_device *oct) +{ + struct octep_vf_iface_rxtx_stats vf_stats; + int ret, size; + + memset(&vf_stats, 0, sizeof(struct octep_vf_iface_rxtx_stats)); + ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_STATS, + (u8 *)&vf_stats, &size); + + if (ret) + return ret; + + memcpy(&oct->iface_rx_stats, &vf_stats.iface_rx_stats, + sizeof(struct octep_vf_iface_rx_stats)); + memcpy(&oct->iface_tx_stats, &vf_stats.iface_tx_stats, + sizeof(struct octep_vf_iface_tx_stats)); + + return 0; +} + +int octep_vf_get_link_info(struct octep_vf_device *oct) +{ + int ret, size; + + ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO, + (u8 *)&oct->link_info, &size); + if (ret) { + dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n"); + return ret; + } + return 0; +} + +/** + * octep_vf_get_stats64() - Get Octeon network device statistics. + * + * @netdev: kernel network device. + * @stats: pointer to stats structure to be filled in. + */ +static void octep_vf_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + u64 tx_packets, tx_bytes, rx_packets, rx_bytes; + int q; + + tx_packets = 0; + tx_bytes = 0; + rx_packets = 0; + rx_bytes = 0; + for (q = 0; q < oct->num_oqs; q++) { + struct octep_vf_iq *iq = oct->iq[q]; + struct octep_vf_oq *oq = oct->oq[q]; + + tx_packets += iq->stats.instr_completed; + tx_bytes += iq->stats.bytes_sent; + rx_packets += oq->stats.packets; + rx_bytes += oq->stats.bytes; + } + stats->tx_packets = tx_packets; + stats->tx_bytes = tx_bytes; + stats->rx_packets = rx_packets; + stats->rx_bytes = rx_bytes; + if (!octep_vf_get_if_stats(oct)) { + stats->multicast = oct->iface_rx_stats.mcast_pkts; + stats->rx_errors = oct->iface_rx_stats.err_pkts; + stats->rx_dropped = oct->iface_rx_stats.dropped_pkts_fifo_full + + oct->iface_rx_stats.err_pkts; + stats->rx_missed_errors = oct->iface_rx_stats.dropped_pkts_fifo_full; + stats->tx_dropped = oct->iface_tx_stats.dropped; + } +} + +/** + * octep_vf_tx_timeout_task - work queue task to handle Tx queue timeout. + * + * @work: pointer to Tx queue timeout work_struct + * + * Stop and start the device so that it frees up all queue resources + * and restarts the queues, that potentially clears a Tx queue timeout + * condition. + */ +static void octep_vf_tx_timeout_task(struct work_struct *work) +{ + struct octep_vf_device *oct = container_of(work, struct octep_vf_device, + tx_timeout_task); + struct net_device *netdev = oct->netdev; + + rtnl_lock(); + if (netif_running(netdev)) { + octep_vf_stop(netdev); + octep_vf_open(netdev); + } + rtnl_unlock(); + netdev_put(netdev, NULL); +} + +/** + * octep_vf_tx_timeout() - Handle Tx Queue timeout.
+ * + * @netdev: pointer to kernel network device. + * @txqueue: Timed out Tx queue number. + * + * Schedule a work to handle Tx queue timeout. + */ +static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + + netdev_hold(netdev, NULL, GFP_ATOMIC); + schedule_work(&oct->tx_timeout_task); +} + +static int octep_vf_set_mac(struct net_device *netdev, void *p) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + struct sockaddr *addr = (struct sockaddr *)p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = octep_vf_mbox_set_mac_addr(oct, addr->sa_data); + if (err) + return err; + + memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(netdev, addr->sa_data); + + return 0; +} + +static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + struct octep_vf_iface_link_info *link_info; + int err; + + link_info = &oct->link_info; + if (link_info->mtu == new_mtu) + return 0; + + err = octep_vf_mbox_set_mtu(oct, new_mtu); + if (!err) { + oct->link_info.mtu = new_mtu; + netdev->mtu = new_mtu; + } + return err; +} + +static int octep_vf_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct octep_vf_device *oct = netdev_priv(netdev); + u16 rx_offloads = 0, tx_offloads = 0; + int err; + + /* We only support features received from firmware */ + if ((features & netdev->hw_features) != features) + return -EINVAL; + + if (features & NETIF_F_TSO) + tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO; + + if (features & NETIF_F_TSO6) + tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO; + + if (features & NETIF_F_IP_CSUM) + tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM; + + if (features & NETIF_F_IPV6_CSUM) + tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM; + + if (features & NETIF_F_RXCSUM) + rx_offloads |= OCTEP_VF_RX_OFFLOAD_CKSUM; + + err = octep_vf_mbox_set_offloads(oct, tx_offloads, rx_offloads); + if (!err) + netdev->features = features; + + return err; +} + +static const struct net_device_ops octep_vf_netdev_ops = { + .ndo_open = octep_vf_open, + .ndo_stop = octep_vf_stop, + .ndo_start_xmit = octep_vf_start_xmit, + .ndo_get_stats64 = octep_vf_get_stats64, + .ndo_tx_timeout = octep_vf_tx_timeout, + .ndo_set_mac_address = octep_vf_set_mac, + .ndo_change_mtu = octep_vf_change_mtu, + .ndo_set_features = octep_vf_set_features, +}; + +static const char *octep_vf_devid_to_str(struct octep_vf_device *oct) +{ + switch (oct->chip_id) { + case OCTEP_PCI_DEVICE_ID_CN93_VF: + return "CN93XX"; + case OCTEP_PCI_DEVICE_ID_CNF95N_VF: + return "CNF95N"; + case OCTEP_PCI_DEVICE_ID_CN10KA_VF: + return "CN10KA"; + case OCTEP_PCI_DEVICE_ID_CNF10KA_VF: + return "CNF10KA"; + case OCTEP_PCI_DEVICE_ID_CNF10KB_VF: + return "CNF10KB"; + case OCTEP_PCI_DEVICE_ID_CN10KB_VF: + return "CN10KB"; + default: + return "Unsupported"; + } +} + +/** + * octep_vf_device_setup() - Setup Octeon Device. + * + * @oct: Octeon device private data structure. + * + * Setup Octeon device hardware operations, configuration, etc ... 
+ */ +int octep_vf_device_setup(struct octep_vf_device *oct) +{ + struct pci_dev *pdev = oct->pdev; + + /* allocate memory for oct->conf */ + oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL); + if (!oct->conf) + return -ENOMEM; + + /* Map BAR region 0 */ + oct->mmio.hw_addr = ioremap(pci_resource_start(oct->pdev, 0), + pci_resource_len(oct->pdev, 0)); + if (!oct->mmio.hw_addr) { + dev_err(&pdev->dev, + "Failed to remap BAR0; start=0x%llx len=0x%llx\n", + pci_resource_start(oct->pdev, 0), + pci_resource_len(oct->pdev, 0)); + goto ioremap_err; + } + oct->mmio.mapped = 1; + + oct->chip_id = pdev->device; + oct->rev_id = pdev->revision; + dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device); + + switch (oct->chip_id) { + case OCTEP_PCI_DEVICE_ID_CN93_VF: + case OCTEP_PCI_DEVICE_ID_CNF95N_VF: + case OCTEP_PCI_DEVICE_ID_CN98_VF: + dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n", + octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct), + OCTEP_VF_MINOR_REV(oct)); + octep_vf_device_setup_cn93(oct); + break; + case OCTEP_PCI_DEVICE_ID_CNF10KA_VF: + case OCTEP_PCI_DEVICE_ID_CN10KA_VF: + case OCTEP_PCI_DEVICE_ID_CNF10KB_VF: + case OCTEP_PCI_DEVICE_ID_CN10KB_VF: + dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n", + octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct), + OCTEP_VF_MINOR_REV(oct)); + octep_vf_device_setup_cnxk(oct); + break; + default: + dev_err(&pdev->dev, "Unsupported device\n"); + goto unsupported_dev; + } + + return 0; + +unsupported_dev: + iounmap(oct->mmio.hw_addr); +ioremap_err: + kfree(oct->conf); + return -EOPNOTSUPP; +} + +/** + * octep_vf_device_cleanup() - Cleanup Octeon Device. + * + * @oct: Octeon device private data structure. + * + * Cleanup Octeon device allocated resources. + */ +static void octep_vf_device_cleanup(struct octep_vf_device *oct) +{ + dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n"); + + if (oct->mmio.mapped) + iounmap(oct->mmio.hw_addr); + + kfree(oct->conf); + oct->conf = NULL; +} + +static int octep_vf_get_mac_addr(struct octep_vf_device *oct, u8 *addr) +{ + return octep_vf_mbox_get_mac_addr(oct, addr); +} + +/** + * octep_vf_probe() - Octeon PCI device probe handler. + * + * @pdev: PCI device structure. + * @ent: entry in Octeon PCI device ID table. + * + * Initializes and enables the Octeon PCI device for network operations. + * Initializes Octeon private data structure and registers a network device. 
+ */ +static int octep_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct octep_vf_device *octep_vf_dev; + struct net_device *netdev; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return err; + } + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "Failed to set DMA mask !!\n"); + goto disable_pci_device; + } + + err = pci_request_mem_regions(pdev, OCTEP_VF_DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Failed to map PCI memory regions\n"); + goto disable_pci_device; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev_mq(sizeof(struct octep_vf_device), + OCTEP_VF_MAX_QUEUES); + if (!netdev) { + dev_err(&pdev->dev, "Failed to allocate netdev\n"); + err = -ENOMEM; + goto mem_regions_release; + } + SET_NETDEV_DEV(netdev, &pdev->dev); + + octep_vf_dev = netdev_priv(netdev); + octep_vf_dev->netdev = netdev; + octep_vf_dev->pdev = pdev; + octep_vf_dev->dev = &pdev->dev; + pci_set_drvdata(pdev, octep_vf_dev); + + err = octep_vf_device_setup(octep_vf_dev); + if (err) { + dev_err(&pdev->dev, "Device setup failed\n"); + goto netdevice_free; + } + INIT_WORK(&octep_vf_dev->tx_timeout_task, octep_vf_tx_timeout_task); + + netdev->netdev_ops = &octep_vf_netdev_ops; + octep_vf_set_ethtool_ops(netdev); + netif_carrier_off(netdev); + + if (octep_vf_setup_mbox(octep_vf_dev)) { + dev_err(&pdev->dev, "VF Mailbox setup failed\n"); + err = -ENOMEM; + goto device_cleanup; + } + + if (octep_vf_mbox_version_check(octep_vf_dev)) { + dev_err(&pdev->dev, "PF VF Mailbox version mismatch\n"); + err = -EINVAL; + goto delete_mbox; + } + + if (octep_vf_mbox_get_fw_info(octep_vf_dev)) { + dev_err(&pdev->dev, "unable to get fw info\n"); + err = -EINVAL; + goto delete_mbox; + } + + netdev->hw_features = NETIF_F_SG; + if (OCTEP_VF_TX_IP_CSUM(octep_vf_dev->fw_info.tx_ol_flags)) + netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + + if (OCTEP_VF_RX_IP_CSUM(octep_vf_dev->fw_info.rx_ol_flags)) + netdev->hw_features |= NETIF_F_RXCSUM; + + netdev->min_mtu = OCTEP_VF_MIN_MTU; + netdev->max_mtu = OCTEP_VF_MAX_MTU; + netdev->mtu = OCTEP_VF_DEFAULT_MTU; + + if (OCTEP_VF_TX_TSO(octep_vf_dev->fw_info.tx_ol_flags)) { + netdev->hw_features |= NETIF_F_TSO; + netif_set_tso_max_size(netdev, netdev->max_mtu); + } + + netdev->features |= netdev->hw_features; + octep_vf_get_mac_addr(octep_vf_dev, octep_vf_dev->mac_addr); + eth_hw_addr_set(netdev, octep_vf_dev->mac_addr); + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto delete_mbox; + } + dev_info(&pdev->dev, "Device probe successful\n"); + return 0; + +delete_mbox: + octep_vf_delete_mbox(octep_vf_dev); +device_cleanup: + octep_vf_device_cleanup(octep_vf_dev); +netdevice_free: + free_netdev(netdev); +mem_regions_release: + pci_release_mem_regions(pdev); +disable_pci_device: + pci_disable_device(pdev); + dev_err(&pdev->dev, "Device probe failed\n"); + return err; +} + +/** + * octep_vf_remove() - Remove Octeon PCI device from driver control. + * + * @pdev: PCI device structure of the Octeon device. + * + * Cleanup all resources allocated for the Octeon device. + * Unregister from network device and disable the PCI device. 
+ */ +static void octep_vf_remove(struct pci_dev *pdev) +{ + struct octep_vf_device *oct = pci_get_drvdata(pdev); + struct net_device *netdev; + + if (!oct) + return; + + octep_vf_mbox_dev_remove(oct); + cancel_work_sync(&oct->tx_timeout_task); + netdev = oct->netdev; + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + octep_vf_delete_mbox(oct); + octep_vf_device_cleanup(oct); + pci_release_mem_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); +} + +static struct pci_driver octep_vf_driver = { + .name = OCTEP_VF_DRV_NAME, + .id_table = octep_vf_pci_id_tbl, + .probe = octep_vf_probe, + .remove = octep_vf_remove, +}; + +/** + * octep_vf_init_module() - Module initialization. + * + * create common resource for the driver and register PCI driver. + */ +static int __init octep_vf_init_module(void) +{ + int ret; + + pr_info("%s: Loading %s ...\n", OCTEP_VF_DRV_NAME, OCTEP_VF_DRV_STRING); + + ret = pci_register_driver(&octep_vf_driver); + if (ret < 0) { + pr_err("%s: Failed to register PCI driver; err=%d\n", + OCTEP_VF_DRV_NAME, ret); + return ret; + } + + return ret; +} + +/** + * octep_vf_exit_module() - Module exit routine. + * + * unregister the driver with PCI subsystem and cleanup common resources. + */ +static void __exit octep_vf_exit_module(void) +{ + pr_info("%s: Unloading ...\n", OCTEP_VF_DRV_NAME); + + pci_unregister_driver(&octep_vf_driver); + + pr_info("%s: Unloading complete\n", OCTEP_VF_DRV_NAME); +} + +module_init(octep_vf_init_module); +module_exit(octep_vf_exit_module); diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h new file mode 100644 index 000000000000..5769f62545cd --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.h @@ -0,0 +1,334 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_VF_MAIN_H_ +#define _OCTEP_VF_MAIN_H_ + +#include "octep_vf_tx.h" +#include "octep_vf_rx.h" +#include "octep_vf_mbox.h" + +#define OCTEP_VF_DRV_NAME "octeon_ep_vf" +#define OCTEP_VF_DRV_STRING "Marvell Octeon EndPoint NIC VF Driver" + +#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 //93xx VF +#define OCTEP_PCI_DEVICE_ID_CNF95N_VF 0xB403 //95N VF +#define OCTEP_PCI_DEVICE_ID_CN98_VF 0xB103 +#define OCTEP_PCI_DEVICE_ID_CN10KA_VF 0xB903 +#define OCTEP_PCI_DEVICE_ID_CNF10KA_VF 0xBA03 +#define OCTEP_PCI_DEVICE_ID_CNF10KB_VF 0xBC03 +#define OCTEP_PCI_DEVICE_ID_CN10KB_VF 0xBD03 + +#define OCTEP_VF_MAX_QUEUES 63 +#define OCTEP_VF_MAX_IQ OCTEP_VF_MAX_QUEUES +#define OCTEP_VF_MAX_OQ OCTEP_VF_MAX_QUEUES + +#define OCTEP_VF_MAX_MSIX_VECTORS OCTEP_VF_MAX_OQ + +#define OCTEP_VF_IQ_INTR_RESEND_BIT 59 +#define OCTEP_VF_OQ_INTR_RESEND_BIT 59 + +#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \ + ((iq__)->host_write_index - (iq__)->flush_index) & \ + (iq__)->ring_size_mask; \ + }) +#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \ + (iq_)->max_count - IQ_INSTR_PENDING(iq_); \ + }) + +/* PCI address space mapping information. + * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of + * Octeon gets mapped to different physical address spaces in + * the kernel. + */ +struct octep_vf_mmio { + /* The physical address to which the PCI address space is mapped. */ + u8 __iomem *hw_addr; + + /* Flag indicating the mapping was successful. 
*/ + int mapped; +}; + +struct octep_vf_hw_ops { + void (*setup_iq_regs)(struct octep_vf_device *oct, int q); + void (*setup_oq_regs)(struct octep_vf_device *oct, int q); + void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox); + + irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector); + irqreturn_t (*ioq_intr_handler)(void *ioq_vector); + void (*reinit_regs)(struct octep_vf_device *oct); + u32 (*update_iq_read_idx)(struct octep_vf_iq *iq); + + void (*enable_interrupts)(struct octep_vf_device *oct); + void (*disable_interrupts)(struct octep_vf_device *oct); + + void (*enable_io_queues)(struct octep_vf_device *oct); + void (*disable_io_queues)(struct octep_vf_device *oct); + void (*enable_iq)(struct octep_vf_device *oct, int q); + void (*disable_iq)(struct octep_vf_device *oct, int q); + void (*enable_oq)(struct octep_vf_device *oct, int q); + void (*disable_oq)(struct octep_vf_device *oct, int q); + void (*reset_io_queues)(struct octep_vf_device *oct); + void (*dump_registers)(struct octep_vf_device *oct); +}; + +/* Octeon mailbox data */ +struct octep_vf_mbox_data { + /* Holds the offset of received data via mailbox. */ + u32 data_index; + + /* Holds the received data via mailbox. */ + u8 recv_data[OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE]; +}; + +/* wrappers around work structs */ +struct octep_vf_mbox_wk { + struct work_struct work; + void *ctxptr; +}; + +/* Octeon device mailbox */ +struct octep_vf_mbox { + /* A mutex to protect access to this q_mbox. */ + struct mutex lock; + + u32 state; + + /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */ + u8 __iomem *mbox_int_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(1) for VF. + */ + u8 __iomem *mbox_write_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(0) for VF. + */ + u8 __iomem *mbox_read_reg; + + /* Octeon mailbox data */ + struct octep_vf_mbox_data mbox_data; + + /* Octeon mailbox work handler to process Mbox messages */ + struct octep_vf_mbox_wk wk; +}; + +/* Tx/Rx queue vector per interrupt. */ +struct octep_vf_ioq_vector { + char name[OCTEP_VF_MSIX_NAME_SIZE]; + struct napi_struct napi; + struct octep_vf_device *octep_vf_dev; + struct octep_vf_iq *iq; + struct octep_vf_oq *oq; + cpumask_t affinity_mask; +}; + +/* Octeon hardware/firmware offload capability flags. */ +#define OCTEP_VF_CAP_TX_CHECKSUM BIT(0) +#define OCTEP_VF_CAP_RX_CHECKSUM BIT(1) +#define OCTEP_VF_CAP_TSO BIT(2) + +/* Link modes */ +enum octep_vf_link_mode_bit_indices { + OCTEP_VF_LINK_MODE_10GBASE_T = 0, + OCTEP_VF_LINK_MODE_10GBASE_R, + OCTEP_VF_LINK_MODE_10GBASE_CR, + OCTEP_VF_LINK_MODE_10GBASE_KR, + OCTEP_VF_LINK_MODE_10GBASE_LR, + OCTEP_VF_LINK_MODE_10GBASE_SR, + OCTEP_VF_LINK_MODE_25GBASE_CR, + OCTEP_VF_LINK_MODE_25GBASE_KR, + OCTEP_VF_LINK_MODE_25GBASE_SR, + OCTEP_VF_LINK_MODE_40GBASE_CR4, + OCTEP_VF_LINK_MODE_40GBASE_KR4, + OCTEP_VF_LINK_MODE_40GBASE_LR4, + OCTEP_VF_LINK_MODE_40GBASE_SR4, + OCTEP_VF_LINK_MODE_50GBASE_CR2, + OCTEP_VF_LINK_MODE_50GBASE_KR2, + OCTEP_VF_LINK_MODE_50GBASE_SR2, + OCTEP_VF_LINK_MODE_50GBASE_CR, + OCTEP_VF_LINK_MODE_50GBASE_KR, + OCTEP_VF_LINK_MODE_50GBASE_LR, + OCTEP_VF_LINK_MODE_50GBASE_SR, + OCTEP_VF_LINK_MODE_100GBASE_CR4, + OCTEP_VF_LINK_MODE_100GBASE_KR4, + OCTEP_VF_LINK_MODE_100GBASE_LR4, + OCTEP_VF_LINK_MODE_100GBASE_SR4, + OCTEP_VF_LINK_MODE_NBITS +}; + +/* Hardware interface link state information. */ +struct octep_vf_iface_link_info { + /* Bitmap of Supported link speeds/modes. */ + u64 supported_modes; + + /* Bitmap of Advertised link speeds/modes. 
*/ + u64 advertised_modes; + + /* Negotiated link speed in Mbps. */ + u32 speed; + + /* MTU */ + u16 mtu; + + /* Autonegotiation state. */ +#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0) +#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1) + u8 autoneg; + + /* Pause frames setting. */ +#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0) +#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1) + u8 pause; + + /* Admin state of the link (ifconfig <iface> up/down). */ + u8 admin_up; + + /* Operational state of the link: physical link is up/down. */ + u8 oper_up; +}; + +/* Hardware interface stats information. */ +struct octep_vf_iface_rxtx_stats { + /* Hardware Interface Rx statistics */ + struct octep_vf_iface_rx_stats iface_rx_stats; + + /* Hardware Interface Tx statistics */ + struct octep_vf_iface_tx_stats iface_tx_stats; +}; + +struct octep_vf_fw_info { + /* pkind value to be used in every Tx hardware descriptor */ + u8 pkind; + /* front size data */ + u8 fsz; + /* supported rx offloads OCTEP_VF_RX_OFFLOAD_* */ + u16 rx_ol_flags; + /* supported tx offloads OCTEP_VF_TX_OFFLOAD_* */ + u16 tx_ol_flags; +}; + +/* The Octeon device specific private data structure. + * Each Octeon device has this structure to represent all its components. + */ +struct octep_vf_device { + struct octep_vf_config *conf; + + /* Octeon Chip type. */ + u16 chip_id; + u16 rev_id; + + /* Device capabilities enabled */ + u64 caps_enabled; + /* Device capabilities supported */ + u64 caps_supported; + + /* Pointer to basic Linux device */ + struct device *dev; + /* Linux PCI device pointer */ + struct pci_dev *pdev; + /* Netdev corresponding to the Octeon device */ + struct net_device *netdev; + + /* memory mapped io range */ + struct octep_vf_mmio mmio; + + /* MAC address */ + u8 mac_addr[ETH_ALEN]; + + /* Tx queues (IQ: Instruction Queue) */ + u16 num_iqs; + /* Pointers to Octeon Tx queues */ + struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ]; + + /* Rx queues (OQ: Output Queue) */ + u16 num_oqs; + /* Pointers to Octeon Rx queues */ + struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ]; + + /* Hardware port number of the PCIe interface */ + u16 pcie_port; + + /* Hardware operations */ + struct octep_vf_hw_ops hw_ops; + + /* IRQ info */ + u16 num_irqs; + u16 num_non_ioq_irqs; + char *non_ioq_irq_names; + struct msix_entry *msix_entries; + /* IOQ information of its corresponding MSI-X interrupt. */ + struct octep_vf_ioq_vector *ioq_vector[OCTEP_VF_MAX_QUEUES]; + + /* Hardware Interface Tx statistics */ + struct octep_vf_iface_tx_stats iface_tx_stats; + /* Hardware Interface Rx statistics */ + struct octep_vf_iface_rx_stats iface_rx_stats; + + /* Hardware Interface Link info like supported modes, aneg support */ + struct octep_vf_iface_link_info link_info; + + /* Mailbox to talk to the PF */ + struct octep_vf_mbox *mbox; + + /* Work entry to handle Tx timeout */ + struct work_struct tx_timeout_task; + + /* offset for iface stats */ + u32 ctrl_mbox_ifstats_offset; + + /* Negotiated Mbox version */ + u32 mbox_neg_ver; + + /* firmware info */ + struct octep_vf_fw_info fw_info; +}; + +static inline u16 OCTEP_VF_MAJOR_REV(struct octep_vf_device *oct) +{ + u16 rev = (oct->rev_id & 0xC) >> 2; + + return (rev == 0) ? 1 : rev; +}
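OCTEP_VF_MAJOR_REV() above decodes the major pass from bits [3:2] of the PCI revision ID, reporting pass 0 as 1; OCTEP_VF_MINOR_REV() takes bits [1:0]. A standalone check of the decoding (plain C):

#include <assert.h>
#include <stdint.h>

/* Same bit slicing as OCTEP_VF_MAJOR_REV()/OCTEP_VF_MINOR_REV() */
static uint16_t major_rev(uint8_t rev_id)
{
    uint16_t rev = (rev_id & 0xC) >> 2;

    return rev ? rev : 1;  /* pass 0 is reported as 1 */
}

static uint16_t minor_rev(uint8_t rev_id)
{
    return rev_id & 0x3;
}

int main(void)
{
    assert(major_rev(0x0) == 1 && minor_rev(0x0) == 0); /* PASS1.0 */
    assert(major_rev(0x9) == 2 && minor_rev(0x9) == 1); /* PASS2.1 */
    return 0;
}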
+ +static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct) +{ + return (oct->rev_id & 0x3); +} + +/* Octeon CSR read/write access APIs */ +#define octep_vf_write_csr(octep_vf_dev, reg_off, value) \ + writel(value, (octep_vf_dev)->mmio.hw_addr + (reg_off)) + +#define octep_vf_write_csr64(octep_vf_dev, reg_off, val64) \ + writeq(val64, (octep_vf_dev)->mmio.hw_addr + (reg_off)) + +#define octep_vf_read_csr(octep_vf_dev, reg_off) \ + readl((octep_vf_dev)->mmio.hw_addr + (reg_off)) + +#define octep_vf_read_csr64(octep_vf_dev, reg_off) \ + readq((octep_vf_dev)->mmio.hw_addr + (reg_off)) + +extern struct workqueue_struct *octep_vf_wq; + +int octep_vf_device_setup(struct octep_vf_device *oct); +int octep_vf_setup_iqs(struct octep_vf_device *oct); +void octep_vf_free_iqs(struct octep_vf_device *oct); +void octep_vf_clean_iqs(struct octep_vf_device *oct); +int octep_vf_setup_oqs(struct octep_vf_device *oct); +void octep_vf_free_oqs(struct octep_vf_device *oct); +void octep_vf_oq_dbell_init(struct octep_vf_device *oct); +void octep_vf_device_setup_cn93(struct octep_vf_device *oct); +void octep_vf_device_setup_cnxk(struct octep_vf_device *oct); +int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget); +int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget); +void octep_vf_set_ethtool_ops(struct net_device *netdev); +int octep_vf_get_link_info(struct octep_vf_device *oct); +int octep_vf_get_if_stats(struct octep_vf_device *oct); +void octep_vf_mbox_work(struct work_struct *work); +#endif /* _OCTEP_VF_MAIN_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c new file mode 100644 index 000000000000..2eab21e43048 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include "octep_vf_config.h" +#include "octep_vf_main.h" + +/* When a new command is implemented, the below table should be updated + * with the new command and its version info. + */ +static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = { + [0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1, + [OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ...
OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] = + OCTEP_PFVF_MBOX_VERSION_V2 +}; + +int octep_vf_setup_mbox(struct octep_vf_device *oct) +{ + int ring = 0; + + oct->mbox = vzalloc(sizeof(*oct->mbox)); + if (!oct->mbox) + return -1; + + mutex_init(&oct->mbox->lock); + + oct->hw_ops.setup_mbox_regs(oct, ring); + INIT_WORK(&oct->mbox->wk.work, octep_vf_mbox_work); + oct->mbox->wk.ctxptr = oct; + oct->mbox_neg_ver = OCTEP_PFVF_MBOX_VERSION_CURRENT; + dev_info(&oct->pdev->dev, "setup vf mbox successfully\n"); + return 0; +} + +void octep_vf_delete_mbox(struct octep_vf_device *oct) +{ + if (oct->mbox) { + if (work_pending(&oct->mbox->wk.work)) + cancel_work_sync(&oct->mbox->wk.work); + + mutex_destroy(&oct->mbox->lock); + vfree(oct->mbox); + oct->mbox = NULL; + dev_info(&oct->pdev->dev, "Deleted vf mbox successfully\n"); + } +} + +int octep_vf_mbox_version_check(struct octep_vf_device *oct) +{ + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int ret; + + cmd.u64 = 0; + cmd.s_version.opcode = OCTEP_PFVF_MBOX_CMD_VERSION; + cmd.s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT; + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret == OCTEP_PFVF_MBOX_CMD_STATUS_NACK) { + dev_err(&oct->pdev->dev, + "VF Mbox version is incompatible with PF\n"); + return -EINVAL; + } + oct->mbox_neg_ver = (u32)rsp.s_version.version; + dev_dbg(&oct->pdev->dev, + "VF Mbox version:%u Negotiated VF version with PF:%u\n", + (u32)cmd.s_version.version, + (u32)rsp.s_version.version); + return 0; +} + +void octep_vf_mbox_work(struct work_struct *work) +{ + struct octep_vf_mbox_wk *wk = container_of(work, struct octep_vf_mbox_wk, work); + struct octep_vf_iface_link_info *link_info; + struct octep_vf_device *oct = NULL; + struct octep_vf_mbox *mbox = NULL; + union octep_pfvf_mbox_word *notif; + u64 pf_vf_data; + + oct = (struct octep_vf_device *)wk->ctxptr; + link_info = &oct->link_info; + mbox = oct->mbox; + pf_vf_data = readq(mbox->mbox_read_reg); + + notif = (union octep_pfvf_mbox_word *)&pf_vf_data; + + switch (notif->s.opcode) { + case OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS: + if (notif->s_link_status.status) { + link_info->oper_up = OCTEP_PFVF_LINK_STATUS_UP; + netif_carrier_on(oct->netdev); + dev_info(&oct->pdev->dev, "netif_carrier_on\n"); + } else { + link_info->oper_up = OCTEP_PFVF_LINK_STATUS_DOWN; + netif_carrier_off(oct->netdev); + dev_info(&oct->pdev->dev, "netif_carrier_off\n"); + } + break; + default: + dev_err(&oct->pdev->dev, + "Received unsupported notif %d\n", notif->s.opcode); + break; + } +} + +static int __octep_vf_mbox_send_cmd(struct octep_vf_device *oct, + union octep_pfvf_mbox_word cmd, + union octep_pfvf_mbox_word *rsp) +{ + struct octep_vf_mbox *mbox = oct->mbox; + u64 reg_val = 0ull; + int count; + + if (!mbox) + return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP; + + cmd.s.type = OCTEP_PFVF_MBOX_TYPE_CMD; + writeq(cmd.u64, mbox->mbox_write_reg); + + /* No response for notification messages */ + if (!rsp) + return 0; + + for (count = 0; count < OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT; count++) { + usleep_range(1000, 1500); + reg_val = readq(mbox->mbox_write_reg); + if (reg_val != cmd.u64) { + rsp->u64 = reg_val; + break; + } + } + if (count == OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT) { + dev_err(&oct->pdev->dev, "mbox send command timed out\n"); + return OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT; + } + if (rsp->s.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { + dev_err(&oct->pdev->dev, "mbox_send: Received NACK\n"); + return OCTEP_PFVF_MBOX_CMD_STATUS_NACK; + } + rsp->u64 = reg_val; + return 0; +} + +int 
octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd, + union octep_pfvf_mbox_word *rsp) +{ + struct octep_vf_mbox *mbox = oct->mbox; + int ret; + + if (!mbox) + return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP; + mutex_lock(&mbox->lock); + if (pfvf_cmd_versions[cmd.s.opcode] > oct->mbox_neg_ver) { + dev_dbg(&oct->pdev->dev, "CMD:%d not supported in Version:%d\n", + cmd.s.opcode, oct->mbox_neg_ver); + mutex_unlock(&mbox->lock); + return -EOPNOTSUPP; + } + ret = __octep_vf_mbox_send_cmd(oct, cmd, rsp); + mutex_unlock(&mbox->lock); + return ret; +} + +int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode, + u8 *data, int *size) +{ + struct octep_vf_mbox *mbox = oct->mbox; + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int data_len = 0, tmp_len = 0; + int read_cnt, i = 0, ret; + + if (!mbox) + return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP; + + mutex_lock(&mbox->lock); + cmd.u64 = 0; + cmd.s_data.opcode = opcode; + cmd.s_data.frag = 0; + /* Send cmd to read data from PF */ + ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n"); + mutex_unlock(&mbox->lock); + return ret; + } + /* PF sends the data length of requested CMD + * in ACK + */ + data_len = *((int32_t *)rsp.s_data.data); + tmp_len = data_len; + cmd.u64 = 0; + rsp.u64 = 0; + cmd.s_data.opcode = opcode; + cmd.s_data.frag = 1; + while (data_len) { + ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n"); + mutex_unlock(&mbox->lock); + mbox->mbox_data.data_index = 0; + memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE); + return ret; + } + if (data_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE) { + data_len -= OCTEP_PFVF_MBOX_MAX_DATA_SIZE; + read_cnt = OCTEP_PFVF_MBOX_MAX_DATA_SIZE; + } else { + read_cnt = data_len; + data_len = 0; + } + for (i = 0; i < read_cnt; i++) { + mbox->mbox_data.recv_data[mbox->mbox_data.data_index] = + rsp.s_data.data[i]; + mbox->mbox_data.data_index++; + } + cmd.u64 = 0; + rsp.u64 = 0; + cmd.s_data.opcode = opcode; + cmd.s_data.frag = 1; + } + memcpy(data, mbox->mbox_data.recv_data, tmp_len); + *size = tmp_len; + mbox->mbox_data.data_index = 0; + memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE); + mutex_unlock(&mbox->lock); + return 0; +} + +int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu) +{ + int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN; + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int ret = 0; + + if (mtu < ETH_MIN_MTU || frame_size > ETH_MAX_MTU) { + dev_err(&oct->pdev->dev, + "Failed to set MTU to %d MIN MTU:%d MAX MTU:%d\n", + mtu, ETH_MIN_MTU, ETH_MAX_MTU); + return -EINVAL; + } + + cmd.u64 = 0; + cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU; + cmd.s_set_mtu.mtu = mtu; + + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "Mbox send failed; err=%d\n", ret); + return ret; + } + if (rsp.s_set_mtu.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { + dev_err(&oct->pdev->dev, "Received Mbox NACK from PF for MTU:%d\n", mtu); + return -EINVAL; + } + + return 0; +} + +int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr) +{ + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int i, ret; + + cmd.u64 = 0; + cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR; + for (i = 0; i < ETH_ALEN; i++) + cmd.s_set_mac.mac_addr[i] = mac_addr[i]; + 
ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "Mbox send failed; err = %d\n", ret); + return ret; + } + if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { + dev_err(&oct->pdev->dev, "set_mac: received NACK\n"); + return -EINVAL; + } + return 0; +} + +int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr) +{ + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int i, ret; + + cmd.u64 = 0; + cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR; + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "get_mac: mbox send failed; err = %d\n", ret); + return ret; + } + if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { + dev_err(&oct->pdev->dev, "get_mac: received NACK\n"); + return -EINVAL; + } + for (i = 0; i < ETH_ALEN; i++) + mac_addr[i] = rsp.s_set_mac.mac_addr[i]; + return 0; +} + +int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state) +{ + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int ret; + + cmd.u64 = 0; + cmd.s_link_state.opcode = OCTEP_PFVF_MBOX_CMD_SET_RX_STATE; + cmd.s_link_state.state = state; + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "Set Rx state via VF Mbox send failed\n"); + return ret; + } + if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { + dev_err(&oct->pdev->dev, "Set Rx state received NACK\n"); + return -EINVAL; + } + return 0; +} + +int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status) +{ + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int ret; + + cmd.u64 = 0; + cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS; + cmd.s_link_status.status = status; + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "Set link status via VF Mbox send failed\n"); + return ret; + } + if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { + dev_err(&oct->pdev->dev, "Set link status received NACK\n"); + return -EINVAL; + } + return 0; +} + +int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up) +{ + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int ret; + + cmd.u64 = 0; + cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS; + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n"); + return ret; + } + if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { + dev_err(&oct->pdev->dev, "Get link status received NACK\n"); + return -EINVAL; + } + *oper_up = rsp.s_link_status.status; + return 0; +} + +int octep_vf_mbox_dev_remove(struct octep_vf_device *oct) +{ + union octep_pfvf_mbox_word cmd; + int ret; + + cmd.u64 = 0; + cmd.s.opcode = OCTEP_PFVF_MBOX_CMD_DEV_REMOVE; + ret = octep_vf_mbox_send_cmd(oct, cmd, NULL); + return ret; +} + +int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct) +{ + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int ret; + + cmd.u64 = 0; + cmd.s_fw_info.opcode = OCTEP_PFVF_MBOX_CMD_GET_FW_INFO; + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "Get firmware info via VF Mbox send failed\n"); + return ret; + } + if (rsp.s_fw_info.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { + dev_err(&oct->pdev->dev, "Get firmware info received NACK\n"); + return -EINVAL; + } + oct->fw_info.pkind = rsp.s_fw_info.pkind; + oct->fw_info.fsz = rsp.s_fw_info.fsz; + oct->fw_info.rx_ol_flags = 
rsp.s_fw_info.rx_ol_flags; + oct->fw_info.tx_ol_flags = rsp.s_fw_info.tx_ol_flags; + + return 0; +} + +int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, + u16 rx_offloads) +{ + union octep_pfvf_mbox_word cmd; + union octep_pfvf_mbox_word rsp; + int ret; + + cmd.u64 = 0; + cmd.s_offloads.opcode = OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS; + cmd.s_offloads.rx_ol_flags = rx_offloads; + cmd.s_offloads.tx_ol_flags = tx_offloads; + ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp); + if (ret) { + dev_err(&oct->pdev->dev, "Set offloads via VF Mbox send failed\n"); + return ret; + } + if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) { + dev_err(&oct->pdev->dev, "Set offloads received NACK\n"); + return -EINVAL; + } + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h new file mode 100644 index 000000000000..9b5efad37eab --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ +#ifndef _OCTEP_VF_MBOX_H_ +#define _OCTEP_VF_MBOX_H_ + +/* When a new command is implemented, VF Mbox version should be bumped. + */ +enum octep_pfvf_mbox_version { + OCTEP_PFVF_MBOX_VERSION_V0, + OCTEP_PFVF_MBOX_VERSION_V1, + OCTEP_PFVF_MBOX_VERSION_V2 +}; + +#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2 + +enum octep_pfvf_mbox_opcode { + OCTEP_PFVF_MBOX_CMD_VERSION, + OCTEP_PFVF_MBOX_CMD_SET_MTU, + OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR, + OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR, + OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO, + OCTEP_PFVF_MBOX_CMD_GET_STATS, + OCTEP_PFVF_MBOX_CMD_SET_RX_STATE, + OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS, + OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS, + OCTEP_PFVF_MBOX_CMD_GET_MTU, + OCTEP_PFVF_MBOX_CMD_DEV_REMOVE, + OCTEP_PFVF_MBOX_CMD_GET_FW_INFO, + OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS, + OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS, + OCTEP_PFVF_MBOX_CMD_MAX, +}; + +enum octep_pfvf_mbox_word_type { + OCTEP_PFVF_MBOX_TYPE_CMD, + OCTEP_PFVF_MBOX_TYPE_RSP_ACK, + OCTEP_PFVF_MBOX_TYPE_RSP_NACK, +}; + +enum octep_pfvf_mbox_cmd_status { + OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1, + OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2, + OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3, + OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4, + OCTEP_PFVF_MBOX_CMD_STATUS_ERR = 5 +}; + +enum octep_pfvf_link_status { + OCTEP_PFVF_LINK_STATUS_DOWN, + OCTEP_PFVF_LINK_STATUS_UP, +}; + +enum octep_pfvf_link_speed { + OCTEP_PFVF_LINK_SPEED_NONE, + OCTEP_PFVF_LINK_SPEED_1000, + OCTEP_PFVF_LINK_SPEED_10000, + OCTEP_PFVF_LINK_SPEED_25000, + OCTEP_PFVF_LINK_SPEED_40000, + OCTEP_PFVF_LINK_SPEED_50000, + OCTEP_PFVF_LINK_SPEED_100000, + OCTEP_PFVF_LINK_SPEED_LAST, +}; + +enum octep_pfvf_link_duplex { + OCTEP_PFVF_LINK_HALF_DUPLEX, + OCTEP_PFVF_LINK_FULL_DUPLEX, +}; + +enum octep_pfvf_link_autoneg { + OCTEP_PFVF_LINK_AUTONEG, + OCTEP_PFVF_LINK_FIXED, +}; + +#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT 8000 +#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_UDELAY 1000 +#define OCTEP_PFVF_MBOX_MAX_RETRIES 2 +#define OCTEP_PFVF_MBOX_VERSION 0 +#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6 +#define OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE 320 +#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1 + +union octep_pfvf_mbox_word { + u64 u64; + struct { + u64 opcode:8; + u64 type:2; + u64 rsvd:6; + u64 data:48; + } s; + struct { + u64 opcode:8; + u64 type:2; + u64 frag:1; + u64 rsvd:5; + u8 data[6]; + } s_data; + struct { + u64 opcode:8; + u64 
type:2; + u64 rsvd:6; + u64 version:48; + } s_version; + struct { + u64 opcode:8; + u64 type:2; + u64 rsvd:6; + u8 mac_addr[6]; + } s_set_mac; + struct { + u64 opcode:8; + u64 type:2; + u64 rsvd:6; + u64 mtu:48; + } s_set_mtu; + struct { + u64 opcode:8; + u64 type:2; + u64 state:1; + u64 rsvd:53; + } s_link_state; + struct { + u64 opcode:8; + u64 type:2; + u64 status:1; + u64 rsvd:53; + } s_link_status; + struct { + u64 opcode:8; + u64 type:2; + u64 pkind:8; + u64 fsz:8; + u64 rx_ol_flags:16; + u64 tx_ol_flags:16; + u64 rsvd:6; + } s_fw_info; + struct { + u64 opcode:8; + u64 type:2; + u64 rsvd:22; + u64 rx_ol_flags:16; + u64 tx_ol_flags:16; + } s_offloads; +} __packed; + +int octep_vf_setup_mbox(struct octep_vf_device *oct); +void octep_vf_delete_mbox(struct octep_vf_device *oct); +int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd, + union octep_pfvf_mbox_word *rsp); +int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode, + u8 *data, int *size); +int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu); +int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr); +int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr); +int octep_vf_mbox_version_check(struct octep_vf_device *oct); +int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state); +int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status); +int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up); +int octep_vf_mbox_dev_remove(struct octep_vf_device *oct); +int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct); +int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, u16 rx_offloads); + +#endif diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h new file mode 100644 index 000000000000..25e2a876ebba --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cn9k.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ +#ifndef _OCTEP_VF_REGS_CN9K_H_ +#define _OCTEP_VF_REGS_CN9K_H_ + +/*############################ RST #########################*/ +#define CN93_VF_CONFIG_XPANSION_BAR 0x38 +#define CN93_VF_CONFIG_PCIE_CAP 0x70 +#define CN93_VF_CONFIG_PCIE_DEVCAP 0x74 +#define CN93_VF_CONFIG_PCIE_DEVCTL 0x78 +#define CN93_VF_CONFIG_PCIE_LINKCAP 0x7C +#define CN93_VF_CONFIG_PCIE_LINKCTL 0x80 +#define CN93_VF_CONFIG_PCIE_SLOTCAP 0x84 +#define CN93_VF_CONFIG_PCIE_SLOTCTL 0x88 + +#define CN93_VF_RING_OFFSET BIT_ULL(17) + +/*###################### RING IN REGISTERS #########################*/ +#define CN93_VF_SDP_R_IN_CONTROL_START 0x10000 +#define CN93_VF_SDP_R_IN_ENABLE_START 0x10010 +#define CN93_VF_SDP_R_IN_INSTR_BADDR_START 0x10020 +#define CN93_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030 +#define CN93_VF_SDP_R_IN_INSTR_DBELL_START 0x10040 +#define CN93_VF_SDP_R_IN_CNTS_START 0x10050 +#define CN93_VF_SDP_R_IN_INT_LEVELS_START 0x10060 +#define CN93_VF_SDP_R_IN_PKT_CNT_START 0x10080 +#define CN93_VF_SDP_R_IN_BYTE_CNT_START 0x10090 + +#define CN93_VF_SDP_R_IN_CONTROL(ring) \ + (CN93_VF_SDP_R_IN_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_IN_ENABLE(ring) \ + (CN93_VF_SDP_R_IN_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_IN_INSTR_BADDR(ring) \ + (CN93_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_IN_INSTR_RSIZE(ring) \ + (CN93_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_IN_INSTR_DBELL(ring) \ + (CN93_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_IN_CNTS(ring) \ + (CN93_VF_SDP_R_IN_CNTS_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_IN_INT_LEVELS(ring) \ + (CN93_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_IN_PKT_CNT(ring) \ + (CN93_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_IN_BYTE_CNT(ring) \ + (CN93_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET)) + +/*------------------ R_IN Masks ----------------*/ + +/** Rings per Virtual Function **/ +#define CN93_VF_R_IN_CTL_RPVF_MASK (0xF) +#define CN93_VF_R_IN_CTL_RPVF_POS (48) + +/* Number of instructions to be read in one MAC read request. 
+ * setting to Max value(4) + **/ +#define CN93_VF_R_IN_CTL_IDLE BIT_ULL(28) +#define CN93_VF_R_IN_CTL_RDSIZE (0x3ULL << 25) +#define CN93_VF_R_IN_CTL_IS_64B BIT_ULL(24) +#define CN93_VF_R_IN_CTL_D_NSR BIT_ULL(8) +#define CN93_VF_R_IN_CTL_D_ESR BIT_ULL(6) +#define CN93_VF_R_IN_CTL_D_ROR BIT_ULL(5) +#define CN93_VF_R_IN_CTL_NSR BIT_ULL(3) +#define CN93_VF_R_IN_CTL_ESR BIT_ULL(1) +#define CN93_VF_R_IN_CTL_ROR BIT_ULL(0) + +#define CN93_VF_R_IN_CTL_MASK (CN93_VF_R_IN_CTL_RDSIZE | CN93_VF_R_IN_CTL_IS_64B) + +/*###################### RING OUT REGISTERS #########################*/ +#define CN93_VF_SDP_R_OUT_CNTS_START 0x10100 +#define CN93_VF_SDP_R_OUT_INT_LEVELS_START 0x10110 +#define CN93_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120 +#define CN93_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130 +#define CN93_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140 +#define CN93_VF_SDP_R_OUT_CONTROL_START 0x10150 +#define CN93_VF_SDP_R_OUT_ENABLE_START 0x10160 +#define CN93_VF_SDP_R_OUT_PKT_CNT_START 0x10180 +#define CN93_VF_SDP_R_OUT_BYTE_CNT_START 0x10190 + +#define CN93_VF_SDP_R_OUT_CONTROL(ring) \ + (CN93_VF_SDP_R_OUT_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_OUT_ENABLE(ring) \ + (CN93_VF_SDP_R_OUT_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_OUT_SLIST_BADDR(ring) \ + (CN93_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_OUT_SLIST_RSIZE(ring) \ + (CN93_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_OUT_SLIST_DBELL(ring) \ + (CN93_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_OUT_CNTS(ring) \ + (CN93_VF_SDP_R_OUT_CNTS_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_OUT_INT_LEVELS(ring) \ + (CN93_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_OUT_PKT_CNT(ring) \ + (CN93_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_OUT_BYTE_CNT(ring) \ + (CN93_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET)) + +/*------------------ R_OUT Masks ----------------*/ +#define CN93_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63) +#define CN93_VF_R_OUT_INT_LEVELS_TIMET (32) + +#define CN93_VF_R_OUT_CTL_IDLE BIT_ULL(40) +#define CN93_VF_R_OUT_CTL_ES_I BIT_ULL(34) +#define CN93_VF_R_OUT_CTL_NSR_I BIT_ULL(33) +#define CN93_VF_R_OUT_CTL_ROR_I BIT_ULL(32) +#define CN93_VF_R_OUT_CTL_ES_D BIT_ULL(30) +#define CN93_VF_R_OUT_CTL_NSR_D BIT_ULL(29) +#define CN93_VF_R_OUT_CTL_ROR_D BIT_ULL(28) +#define CN93_VF_R_OUT_CTL_ES_P BIT_ULL(26) +#define CN93_VF_R_OUT_CTL_NSR_P BIT_ULL(25) +#define CN93_VF_R_OUT_CTL_ROR_P BIT_ULL(24) +#define CN93_VF_R_OUT_CTL_IMODE BIT_ULL(23) + +/* ##################### Mail Box Registers ########################## */ +/* SDP PF to VF Mailbox Data Register */ +#define CN93_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210 +/* SDP Packet PF to VF Mailbox Interrupt Register */ +#define CN93_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220 +/* SDP VF to PF Mailbox Data Register */ +#define CN93_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230 + +#define CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1) +#define CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0) + +#define CN93_VF_SDP_R_MBOX_PF_VF_DATA(ring) \ + (CN93_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_MBOX_PF_VF_INT(ring) \ + (CN93_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_VF_RING_OFFSET)) + +#define CN93_VF_SDP_R_MBOX_VF_PF_DATA(ring) \ + (CN93_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) 
* CN93_VF_RING_OFFSET)) +#endif /* _OCTEP_VF_REGS_CN9K_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h new file mode 100644 index 000000000000..2e156745ef64 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_regs_cnxk.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ +#ifndef _OCTEP_VF_REGS_CNXK_H_ +#define _OCTEP_VF_REGS_CNXK_H_ + +/*############################ RST #########################*/ +#define CNXK_VF_CONFIG_XPANSION_BAR 0x38 +#define CNXK_VF_CONFIG_PCIE_CAP 0x70 +#define CNXK_VF_CONFIG_PCIE_DEVCAP 0x74 +#define CNXK_VF_CONFIG_PCIE_DEVCTL 0x78 +#define CNXK_VF_CONFIG_PCIE_LINKCAP 0x7C +#define CNXK_VF_CONFIG_PCIE_LINKCTL 0x80 +#define CNXK_VF_CONFIG_PCIE_SLOTCAP 0x84 +#define CNXK_VF_CONFIG_PCIE_SLOTCTL 0x88 + +#define CNXK_VF_RING_OFFSET (0x1ULL << 17) + +/*###################### RING IN REGISTERS #########################*/ +#define CNXK_VF_SDP_R_IN_CONTROL_START 0x10000 +#define CNXK_VF_SDP_R_IN_ENABLE_START 0x10010 +#define CNXK_VF_SDP_R_IN_INSTR_BADDR_START 0x10020 +#define CNXK_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030 +#define CNXK_VF_SDP_R_IN_INSTR_DBELL_START 0x10040 +#define CNXK_VF_SDP_R_IN_CNTS_START 0x10050 +#define CNXK_VF_SDP_R_IN_INT_LEVELS_START 0x10060 +#define CNXK_VF_SDP_R_IN_PKT_CNT_START 0x10080 +#define CNXK_VF_SDP_R_IN_BYTE_CNT_START 0x10090 +#define CNXK_VF_SDP_R_ERR_TYPE_START 0x10400 + +#define CNXK_VF_SDP_R_ERR_TYPE(ring) \ + (CNXK_VF_SDP_R_ERR_TYPE_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_IN_CONTROL(ring) \ + (CNXK_VF_SDP_R_IN_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_IN_ENABLE(ring) \ + (CNXK_VF_SDP_R_IN_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_IN_INSTR_BADDR(ring) \ + (CNXK_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_IN_INSTR_RSIZE(ring) \ + (CNXK_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_IN_INSTR_DBELL(ring) \ + (CNXK_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_IN_CNTS(ring) \ + (CNXK_VF_SDP_R_IN_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_IN_INT_LEVELS(ring) \ + (CNXK_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_IN_PKT_CNT(ring) \ + (CNXK_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_IN_BYTE_CNT(ring) \ + (CNXK_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET)) + +/*------------------ R_IN Masks ----------------*/ + +/** Rings per Virtual Function **/ +#define CNXK_VF_R_IN_CTL_RPVF_MASK (0xF) +#define CNXK_VF_R_IN_CTL_RPVF_POS (48) + +/* Number of instructions to be read in one MAC read request. 
+ * setting to Max value(4) + **/ +#define CNXK_VF_R_IN_CTL_IDLE (0x1ULL << 28) +#define CNXK_VF_R_IN_CTL_RDSIZE (0x3ULL << 25) +#define CNXK_VF_R_IN_CTL_IS_64B (0x1ULL << 24) +#define CNXK_VF_R_IN_CTL_D_NSR (0x1ULL << 8) +#define CNXK_VF_R_IN_CTL_D_ESR (0x1ULL << 6) +#define CNXK_VF_R_IN_CTL_D_ROR (0x1ULL << 5) +#define CNXK_VF_R_IN_CTL_NSR (0x1ULL << 3) +#define CNXK_VF_R_IN_CTL_ESR (0x1ULL << 1) +#define CNXK_VF_R_IN_CTL_ROR (0x1ULL << 0) + +#define CNXK_VF_R_IN_CTL_MASK (CNXK_VF_R_IN_CTL_RDSIZE | CNXK_VF_R_IN_CTL_IS_64B) + +/*###################### RING OUT REGISTERS #########################*/ +#define CNXK_VF_SDP_R_OUT_CNTS_START 0x10100 +#define CNXK_VF_SDP_R_OUT_INT_LEVELS_START 0x10110 +#define CNXK_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120 +#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130 +#define CNXK_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140 +#define CNXK_VF_SDP_R_OUT_CONTROL_START 0x10150 +#define CNXK_VF_SDP_R_OUT_WMARK_START 0x10160 +#define CNXK_VF_SDP_R_OUT_ENABLE_START 0x10170 +#define CNXK_VF_SDP_R_OUT_PKT_CNT_START 0x10180 +#define CNXK_VF_SDP_R_OUT_BYTE_CNT_START 0x10190 + +#define CNXK_VF_SDP_R_OUT_CONTROL(ring) \ + (CNXK_VF_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_OUT_ENABLE(ring) \ + (CNXK_VF_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_OUT_SLIST_BADDR(ring) \ + (CNXK_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE(ring) \ + (CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_OUT_SLIST_DBELL(ring) \ + (CNXK_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_OUT_WMARK(ring) \ + (CNXK_VF_SDP_R_OUT_WMARK_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_OUT_CNTS(ring) \ + (CNXK_VF_SDP_R_OUT_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_OUT_INT_LEVELS(ring) \ + (CNXK_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_OUT_PKT_CNT(ring) \ + (CNXK_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_OUT_BYTE_CNT(ring) \ + (CNXK_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET)) + +/*------------------ R_OUT Masks ----------------*/ +#define CNXK_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63) +#define CNXK_VF_R_OUT_INT_LEVELS_TIMET (32) + +#define CNXK_VF_R_OUT_CTL_IDLE BIT_ULL(40) +#define CNXK_VF_R_OUT_CTL_ES_I BIT_ULL(34) +#define CNXK_VF_R_OUT_CTL_NSR_I BIT_ULL(33) +#define CNXK_VF_R_OUT_CTL_ROR_I BIT_ULL(32) +#define CNXK_VF_R_OUT_CTL_ES_D BIT_ULL(30) +#define CNXK_VF_R_OUT_CTL_NSR_D BIT_ULL(29) +#define CNXK_VF_R_OUT_CTL_ROR_D BIT_ULL(28) +#define CNXK_VF_R_OUT_CTL_ES_P BIT_ULL(26) +#define CNXK_VF_R_OUT_CTL_NSR_P BIT_ULL(25) +#define CNXK_VF_R_OUT_CTL_ROR_P BIT_ULL(24) +#define CNXK_VF_R_OUT_CTL_IMODE BIT_ULL(23) + +/* ##################### Mail Box Registers ########################## */ +/* SDP PF to VF Mailbox Data Register */ +#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210 +/* SDP Packet PF to VF Mailbox Interrupt Register */ +#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220 +/* SDP VF to PF Mailbox Data Register */ +#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230 + +#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1) +#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0) + +#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA(ring) \ + (CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define 
CNXK_VF_SDP_R_MBOX_PF_VF_INT(ring) \ + (CNXK_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_VF_RING_OFFSET)) + +#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA(ring) \ + (CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET)) +#endif /* _OCTEP_VF_REGS_CNXK_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c new file mode 100644 index 000000000000..82821bc28634 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c @@ -0,0 +1,510 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> + +#include "octep_vf_config.h" +#include "octep_vf_main.h" + +static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq) +{ + oq->host_read_idx = 0; + oq->host_refill_idx = 0; + oq->refill_count = 0; + oq->last_pkt_count = 0; + oq->pkts_pending = 0; +} + +/** + * octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring. + * + * @oq: Octeon Rx queue data structure. + * + * Return: 0, if successfully filled receive buffers for all descriptors. + * -ENOMEM, if failed to allocate a buffer or failed to map for DMA. + */ +static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq) +{ + struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring; + struct page *page; + u32 i; + + for (i = 0; i < oq->max_count; i++) { + page = dev_alloc_page(); + if (unlikely(!page)) { + dev_err(oq->dev, "Rx buffer alloc failed\n"); + goto rx_buf_alloc_err; + } + desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) { + dev_err(oq->dev, + "OQ-%d buffer alloc: DMA mapping error!\n", + oq->q_no); + goto dma_map_err; + } + oq->buff_info[i].page = page; + } + + return 0; + +dma_map_err: + put_page(page); +rx_buf_alloc_err: + while (i) { + i--; + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); + put_page(oq->buff_info[i].page); + oq->buff_info[i].page = NULL; + } + + return -ENOMEM; +} + +/** + * octep_vf_oq_refill() - refill buffers for used Rx ring descriptors. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * + * Return: number of descriptors successfully refilled with receive buffers. + */ +static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *oq) +{ + struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring; + struct page *page; + u32 refill_idx, i; + + refill_idx = oq->host_refill_idx; + for (i = 0; i < oq->refill_count; i++) { + page = dev_alloc_page(); + if (unlikely(!page)) { + dev_err(oq->dev, "refill: rx buffer alloc failed\n"); + oq->stats.alloc_failures++; + break; + } + + desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) { + dev_err(oq->dev, + "OQ-%d buffer refill: DMA mapping error!\n", + oq->q_no); + put_page(page); + oq->stats.alloc_failures++; + break; + } + oq->buff_info[refill_idx].page = page; + refill_idx++; + if (refill_idx == oq->max_count) + refill_idx = 0; + } + oq->host_refill_idx = refill_idx; + oq->refill_count -= i; + + return i; +} + +/** + * octep_vf_setup_oq() - Setup a Rx queue. + * + * @oct: Octeon device private data structure. + * @q_no: Rx queue number to be setup. + * + * Allocate resources for a Rx queue. 
+ */ +static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no) +{ + struct octep_vf_oq *oq; + u32 desc_ring_size; + + oq = vzalloc(sizeof(*oq)); + if (!oq) + goto create_oq_fail; + oct->oq[q_no] = oq; + + oq->octep_vf_dev = oct; + oq->netdev = oct->netdev; + oq->dev = &oct->pdev->dev; + oq->q_no = q_no; + oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); + oq->ring_size_mask = oq->max_count - 1; + oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); + oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE; + + /* When the hardware/firmware supports additional capabilities, + * an additional header is filled in by Octeon after the length field + * of Rx packets. This header contains additional packet information. + */ + if (oct->fw_info.rx_ol_flags) + oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE; + + oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf); + + desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE; + oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size, + &oq->desc_ring_dma, GFP_KERNEL); + + if (unlikely(!oq->desc_ring)) { + dev_err(oq->dev, + "Failed to allocate DMA memory for OQ-%d\n", q_no); + goto desc_dma_alloc_err; + } + + oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE); + + if (unlikely(!oq->buff_info)) { + dev_err(&oct->pdev->dev, + "Failed to allocate buffer info for OQ-%d\n", q_no); + goto buf_list_err; + } + + if (octep_vf_oq_fill_ring_buffers(oq)) + goto oq_fill_buff_err; + + octep_vf_oq_reset_indices(oq); + oct->hw_ops.setup_oq_regs(oct, q_no); + oct->num_oqs++; + + return 0; + +oq_fill_buff_err: + vfree(oq->buff_info); + oq->buff_info = NULL; +buf_list_err: + dma_free_coherent(oq->dev, desc_ring_size, + oq->desc_ring, oq->desc_ring_dma); + oq->desc_ring = NULL; +desc_dma_alloc_err: + vfree(oq); + oct->oq[q_no] = NULL; +create_oq_fail: + return -ENOMEM; +} + +/** + * octep_vf_oq_free_ring_buffers() - Free ring buffers. + * + * @oq: Octeon Rx queue data structure. + * + * Free receive buffers in unused Rx queue descriptors. + */ +static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq) +{ + struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring; + int i; + + if (!oq->desc_ring || !oq->buff_info) + return; + + for (i = 0; i < oq->max_count; i++) { + if (oq->buff_info[i].page) { + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + put_page(oq->buff_info[i].page); + oq->buff_info[i].page = NULL; + desc_ring[i].buffer_ptr = 0; + } + } + octep_vf_oq_reset_indices(oq); +} + +/** + * octep_vf_free_oq() - Free Rx queue resources. + * + * @oq: Octeon Rx queue data structure. + * + * Free all resources of a Rx queue. + */ +static int octep_vf_free_oq(struct octep_vf_oq *oq) +{ + struct octep_vf_device *oct = oq->octep_vf_dev; + int q_no = oq->q_no; + + octep_vf_oq_free_ring_buffers(oq); + + vfree(oq->buff_info); + + if (oq->desc_ring) + dma_free_coherent(oq->dev, + oq->max_count * OCTEP_VF_OQ_DESC_SIZE, + oq->desc_ring, oq->desc_ring_dma); + + vfree(oq); + oct->oq[q_no] = NULL; + oct->num_oqs--; + return 0; +} + +/** + * octep_vf_setup_oqs() - setup resources for all Rx queues. + * + * @oct: Octeon device private data structure. 
+ */ +int octep_vf_setup_oqs(struct octep_vf_device *oct) +{ + int i, retval = 0; + + oct->num_oqs = 0; + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + retval = octep_vf_setup_oq(oct, i); + if (retval) { + dev_err(&oct->pdev->dev, + "Failed to setup OQ(RxQ)-%d.\n", i); + goto oq_setup_err; + } + dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i); + } + + return 0; + +oq_setup_err: + while (i) { + i--; + octep_vf_free_oq(oct->oq[i]); + } + return retval; +} + +/** + * octep_vf_oq_dbell_init() - Initialize Rx queue doorbell. + * + * @oct: Octeon device private data structure. + * + * Write number of descriptors to Rx queue doorbell register. + */ +void octep_vf_oq_dbell_init(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); +} + +/** + * octep_vf_free_oqs() - Free resources of all Rx queues. + * + * @oct: Octeon device private data structure. + */ +void octep_vf_free_oqs(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + if (!oct->oq[i]) + continue; + octep_vf_free_oq(oct->oq[i]); + dev_dbg(&oct->pdev->dev, + "Successfully freed OQ(RxQ)-%d.\n", i); + } +} + +/** + * octep_vf_oq_check_hw_for_pkts() - Check for new Rx packets. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * + * Return: packets received after previous check. + */ +static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct, + struct octep_vf_oq *oq) +{ + u32 pkt_count, new_pkts; + + pkt_count = readl(oq->pkts_sent_reg); + new_pkts = pkt_count - oq->last_pkt_count; + + /* Clear the hardware packet counter register if the Rx queue is + * being processed continuously within a single interrupt and the + * counter is nearing its max value. + * The counter is not cleared on every read, to save write cycles. + */ + if (unlikely(pkt_count > 0xF0000000U)) { + writel(pkt_count, oq->pkts_sent_reg); + pkt_count = readl(oq->pkts_sent_reg); + new_pkts += pkt_count; + } + oq->last_pkt_count = pkt_count; + oq->pkts_pending += new_pkts; + return new_pkts; +} + +/** + * __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * @pkts_to_process: number of packets to be processed. + * + * Process the new packets in Rx queue. + * Packets larger than a single Rx buffer arrive in consecutive descriptors. + * But the count returned by the hardware accounts only for full packets, + * not fragments. + * + * Return: number of packets processed and pushed to stack. 
+ */ +static int __octep_vf_oq_process_rx(struct octep_vf_device *oct, + struct octep_vf_oq *oq, u16 pkts_to_process) +{ + struct octep_vf_oq_resp_hw_ext *resp_hw_ext = NULL; + netdev_features_t feat = oq->netdev->features; + struct octep_vf_rx_buffer *buff_info; + struct octep_vf_oq_resp_hw *resp_hw; + u32 pkt, rx_bytes, desc_used; + u16 data_offset, rx_ol_flags; + struct sk_buff *skb; + u32 read_idx; + + read_idx = oq->host_read_idx; + rx_bytes = 0; + desc_used = 0; + for (pkt = 0; pkt < pkts_to_process; pkt++) { + buff_info = (struct octep_vf_rx_buffer *)&oq->buff_info[read_idx]; + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + resp_hw = page_address(buff_info->page); + buff_info->page = NULL; + + /* Convert the length field from big-endian to CPU byte order */ + buff_info->len = be64_to_cpu(resp_hw->length); + if (oct->fw_info.rx_ol_flags) { + /* Extended response header is immediately after + * response header (resp_hw) + */ + resp_hw_ext = (struct octep_vf_oq_resp_hw_ext *) + (resp_hw + 1); + buff_info->len -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE; + /* Packet Data is immediately after + * extended response header. + */ + data_offset = OCTEP_VF_OQ_RESP_HW_SIZE + + OCTEP_VF_OQ_RESP_HW_EXT_SIZE; + rx_ol_flags = resp_hw_ext->rx_ol_flags; + } else { + /* Data is immediately after + * Hardware Rx response header. + */ + data_offset = OCTEP_VF_OQ_RESP_HW_SIZE; + rx_ol_flags = 0; + } + rx_bytes += buff_info->len; + + if (buff_info->len <= oq->max_single_buffer_size) { + skb = napi_build_skb((void *)resp_hw, PAGE_SIZE); + skb_reserve(skb, data_offset); + skb_put(skb, buff_info->len); + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + } else { + struct skb_shared_info *shinfo; + u16 data_len; + + skb = napi_build_skb((void *)resp_hw, PAGE_SIZE); + skb_reserve(skb, data_offset); + /* Head fragment includes response header(s); + * subsequent fragments contain only data. + */ + skb_put(skb, oq->max_single_buffer_size); + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + + shinfo = skb_shinfo(skb); + data_len = buff_info->len - oq->max_single_buffer_size; + while (data_len) { + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + buff_info = (struct octep_vf_rx_buffer *) + &oq->buff_info[read_idx]; + if (data_len < oq->buffer_size) { + buff_info->len = data_len; + data_len = 0; + } else { + buff_info->len = oq->buffer_size; + data_len -= oq->buffer_size; + } + + skb_add_rx_frag(skb, shinfo->nr_frags, + buff_info->page, 0, + buff_info->len, + buff_info->len); + buff_info->page = NULL; + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + } + } + + skb->dev = oq->netdev; + skb->protocol = eth_type_trans(skb, skb->dev); + if (feat & NETIF_F_RXCSUM && + OCTEP_VF_RX_CSUM_VERIFIED(rx_ol_flags)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + napi_gro_receive(oq->napi, skb); + } + + oq->host_read_idx = read_idx; + oq->refill_count += desc_used; + oq->stats.packets += pkt; + oq->stats.bytes += rx_bytes; + + return pkt; +} + +/** + * octep_vf_oq_process_rx() - Process Rx queue. + * + * @oq: Octeon Rx queue data structure. + * @budget: max number of packets that can be processed in one invocation. + * + * Check for newly received packets and process them. + * Keeps checking for new packets until budget is used or no new packets seen. + * + * Return: number of packets processed. 
+ */ +int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget) +{ + u32 pkts_available, pkts_processed, total_pkts_processed; + struct octep_vf_device *oct = oq->octep_vf_dev; + + pkts_available = 0; + pkts_processed = 0; + total_pkts_processed = 0; + while (total_pkts_processed < budget) { + /* update pending count only when current one exhausted */ + if (oq->pkts_pending == 0) + octep_vf_oq_check_hw_for_pkts(oct, oq); + pkts_available = min(budget - total_pkts_processed, + oq->pkts_pending); + if (!pkts_available) + break; + + pkts_processed = __octep_vf_oq_process_rx(oct, oq, + pkts_available); + oq->pkts_pending -= pkts_processed; + total_pkts_processed += pkts_processed; + } + + if (oq->refill_count >= oq->refill_threshold) { + u32 desc_refilled = octep_vf_oq_refill(oct, oq); + + /* flush pending writes before updating credits */ + smp_wmb(); + writel(desc_refilled, oq->pkts_credit_reg); + } + + return total_pkts_processed; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h new file mode 100644 index 000000000000..fe46838b5200 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_VF_RX_H_ +#define _OCTEP_VF_RX_H_ + +/* struct octep_vf_oq_desc_hw - Octeon Hardware OQ descriptor format. + * + * The descriptor ring is made of descriptors which have 2 64-bit values: + * + * @buffer_ptr: DMA address of the receive buffer page + * @info_ptr: DMA address of host memory, used by hardware to update the + * packet count. This is currently unused, to save PCI writes. + */ +struct octep_vf_oq_desc_hw { + dma_addr_t buffer_ptr; + u64 info_ptr; +}; + +static_assert(sizeof(struct octep_vf_oq_desc_hw) == 16); + +#define OCTEP_VF_OQ_DESC_SIZE (sizeof(struct octep_vf_oq_desc_hw)) + +/* Rx offload flags */ +#define OCTEP_VF_RX_OFFLOAD_VLAN_STRIP BIT(0) +#define OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM BIT(1) +#define OCTEP_VF_RX_OFFLOAD_UDP_CKSUM BIT(2) +#define OCTEP_VF_RX_OFFLOAD_TCP_CKSUM BIT(3) + +#define OCTEP_VF_RX_OFFLOAD_CKSUM (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \ + OCTEP_VF_RX_OFFLOAD_UDP_CKSUM | \ + OCTEP_VF_RX_OFFLOAD_TCP_CKSUM) + +#define OCTEP_VF_RX_IP_CSUM(flags) ((flags) & \ + (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \ + OCTEP_VF_RX_OFFLOAD_TCP_CKSUM | \ + OCTEP_VF_RX_OFFLOAD_UDP_CKSUM)) + +/* bit 0 is vlan strip */ +#define OCTEP_VF_RX_CSUM_IP_VERIFIED BIT(1) +#define OCTEP_VF_RX_CSUM_L4_VERIFIED BIT(2) + +#define OCTEP_VF_RX_CSUM_VERIFIED(flags) ((flags) & \ + (OCTEP_VF_RX_CSUM_L4_VERIFIED | \ + OCTEP_VF_RX_CSUM_IP_VERIFIED)) + +/* Extended Response Header in packet data received from Hardware. + * Includes metadata like checksum status. + * This is valid only if the hardware/firmware has published support for it. + * This is at offset 0 of packet data (skb->data). + */ +struct octep_vf_oq_resp_hw_ext { + /* Reserved. */ + u64 rsvd:48; + + /* rx offload flags */ + u16 rx_ol_flags; +}; + +static_assert(sizeof(struct octep_vf_oq_resp_hw_ext) == 8); + +#define OCTEP_VF_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_vf_oq_resp_hw_ext)) + +/* Length of Rx packet DMA'ed by Octeon to Host. + * This is in big-endian and needs to be converted to CPU endianness. + * Octeon writes this at the beginning of Rx buffer (skb->data). + */ +struct octep_vf_oq_resp_hw { + /* The Length of the packet. 
*/ + __be64 length; +}; + +static_assert(sizeof(struct octep_vf_oq_resp_hw) == 8); + +#define OCTEP_VF_OQ_RESP_HW_SIZE (sizeof(struct octep_vf_oq_resp_hw)) + +/* Pointer to data buffer. + * Driver keeps a pointer to the data buffer that it made available to + * the Octeon device. Since the descriptor ring keeps physical (bus) + * addresses, this field is required for the driver to keep track of + * the virtual address pointers. The fields are operated by + * OS-dependent routines. + */ +struct octep_vf_rx_buffer { + struct page *page; + + /* length from rx hardware descriptor after converting to cpu endian */ + u64 len; +}; + +#define OCTEP_VF_OQ_RECVBUF_SIZE (sizeof(struct octep_vf_rx_buffer)) + +/* Output Queue statistics. Each output queue has three stats fields. */ +struct octep_vf_oq_stats { + /* Number of packets received from the Device. */ + u64 packets; + + /* Number of bytes received from the Device. */ + u64 bytes; + + /* Number of times failed to allocate buffers. */ + u64 alloc_failures; +}; + +#define OCTEP_VF_OQ_STATS_SIZE (sizeof(struct octep_vf_oq_stats)) + +/* Hardware interface Rx statistics */ +struct octep_vf_iface_rx_stats { + /* Received packets */ + u64 pkts; + + /* Octets of received packets */ + u64 octets; + + /* Received PAUSE and Control packets */ + u64 pause_pkts; + + /* Received PAUSE and Control octets */ + u64 pause_octets; + + /* Filtered DMAC0 packets */ + u64 dmac0_pkts; + + /* Filtered DMAC0 octets */ + u64 dmac0_octets; + + /* Packets dropped due to RX FIFO full */ + u64 dropped_pkts_fifo_full; + + /* Octets dropped due to RX FIFO full */ + u64 dropped_octets_fifo_full; + + /* Error packets */ + u64 err_pkts; + + /* Filtered DMAC1 packets */ + u64 dmac1_pkts; + + /* Filtered DMAC1 octets */ + u64 dmac1_octets; + + /* NCSI-bound packets dropped */ + u64 ncsi_dropped_pkts; + + /* NCSI-bound octets dropped */ + u64 ncsi_dropped_octets; + + /* Multicast packets received. */ + u64 mcast_pkts; + + /* Broadcast packets received. */ + u64 bcast_pkts; +}; + +/* The Descriptor Ring Output Queue structure. + * This structure has all the information required to implement an + * Octeon OQ. + */ +struct octep_vf_oq { + u32 q_no; + + struct octep_vf_device *octep_vf_dev; + struct net_device *netdev; + struct device *dev; + + struct napi_struct *napi; + + /* The receive buffer list. This list has the virtual addresses + * of the buffers. + */ + struct octep_vf_rx_buffer *buff_info; + + /* Pointer to the mapped packet credit register. + * Host writes number of info/buffer ptrs available to this register. + */ + u8 __iomem *pkts_credit_reg; + + /* Pointer to the mapped packet sent register. + * Octeon writes the number of packets DMA'ed to host memory + * in this register. + */ + u8 __iomem *pkts_sent_reg; + + /* Statistics for this OQ. */ + struct octep_vf_oq_stats stats; + + /* Packets pending to be processed */ + u32 pkts_pending; + u32 last_pkt_count; + + /* Index in the ring where the driver should read the next packet */ + u32 host_read_idx; + + /* Number of descriptors in this ring. */ + u32 max_count; + u32 ring_size_mask; + + /* The number of descriptors pending refill. */ + u32 refill_count; + + /* Index in the ring where the driver will refill the + * descriptor's buffer + */ + u32 host_refill_idx; + u32 refill_threshold; + + /* The size of each buffer pointed to by the buffer pointer. */ + u32 buffer_size; + u32 max_single_buffer_size; + + /* The 8B aligned descriptor ring starts at this address. 
*/ + struct octep_vf_oq_desc_hw *desc_ring; + + /* DMA mapped address of the OQ descriptor ring. */ + dma_addr_t desc_ring_dma; +}; + +#define OCTEP_VF_OQ_SIZE (sizeof(struct octep_vf_oq)) +#endif /* _OCTEP_VF_RX_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c new file mode 100644 index 000000000000..47a5c054fdb6 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> +#include <net/netdev_queues.h> + +#include "octep_vf_config.h" +#include "octep_vf_main.h" + +/* Reset various index of Tx queue data structure. */ +static void octep_vf_iq_reset_indices(struct octep_vf_iq *iq) +{ + iq->fill_cnt = 0; + iq->host_write_index = 0; + iq->octep_vf_read_index = 0; + iq->flush_index = 0; + iq->pkts_processed = 0; + iq->pkt_in_done = 0; +} + +/** + * octep_vf_iq_process_completions() - Process Tx queue completions. + * + * @iq: Octeon Tx queue data structure. + * @budget: max number of completions to be processed in one invocation. + */ +int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget) +{ + u32 compl_pkts, compl_bytes, compl_sg; + struct octep_vf_device *oct = iq->octep_vf_dev; + struct octep_vf_tx_buffer *tx_buffer; + struct skb_shared_info *shinfo; + u32 fi = iq->flush_index; + struct sk_buff *skb; + u8 frags, i; + + compl_pkts = 0; + compl_sg = 0; + compl_bytes = 0; + iq->octep_vf_read_index = oct->hw_ops.update_iq_read_idx(iq); + + while (likely(budget && (fi != iq->octep_vf_read_index))) { + tx_buffer = iq->buff_info + fi; + skb = tx_buffer->skb; + + fi++; + if (unlikely(fi == iq->max_count)) + fi = 0; + compl_bytes += skb->len; + compl_pkts++; + budget--; + + if (!tx_buffer->gather) { + dma_unmap_single(iq->dev, tx_buffer->dma, + tx_buffer->skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + continue; + } + + /* Scatter/Gather */ + shinfo = skb_shinfo(skb); + frags = shinfo->nr_frags; + compl_sg++; + + dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], + tx_buffer->sglist[0].len[3], DMA_TO_DEVICE); + + i = 1; /* entry 0 is main skb, unmapped above */ + while (frags--) { + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], + tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE); + i++; + } + + dev_kfree_skb_any(skb); + } + + iq->pkts_processed += compl_pkts; + iq->stats.instr_completed += compl_pkts; + iq->stats.bytes_sent += compl_bytes; + iq->stats.sgentry_sent += compl_sg; + iq->flush_index = fi; + + netif_subqueue_completed_wake(iq->netdev, iq->q_no, compl_pkts, + compl_bytes, IQ_INSTR_SPACE(iq), + OCTEP_VF_WAKE_QUEUE_THRESHOLD); + + return !budget; +} + +/** + * octep_vf_iq_free_pending() - Free Tx buffers for pending completions. + * + * @iq: Octeon Tx queue data structure. 
+ */ +static void octep_vf_iq_free_pending(struct octep_vf_iq *iq) +{ + struct octep_vf_tx_buffer *tx_buffer; + struct skb_shared_info *shinfo; + u32 fi = iq->flush_index; + struct sk_buff *skb; + u8 frags, i; + + while (fi != iq->host_write_index) { + tx_buffer = iq->buff_info + fi; + skb = tx_buffer->skb; + + fi++; + if (unlikely(fi == iq->max_count)) + fi = 0; + + if (!tx_buffer->gather) { + dma_unmap_single(iq->dev, tx_buffer->dma, + tx_buffer->skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + continue; + } + + /* Scatter/Gather */ + shinfo = skb_shinfo(skb); + frags = shinfo->nr_frags; + + dma_unmap_single(iq->dev, + tx_buffer->sglist[0].dma_ptr[0], + tx_buffer->sglist[0].len[3], + DMA_TO_DEVICE); + + i = 1; /* entry 0 is main skb, unmapped above */ + while (frags--) { + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], + tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE); + i++; + } + + dev_kfree_skb_any(skb); + } + + iq->flush_index = fi; + netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no)); +} + +/** + * octep_vf_clean_iqs() - Clean Tx queues to shutdown the device. + * + * @oct: Octeon device private data structure. + * + * Free the buffers in Tx queue descriptors pending completion and + * reset queue indices + */ +void octep_vf_clean_iqs(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < oct->num_iqs; i++) { + octep_vf_iq_free_pending(oct->iq[i]); + octep_vf_iq_reset_indices(oct->iq[i]); + } +} + +/** + * octep_vf_setup_iq() - Setup a Tx queue. + * + * @oct: Octeon device private data structure. + * @q_no: Tx queue number to be setup. + * + * Allocate resources for a Tx queue. + */ +static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no) +{ + u32 desc_ring_size, buff_info_size, sglist_size; + struct octep_vf_iq *iq; + int i; + + iq = vzalloc(sizeof(*iq)); + if (!iq) + goto iq_alloc_err; + oct->iq[q_no] = iq; + + iq->octep_vf_dev = oct; + iq->netdev = oct->netdev; + iq->dev = &oct->pdev->dev; + iq->q_no = q_no; + iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); + iq->ring_size_mask = iq->max_count - 1; + iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); + iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no); + + /* Allocate memory for hardware queue descriptors */ + desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); + iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size, + &iq->desc_ring_dma, GFP_KERNEL); + if (unlikely(!iq->desc_ring)) { + dev_err(iq->dev, + "Failed to allocate DMA memory for IQ-%d\n", q_no); + goto desc_dma_alloc_err; + } + + /* Allocate memory for hardware SGLIST descriptors */ + sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT * + CFG_GET_IQ_NUM_DESC(oct->conf); + iq->sglist = dma_alloc_coherent(iq->dev, sglist_size, + &iq->sglist_dma, GFP_KERNEL); + if (unlikely(!iq->sglist)) { + dev_err(iq->dev, + "Failed to allocate DMA memory for IQ-%d SGLIST\n", + q_no); + goto sglist_alloc_err; + } + + /* allocate memory to manage Tx packets pending completion */ + buff_info_size = OCTEP_VF_IQ_TXBUFF_INFO_SIZE * iq->max_count; + iq->buff_info = vzalloc(buff_info_size); + if (!iq->buff_info) { + dev_err(iq->dev, + "Failed to allocate buff info for IQ-%d\n", q_no); + goto buff_info_err; + } + + /* Setup sglist addresses in tx_buffer entries */ + for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) { + struct octep_vf_tx_buffer *tx_buffer; + + tx_buffer = &iq->buff_info[i]; + tx_buffer->sglist = + &iq->sglist[i * OCTEP_VF_SGLIST_ENTRIES_PER_PKT]; + tx_buffer->sglist_dma = + iq->sglist_dma + (i 
* OCTEP_VF_SGLIST_SIZE_PER_PKT); + } + + octep_vf_iq_reset_indices(iq); + oct->hw_ops.setup_iq_regs(oct, q_no); + + oct->num_iqs++; + return 0; + +buff_info_err: + dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma); +sglist_alloc_err: + dma_free_coherent(iq->dev, desc_ring_size, + iq->desc_ring, iq->desc_ring_dma); +desc_dma_alloc_err: + vfree(iq); + oct->iq[q_no] = NULL; +iq_alloc_err: + return -1; +} + +/** + * octep_vf_free_iq() - Free Tx queue resources. + * + * @iq: Octeon Tx queue data structure. + * + * Free all the resources allocated for a Tx queue. + */ +static void octep_vf_free_iq(struct octep_vf_iq *iq) +{ + struct octep_vf_device *oct = iq->octep_vf_dev; + u64 desc_ring_size, sglist_size; + int q_no = iq->q_no; + + desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); + + vfree(iq->buff_info); + + if (iq->desc_ring) + dma_free_coherent(iq->dev, desc_ring_size, + iq->desc_ring, iq->desc_ring_dma); + + sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT * + CFG_GET_IQ_NUM_DESC(oct->conf); + if (iq->sglist) + dma_free_coherent(iq->dev, sglist_size, + iq->sglist, iq->sglist_dma); + + vfree(iq); + oct->iq[q_no] = NULL; + oct->num_iqs--; +} + +/** + * octep_vf_setup_iqs() - setup resources for all Tx queues. + * + * @oct: Octeon device private data structure. + */ +int octep_vf_setup_iqs(struct octep_vf_device *oct) +{ + int i; + + oct->num_iqs = 0; + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + if (octep_vf_setup_iq(oct, i)) { + dev_err(&oct->pdev->dev, + "Failed to setup IQ(TxQ)-%d.\n", i); + goto iq_setup_err; + } + dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i); + } + + return 0; + +iq_setup_err: + while (i) { + i--; + octep_vf_free_iq(oct->iq[i]); + } + return -1; +} + +/** + * octep_vf_free_iqs() - Free resources of all Tx queues. + * + * @oct: Octeon device private data structure. + */ +void octep_vf_free_iqs(struct octep_vf_device *oct) +{ + int i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + octep_vf_free_iq(oct->iq[i]); + dev_dbg(&oct->pdev->dev, + "Successfully destroyed IQ(TxQ)-%d.\n", i); + } + oct->num_iqs = 0; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h new file mode 100644 index 000000000000..f338b975103c --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_tx.h @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) VF Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_VF_TX_H_ +#define _OCTEP_VF_TX_H_ + +#define IQ_SEND_OK 0 +#define IQ_SEND_STOP 1 +#define IQ_SEND_FAILED -1 + +#define TX_BUFTYPE_NONE 0 +#define TX_BUFTYPE_NET 1 +#define TX_BUFTYPE_NET_SG 2 +#define NUM_TX_BUFTYPES 3 + +/* Hardware format for Scatter/Gather list + * + * 63 48|47 32|31 16|15 0 + * ----------------------------------------- + * | Len 0 | Len 1 | Len 2 | Len 3 | + * ----------------------------------------- + * | Ptr 0 | + * ----------------------------------------- + * | Ptr 1 | + * ----------------------------------------- + * | Ptr 2 | + * ----------------------------------------- + * | Ptr 3 | + * ----------------------------------------- + */ +struct octep_vf_tx_sglist_desc { + u16 len[4]; + dma_addr_t dma_ptr[4]; +}; + +static_assert(sizeof(struct octep_vf_tx_sglist_desc) == 40); + +/* Each Scatter/Gather entry sent to hardware holds four pointers. 
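+ * Within each 64-bit word the length fields are stored in reverse + * order relative to the pointers (Len 0 occupies the highest 16 bits, + * as in the diagram above), so SG entry i is addressed as + * sglist[i >> 2].dma_ptr[i & 3] with length sglist[i >> 2].len[3 - (i & 3)].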
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1' + * is for the main skb which also goes as a gather buffer to Octeon hardware. + * To allocate sufficient SGLIST entries for a packet with max fragments, + * align by adding 3 before calculating max SGLIST entries per packet. + */ +#define OCTEP_VF_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4) +#define OCTEP_VF_SGLIST_SIZE_PER_PKT \ + (OCTEP_VF_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_vf_tx_sglist_desc)) + +struct octep_vf_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct octep_vf_tx_sglist_desc *sglist; + dma_addr_t sglist_dma; + u8 gather; +}; + +#define OCTEP_VF_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_vf_tx_buffer)) + +/* VF Hardware interface Tx statistics */ +struct octep_vf_iface_tx_stats { + /* Total frames sent on the interface */ + u64 pkts; + + /* Total octets sent on the interface */ + u64 octs; + + /* Packets sent to a broadcast DMAC */ + u64 bcst; + + /* Packets sent to the multicast DMAC */ + u64 mcst; + + /* Packets dropped */ + u64 dropped; + + /* Reserved */ + u64 reserved[13]; +}; + +/* VF Input Queue statistics */ +struct octep_vf_iq_stats { + /* Instructions posted to this queue. */ + u64 instr_posted; + + /* Instructions copied by hardware for processing. */ + u64 instr_completed; + + /* Instructions that could not be processed. */ + u64 instr_dropped; + + /* Bytes sent through this queue. */ + u64 bytes_sent; + + /* Gather entries sent through this queue. */ + u64 sgentry_sent; + + /* Number of transmit failures due to TX_BUSY */ + u64 tx_busy; + + /* Number of times the queue is restarted */ + u64 restart_cnt; +}; + +/* The instruction (input) queue. + * The input queue is used to post raw (instruction) mode data or packet + * data to Octeon device from the host. Each input queue (up to 4) for + * an Octeon device has one such structure to represent it. + */ +struct octep_vf_iq { + u32 q_no; + + struct octep_vf_device *octep_vf_dev; + struct net_device *netdev; + struct device *dev; + struct netdev_queue *netdev_q; + + /* Index in input ring where driver should write the next packet */ + u16 host_write_index; + + /* Index in input ring where Octeon is expected to read next packet */ + u16 octep_vf_read_index; + + /* This index aids in finding the window in the queue where Octeon + * has read the commands. + */ + u16 flush_index; + + /* Statistics for this input queue. */ + struct octep_vf_iq_stats stats; + + /* Pointer to the Virtual Base addr of the input ring. */ + struct octep_vf_tx_desc_hw *desc_ring; + + /* DMA mapped base address of the input descriptor ring. */ + dma_addr_t desc_ring_dma; + + /* Info of Tx buffers pending completion. */ + struct octep_vf_tx_buffer *buff_info; + + /* Base pointer to Scatter/Gather lists for all ring descriptors. */ + struct octep_vf_tx_sglist_desc *sglist; + + /* DMA mapped addr of Scatter Gather Lists */ + dma_addr_t sglist_dma; + + /* Octeon doorbell register for the ring. */ + u8 __iomem *doorbell_reg; + + /* Octeon instruction count register for this ring. */ + u8 __iomem *inst_cnt_reg; + + /* interrupt level register for this ring */ + u8 __iomem *intr_lvl_reg; + + /* Maximum no. of instructions in this queue. */ + u32 max_count; + u32 ring_size_mask; + + u32 pkt_in_done; + u32 pkts_processed; + + u32 status; + + /* Number of instructions pending to be posted to Octeon. */ + u32 fill_cnt; + + /* The max. number of instructions that can be held pending by the + * driver before ringing doorbell.
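Aside: the buff_info teardown in octep_vf_iq_free_pending() earlier in this file walks the table with sglist[i >> 2].dma_ptr[i & 3]; that indexing falls straight out of the 4-wide octep_vf_tx_sglist_desc layout, where buffer i sits in entry i / 4, slot i % 4. A minimal sketch of the mapping (the helper names here are invented for illustration and are not part of the patch):

	static inline dma_addr_t sg_dma_ptr_at(const struct octep_vf_tx_sglist_desc *sg,
					       unsigned int i)
	{
		/* entry i / 4, slot i % 4 */
		return sg[i >> 2].dma_ptr[i & 3];
	}

	static inline u16 sg_len_at(const struct octep_vf_tx_sglist_desc *sg,
				    unsigned int i)
	{
		return sg[i >> 2].len[i & 3];
	}

With OCTEP_VF_SGLIST_ENTRIES_PER_PKT = (MAX_SKB_FRAGS + 1 + 3) / 4, the '+ 3' rounds up so a final partial group of four buffers still gets its own entry.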
+ */ + u32 fill_threshold; +}; + +/* Hardware Tx Instruction Header */ +struct octep_vf_instr_hdr { + /* Data Len */ + u64 tlen:16; + + /* Reserved */ + u64 rsvd:20; + + /* PKIND for SDP */ + u64 pkind:6; + + /* Front Data size */ + u64 fsz:6; + + /* No. of entries in gather list */ + u64 gsz:14; + + /* Gather indicator 1=gather */ + u64 gather:1; + + /* Reserved3 */ + u64 reserved3:1; +}; + +static_assert(sizeof(struct octep_vf_instr_hdr) == 8); + +/* Tx offload flags */ +#define OCTEP_VF_TX_OFFLOAD_VLAN_INSERT BIT(0) +#define OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM BIT(1) +#define OCTEP_VF_TX_OFFLOAD_UDP_CKSUM BIT(2) +#define OCTEP_VF_TX_OFFLOAD_TCP_CKSUM BIT(3) +#define OCTEP_VF_TX_OFFLOAD_SCTP_CKSUM BIT(4) +#define OCTEP_VF_TX_OFFLOAD_TCP_TSO BIT(5) +#define OCTEP_VF_TX_OFFLOAD_UDP_TSO BIT(6) + +#define OCTEP_VF_TX_OFFLOAD_CKSUM (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \ + OCTEP_VF_TX_OFFLOAD_UDP_CKSUM | \ + OCTEP_VF_TX_OFFLOAD_TCP_CKSUM) + +#define OCTEP_VF_TX_OFFLOAD_TSO (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \ + OCTEP_VF_TX_OFFLOAD_UDP_TSO) + +#define OCTEP_VF_TX_IP_CSUM(flags) ((flags) & \ + (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \ + OCTEP_VF_TX_OFFLOAD_TCP_CKSUM | \ + OCTEP_VF_TX_OFFLOAD_UDP_CKSUM)) + +#define OCTEP_VF_TX_TSO(flags) ((flags) & \ + (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \ + OCTEP_VF_TX_OFFLOAD_UDP_TSO)) + +struct tx_mdata { + /* offload flags */ + u16 ol_flags; + + /* gso size */ + u16 gso_size; + + /* gso flags */ + u16 gso_segs; + + /* reserved */ + u16 rsvd1; + + /* reserved */ + u64 rsvd2; +}; + +static_assert(sizeof(struct tx_mdata) == 16); + +/* 64-byte Tx instruction format. + * Format of instruction for a 64-byte mode input queue. + * + * Only the first 16 bytes (dptr and ih) are mandatory; the rest are optional + * and filled by the driver based on firmware/hardware capabilities. + * These optional headers together are called Front Data and its size is + * described by ih->fsz. + */ +struct octep_vf_tx_desc_hw { + /* Pointer where the input data is available. */ + u64 dptr; + + /* Instruction Header. */ + union { + struct octep_vf_instr_hdr ih; + u64 ih64; + }; + + union { + u64 txm64[2]; + struct tx_mdata txm; + }; + + /* Additional headers available in a 64-byte instruction.
*/ + u64 exhdr[4]; +}; + +static_assert(sizeof(struct octep_vf_tx_desc_hw) == 64); + +#define OCTEP_VF_IQ_DESC_SIZE (sizeof(struct octep_vf_tx_desc_hw)) +#endif /* _OCTEP_VF_TX_H_ */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index bb8d60e7bab1..d5c4f810da61 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -837,6 +837,8 @@ enum nix_af_status { NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429, NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430, NIX_AF_ERR_LINK_CREDITS = -431, + NIX_AF_ERR_INVALID_BPID = -434, + NIX_AF_ERR_INVALID_BPID_REQ = -435, NIX_AF_ERR_INVALID_MCAST_GRP = -436, NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437, NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 5c1d04a3c559..edd12d09dc89 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1484,7 +1484,7 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) /* All CGX mapped PFs are set with assigned NIX block during init */ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { blkaddr = pf->nix_blkaddr; - } else if (is_afvf(pcifunc)) { + } else if (is_lbk_vf(rvu, pcifunc)) { vf = pcifunc - 1; /* Assign NIX based on VF number. All even numbered VFs get * NIX0 and odd numbered gets NIX1 @@ -2034,7 +2034,7 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req, u16 target; /* Only PF can add VF permissions */ - if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc)) + if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_lbk_vf(rvu, pcifunc)) return -EOPNOTSUPP; target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1); @@ -2618,6 +2618,9 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) * 3. 
Cleanup pools (NPA) */ + /* Free allocated BPIDs */ + rvu_nix_flr_free_bpids(rvu, pcifunc); + /* Free multicast/mirror node associated with the 'pcifunc' */ rvu_nix_mcast_flr_free_entries(rvu, pcifunc); @@ -3151,6 +3154,7 @@ static int rvu_enable_sriov(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; int err, chans, vfs; + int pos = 0; if (!rvu_afvf_msix_vectors_num_ok(rvu)) { dev_warn(&pdev->dev, @@ -3158,6 +3162,12 @@ static int rvu_enable_sriov(struct rvu *rvu) return 0; } + /* Get RVU VF device id */ + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &rvu->vf_devid); + chans = rvu_get_num_lbk_chans(); if (chans < 0) return chans; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 43be37dd1f32..de8eba902276 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -288,6 +288,16 @@ enum rvu_pfvf_flags { #define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC) +struct nix_bp { + struct rsrc_bmap bpids; /* free bpids bitmap */ + u16 cgx_bpid_cnt; + u16 sdp_bpid_cnt; + u16 free_pool_base; + u16 *fn_map; /* pcifunc mapping */ + u8 *intf_map; /* interface type map */ + u8 *ref_cnt; +}; + struct nix_txsch { struct rsrc_bmap schq; u8 lvl; @@ -363,6 +373,7 @@ struct nix_hw { struct nix_lso lso; struct nix_txvlan txvlan; struct nix_ipolicer *ipolicer; + struct nix_bp bp; u64 *tx_credits; u8 cc_mcs_cnt; }; @@ -432,6 +443,13 @@ struct mbox_wq_info { struct workqueue_struct *mbox_wq; }; +struct channel_fwdata { + struct sdp_node_info info; + u8 valid; +#define RVU_CHANL_INFO_RESERVED 379 + u8 reserved[RVU_CHANL_INFO_RESERVED]; +}; + struct rvu_fwdata { #define RVU_FWDATA_HEADER_MAGIC 0xCFDA /* Custom Firmware Data*/ #define RVU_FWDATA_VERSION 0x0001 @@ -450,7 +468,8 @@ struct rvu_fwdata { u64 msixtr_base; u32 ptp_ext_clk_rate; u32 ptp_ext_tstamp; -#define FWDATA_RESERVED_MEM 1022 + struct channel_fwdata channel_data; +#define FWDATA_RESERVED_MEM 1014 u64 reserved[FWDATA_RESERVED_MEM]; #define CGX_MAX 9 #define CGX_LMACS_MAX 4 @@ -503,6 +522,7 @@ struct rvu { struct mutex rsrc_lock; /* Serialize resource alloc/free */ struct mutex alias_lock; /* Serialize bar2 alias access */ int vfs; /* Number of VFs attached to RVU */ + u16 vf_devid; /* VF device id */ int nix_blkaddr[MAX_NIX_BLKS]; /* Mbox */ @@ -732,9 +752,11 @@ static inline bool is_rvu_supports_nix1(struct rvu *rvu) /* Function Prototypes * RVU */ -static inline bool is_afvf(u16 pcifunc) +#define RVU_LBK_VF_DEVID 0xA0F8 +static inline bool is_lbk_vf(struct rvu *rvu, u16 pcifunc) { - return !(pcifunc & ~RVU_PFVF_FUNC_MASK); + return (!(pcifunc & ~RVU_PFVF_FUNC_MASK) && + (rvu->vf_devid == RVU_LBK_VF_DEVID)); } static inline bool is_vf(u16 pcifunc) @@ -794,7 +816,7 @@ void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); int rvu_sdp_init(struct rvu *rvu); bool is_sdp_pfvf(u16 pcifunc); bool is_sdp_pf(u16 pcifunc); -bool is_sdp_vf(u16 pcifunc); +bool is_sdp_vf(struct rvu *rvu, u16 pcifunc); /* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) @@ -873,6 +895,7 @@ int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx); int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc, u32 mcast_grp_idx, u16 mcam_index); +void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc); /* NPC APIs */ void rvu_npc_freemem(struct rvu *rvu); diff --git
a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index febd00c63bf6..d39001cdc707 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -499,29 +499,115 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) rvu_cgx_disable_dmac_entries(rvu, pcifunc); } +#define NIX_BPIDS_PER_LMAC 8 +#define NIX_BPIDS_PER_CPT 1 +static int nix_setup_bpids(struct rvu *rvu, struct nix_hw *hw, int blkaddr) +{ + struct nix_bp *bp = &hw->bp; + int err, max_bpids; + u64 cfg; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + max_bpids = FIELD_GET(NIX_CONST_MAX_BPIDS, cfg); + + /* Reserve the BPIds for CGX and SDP */ + bp->cgx_bpid_cnt = rvu->hw->cgx_links * NIX_BPIDS_PER_LMAC; + bp->sdp_bpid_cnt = rvu->hw->sdp_links * FIELD_GET(NIX_CONST_SDP_CHANS, cfg); + bp->free_pool_base = bp->cgx_bpid_cnt + bp->sdp_bpid_cnt + + NIX_BPIDS_PER_CPT; + bp->bpids.max = max_bpids - bp->free_pool_base; + + err = rvu_alloc_bitmap(&bp->bpids); + if (err) + return err; + + bp->fn_map = devm_kcalloc(rvu->dev, bp->bpids.max, + sizeof(u16), GFP_KERNEL); + if (!bp->fn_map) + return -ENOMEM; + + bp->intf_map = devm_kcalloc(rvu->dev, bp->bpids.max, + sizeof(u8), GFP_KERNEL); + if (!bp->intf_map) + return -ENOMEM; + + bp->ref_cnt = devm_kcalloc(rvu->dev, bp->bpids.max, + sizeof(u8), GFP_KERNEL); + if (!bp->ref_cnt) + return -ENOMEM; + + return 0; +} + +void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc) +{ + int blkaddr, bpid, err; + struct nix_hw *nix_hw; + struct nix_bp *bp; + + if (!is_lbk_vf(rvu, pcifunc)) + return; + + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return; + + bp = &nix_hw->bp; + + mutex_lock(&rvu->rsrc_lock); + for (bpid = 0; bpid < bp->bpids.max; bpid++) { + if (bp->fn_map[bpid] == pcifunc) { + bp->ref_cnt[bpid]--; + if (bp->ref_cnt[bpid]) + continue; + rvu_free_rsrc(&bp->bpids, bpid); + bp->fn_map[bpid] = 0; + } + } + mutex_unlock(&rvu->rsrc_lock); +} + int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, struct nix_bp_cfg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; + int blkaddr, pf, type, err; + u16 chan_base, chan, bpid; struct rvu_pfvf *pfvf; - int blkaddr, pf, type; - u16 chan_base, chan; + struct nix_hw *nix_hw; + struct nix_bp *bp; u64 cfg; pf = rvu_get_pf(pcifunc); - type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + type = is_lbk_vf(rvu, pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) return 0; pfvf = rvu_get_pfvf(rvu, pcifunc); - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + bp = &nix_hw->bp; chan_base = pfvf->rx_chan_base + req->chan_base; for (chan = chan_base; chan < (chan_base + req->chan_cnt); chan++) { cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), cfg & ~BIT_ULL(16)); + + if (type == NIX_INTF_TYPE_LBK) { + bpid = cfg & GENMASK(8, 0); + mutex_lock(&rvu->rsrc_lock); + rvu_free_rsrc(&bp->bpids, bpid - bp->free_pool_base); + for (bpid = 0; bpid < bp->bpids.max; bpid++) { + if (bp->fn_map[bpid] == pcifunc) { + bp->fn_map[bpid] = 0; + bp->ref_cnt[bpid] = 0; + } + } + mutex_unlock(&rvu->rsrc_lock); + } } return 0; } @@ -529,25 +615,20 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id) { - int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; - u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt; + int bpid, blkaddr, sdp_chan_base, err; struct rvu_hwinfo *hw = rvu->hw; struct rvu_pfvf *pfvf; + struct nix_hw *nix_hw; u8 cgx_id, lmac_id; - u64 cfg; + struct nix_bp *bp; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc); - cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); - lmac_chan_cnt = cfg & 0xFF; - - cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; - lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); + pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); - cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); - sdp_chan_cnt = cfg & 0xFFF; - sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; - pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + bp = &nix_hw->bp; /* Backpressure IDs range division * CGX channles are mapped to (0 - 191) BPIDs @@ -561,38 +642,48 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, */ switch (type) { case NIX_INTF_TYPE_CGX: - if ((req->chan_base + req->chan_cnt) > 16) - return -EINVAL; + if ((req->chan_base + req->chan_cnt) > NIX_BPIDS_PER_LMAC) + return NIX_AF_ERR_INVALID_BPID_REQ; rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); /* Assign bpid based on cgx, lmac and chan id */ - bpid = (cgx_id * hw->lmac_per_cgx * lmac_chan_cnt) + - (lmac_id * lmac_chan_cnt) + req->chan_base; + bpid = (cgx_id * hw->lmac_per_cgx * NIX_BPIDS_PER_LMAC) + + (lmac_id * NIX_BPIDS_PER_LMAC) + req->chan_base; if (req->bpid_per_chan) bpid += chan_id; - if (bpid > cgx_bpid_cnt) - return -EINVAL; + if (bpid > bp->cgx_bpid_cnt) + return NIX_AF_ERR_INVALID_BPID; break; case NIX_INTF_TYPE_LBK: - if ((req->chan_base + req->chan_cnt) > 63) - return -EINVAL; - bpid = cgx_bpid_cnt + req->chan_base; - if (req->bpid_per_chan) - bpid += chan_id; - if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) - return -EINVAL; + /* Alloc bpid from the free pool */ + mutex_lock(&rvu->rsrc_lock); + bpid = rvu_alloc_rsrc(&bp->bpids); + if (bpid < 0) { + mutex_unlock(&rvu->rsrc_lock); + return NIX_AF_ERR_INVALID_BPID; + } + bp->fn_map[bpid] = req->hdr.pcifunc; + bp->ref_cnt[bpid]++; + bpid += bp->free_pool_base; + mutex_unlock(&rvu->rsrc_lock); break; case NIX_INTF_TYPE_SDP: - if ((req->chan_base + req->chan_cnt) > 255) - return -EINVAL; + if ((req->chan_base + req->chan_cnt) > bp->sdp_bpid_cnt) + return NIX_AF_ERR_INVALID_BPID_REQ; + + /* Handle 
use case of 2 SDP blocks */ + if (!hw->cap.programmable_chans) + sdp_chan_base = pfvf->rx_chan_base - NIX_CHAN_SDP_CH_START; + else + sdp_chan_base = pfvf->rx_chan_base - hw->sdp_chan_base; - bpid = sdp_bpid_cnt + req->chan_base; + bpid = bp->cgx_bpid_cnt + req->chan_base + sdp_chan_base; if (req->bpid_per_chan) bpid += chan_id; - if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) - return -EINVAL; + if (bpid > (bp->cgx_bpid_cnt + bp->sdp_bpid_cnt)) + return NIX_AF_ERR_INVALID_BPID; break; default: return -EINVAL; @@ -612,7 +703,7 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, u64 cfg; pf = rvu_get_pf(pcifunc); - type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + type = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (is_sdp_pfvf(pcifunc)) type = NIX_INTF_TYPE_SDP; @@ -1523,7 +1614,7 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, cfg = NPC_TX_DEF_PKIND; rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); - intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + intf = is_lbk_vf(rvu, pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; if (is_sdp_pfvf(pcifunc)) intf = NIX_INTF_TYPE_SDP; @@ -1899,7 +1990,7 @@ static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) int pf = rvu_get_pf(pcifunc); u8 cgx_id = 0, lmac_id = 0; - if (is_afvf(pcifunc)) {/* LBK links */ + if (is_lbk_vf(rvu, pcifunc)) {/* LBK links */ return hw->cgx_links; } else if (is_pf_cgxmapped(rvu, pf)) { rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); @@ -1916,7 +2007,7 @@ static void nix_get_txschq_range(struct rvu *rvu, u16 pcifunc, struct rvu_hwinfo *hw = rvu->hw; int pf = rvu_get_pf(pcifunc); - if (is_afvf(pcifunc)) { /* LBK links */ + if (is_lbk_vf(rvu, pcifunc)) { /* LBK links */ *start = hw->cap.nix_txsch_per_cgx_lmac * link; *end = *start + hw->cap.nix_txsch_per_lbk_lmac; } else if (is_pf_cgxmapped(rvu, pf)) { /* CGX links */ @@ -3356,7 +3447,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, int pf; /* skip multicast pkt replication for AF's VFs & SDP links */ - if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_sdp_pfvf(pcifunc)) return 0; if (!hw->cap.nix_rx_multicast) @@ -3703,7 +3794,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; - if (is_afvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc)) rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu); else rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); @@ -4427,7 +4518,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (!nix_hw) return NIX_AF_ERR_INVALID_NIXBLK; - if (is_afvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc)) rvu_get_lbk_link_max_frs(rvu, &max_mtu); else rvu_get_lmac_link_max_frs(rvu, &max_mtu); @@ -4791,6 +4882,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) if (err) return err; + err = nix_setup_bpids(rvu, nix_hw, blkaddr); + if (err) + return err; + /* Configure segmentation offload formats */ nix_setup_lso(rvu, nix_hw, blkaddr); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 167145bdcb75..e350242bbafb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -61,28 +61,6 @@ int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena) return 0; } -static int npc_mcam_verify_pf_func(struct rvu *rvu, - struct mcam_entry *entry_data, u8 intf, - u16
pcifunc) -{ - u16 pf_func, pf_func_mask; - - if (is_npc_intf_rx(intf)) - return 0; - - pf_func_mask = (entry_data->kw_mask[0] >> 32) & - NPC_KEX_PF_FUNC_MASK; - pf_func = (entry_data->kw[0] >> 32) & NPC_KEX_PF_FUNC_MASK; - - pf_func = be16_to_cpu((__force __be16)pf_func); - if (pf_func_mask != NPC_KEX_PF_FUNC_MASK || - ((pf_func & ~RVU_PFVF_FUNC_MASK) != - (pcifunc & ~RVU_PFVF_FUNC_MASK))) - return -EINVAL; - - return 0; -} - void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { int blkaddr; @@ -417,7 +395,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, owner = mcam->entry2pfvf_map[index]; target_func = (entry->action >> 4) & 0xffff; /* do nothing when target is LBK/PF or owner is not PF */ - if (is_pffunc_af(owner) || is_afvf(target_func) || + if (is_pffunc_af(owner) || is_lbk_vf(rvu, target_func) || (owner & RVU_PFVF_FUNC_MASK) || !(target_func & RVU_PFVF_FUNC_MASK)) return; @@ -437,6 +415,10 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, return; } + /* AF modifies given action iff PF/VF has requested it */ + if ((entry->action & 0xFULL) != NIX_RX_ACTION_DEFAULT) + return; + /* copy VF default entry action to the VF mcam entry */ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr, target_func); @@ -626,7 +608,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, int blkaddr, index; /* AF's and SDP VFs work in promiscuous mode */ - if (is_afvf(pcifunc) || is_sdp_vf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) || is_sdp_vf(rvu, pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -791,7 +773,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, return; /* Skip LBK VFs */ - if (is_afvf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc)) return; /* If pkt replication is not supported, @@ -871,7 +853,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u16 vf_func; /* Only CGX PF/VF can add allmulticast entry */ - if (is_afvf(pcifunc) && is_sdp_vf(pcifunc)) + if (is_lbk_vf(rvu, pcifunc) && is_sdp_vf(rvu, pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -1850,8 +1832,8 @@ void npc_mcam_rsrcs_deinit(struct rvu *rvu) { struct npc_mcam *mcam = &rvu->hw->mcam; - kfree(mcam->bmap); - kfree(mcam->bmap_reverse); + bitmap_free(mcam->bmap); + bitmap_free(mcam->bmap_reverse); kfree(mcam->entry2pfvf_map); kfree(mcam->cntr2pfvf_map); kfree(mcam->entry2cntr_map); @@ -1904,21 +1886,20 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) mcam->pf_offset = mcam->nixlf_offset + nixlf_count; /* Allocate bitmaps for managing MCAM entries */ - mcam->bmap = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries), - sizeof(long), GFP_KERNEL); + mcam->bmap = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap) return -ENOMEM; - mcam->bmap_reverse = kmalloc_array(BITS_TO_LONGS(mcam->bmap_entries), - sizeof(long), GFP_KERNEL); + mcam->bmap_reverse = bitmap_zalloc(mcam->bmap_entries, GFP_KERNEL); if (!mcam->bmap_reverse) goto free_bmap; mcam->bmap_fcnt = mcam->bmap_entries; /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ - mcam->entry2pfvf_map = kmalloc_array(mcam->bmap_entries, - sizeof(u16), GFP_KERNEL); + mcam->entry2pfvf_map = kcalloc(mcam->bmap_entries, sizeof(u16), + GFP_KERNEL); + if (!mcam->entry2pfvf_map) goto free_bmap_reverse; @@ -1941,21 +1922,21 @@ int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) if (err) goto free_entry_map; - mcam->cntr2pfvf_map = kmalloc_array(mcam->counters.max, - sizeof(u16),
GFP_KERNEL); + mcam->cntr2pfvf_map = kcalloc(mcam->counters.max, sizeof(u16), + GFP_KERNEL); if (!mcam->cntr2pfvf_map) goto free_cntr_bmap; /* Alloc memory for MCAM entry to counter mapping and for tracking * counter's reference count. */ - mcam->entry2cntr_map = kmalloc_array(mcam->bmap_entries, - sizeof(u16), GFP_KERNEL); + mcam->entry2cntr_map = kcalloc(mcam->bmap_entries, sizeof(u16), + GFP_KERNEL); if (!mcam->entry2cntr_map) goto free_cntr_map; - mcam->cntr_refcnt = kmalloc_array(mcam->counters.max, - sizeof(u16), GFP_KERNEL); + mcam->cntr_refcnt = kcalloc(mcam->counters.max, sizeof(u16), + GFP_KERNEL); if (!mcam->cntr_refcnt) goto free_entry_cntr_map; @@ -1988,9 +1969,9 @@ free_cntr_bmap: free_entry_map: kfree(mcam->entry2pfvf_map); free_bmap_reverse: - kfree(mcam->bmap_reverse); + bitmap_free(mcam->bmap_reverse); free_bmap: - kfree(mcam->bmap); + bitmap_free(mcam->bmap); return -ENOMEM; } @@ -2852,12 +2833,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, else nix_intf = pfvf->nix_rx_intf; - if (!is_pffunc_af(pcifunc) && - npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { - rc = NPC_MCAM_INVALID_REQ; - goto exit; - } - /* For AF installed rules, the nix_intf should be set to target NIX */ if (is_pffunc_af(req->hdr.pcifunc)) nix_intf = req->intf; @@ -3209,10 +3184,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (!is_npc_interface_valid(rvu, req->intf)) return NPC_MCAM_INVALID_REQ; - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, - req->hdr.pcifunc)) - return NPC_MCAM_INVALID_REQ; - /* Try to allocate a MCAM entry */ entry_req.hdr.pcifunc = req->hdr.pcifunc; entry_req.contig = true; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 6f73ad9807f0..086f05c0376f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -439,6 +439,9 @@ #define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16) #define NIX_AF_LINKX_MCS_CNT_MASK GENMASK_ULL(33, 32) +#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12) +#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0) + /* SSO */ #define SSO_AF_CONST (0x1000) #define SSO_AF_CONST1 (0x1008) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c index ae50d56258ec..38cfe148f4b7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -40,8 +40,12 @@ bool is_sdp_pf(u16 pcifunc) !(pcifunc & RVU_PFVF_FUNC_MASK)); } -bool is_sdp_vf(u16 pcifunc) +#define RVU_SDP_VF_DEVID 0xA0F7 +bool is_sdp_vf(struct rvu *rvu, u16 pcifunc) { + if (!(pcifunc & ~RVU_PFVF_FUNC_MASK)) + return (rvu->vf_devid == RVU_SDP_VF_DEVID); + return (is_sdp_pfvf(pcifunc) && !!(pcifunc & RVU_PFVF_FUNC_MASK)); } @@ -52,6 +56,14 @@ int rvu_sdp_init(struct rvu *rvu) struct rvu_pfvf *pfvf; u32 i = 0; + if (rvu->fwdata->channel_data.valid) { + sdp_pf_num[0] = 0; + pfvf = &rvu->pf[sdp_pf_num[0]]; + pfvf->sdp_info = &rvu->fwdata->channel_data.info; + + return 0; + } + while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OTX2_SDP_PF, pdev)) != NULL) { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 7ca6941ea0b9..02d0b707aea5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ 
-951,8 +951,11 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) if (pfvf->ptp && qidx < pfvf->hw.tx_queues) { err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt, sizeof(*sq->timestamps)); - if (err) + if (err) { + kfree(sq->sg); + sq->sg = NULL; return err; + } } sq->head = 0; @@ -968,7 +971,14 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) sq->stats.bytes = 0; sq->stats.pkts = 0; - return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura); + if (err) { + kfree(sq->sg); + sq->sg = NULL; + return err; + } + + return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 2928898c7f8d..7f786de61014 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -314,7 +314,6 @@ static int otx2_set_channels(struct net_device *dev, pfvf->hw.tx_queues = channel->tx_count; if (pfvf->xdp_prog) pfvf->hw.xdp_queues = channel->rx_count; - pfvf->hw.non_qos_queues = pfvf->hw.tx_queues + pfvf->hw.xdp_queues; if (if_up) err = dev->netdev_ops->ndo_open(dev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index a57455aebff6..e5fe67e73865 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1744,6 +1744,7 @@ int otx2_open(struct net_device *netdev) /* RQ and SQs are mapped to different CQs, * so find out max CQ IRQs (i.e CINTs) needed. */ + pf->hw.non_qos_queues = pf->hw.tx_queues + pf->hw.xdp_queues; pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues, pf->hw.tc_tx_queues); @@ -2643,8 +2644,6 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog) xdp_features_clear_redirect_target(dev); } - pf->hw.non_qos_queues += pf->hw.xdp_queues; - if (if_up) otx2_open(pf->netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 4d519ea833b2..f828d32737af 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1403,7 +1403,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, bool *need_xdp_flush) { - unsigned char *hard_start, *data; + unsigned char *hard_start; int qidx = cq->cq_idx; struct xdp_buff xdp; struct page *page; @@ -1417,9 +1417,8 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq); - data = (unsigned char *)phys_to_virt(pa); - hard_start = page_address(page); - xdp_prepare_buff(&xdp, hard_start, data - hard_start, + hard_start = (unsigned char *)phys_to_virt(pa); + xdp_prepare_buff(&xdp, hard_start, OTX2_HEAD_ROOM, cqe->sg.seg_size, false); act = bpf_prog_run_xdp(prog, &xdp); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index a6e91573f8da..de123350bd46 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -4761,7 +4761,10 @@ static int mtk_probe(struct platform_device *pdev) } if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36)); + if (!err) + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if 
(err) { dev_err(&pdev->dev, "Wrong DMA config\n"); return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index f5b1f8c7834f..7f20813456e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2199,8 +2199,9 @@ reset_slave: if (cmd != MLX4_COMM_CMD_RESET) { mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n", slave, cmd); - /* Turn on internal error letting slave reset itself immeditaly, - * otherwise it might take till timeout on command is passed + /* Turn on internal error letting slave reset itself + * immediately, otherwise it might take till timeout on + * command is passed */ reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR); } @@ -2954,7 +2955,7 @@ static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port, dummy_admin.default_vlan = vlan; /* VF wants to move to other VST state which is valid with current - * rate limit. Either differnt default vlan in VST or other + * rate limit. Either different default vlan in VST or other * supported QoS priority. Otherwise we don't allow this change when * the TX rate is still configured. */ diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 4d4f9cf9facb..e130e7259275 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -115,7 +115,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn) return; } - /* Acessing the CQ outside of rcu_read_lock is safe, because + /* Accessing the CQ outside of rcu_read_lock is safe, because * the CQ is freed only after interrupt handling is completed. */ ++cq->arm_sn; @@ -137,7 +137,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) return; } - /* Acessing the CQ outside of rcu_read_lock is safe, because + /* Accessing the CQ outside of rcu_read_lock is safe, because * the CQ is freed only after interrupt handling is completed. */ cq->event(cq, event_type); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c index 9e3b76182088..cd754cd76bde 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c @@ -96,8 +96,8 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev) #define MLX4_EN_WRAP_AROUND_SEC 10UL /* By scheduling the overflow check every 5 seconds, we have a reasonably - * good chance we wont miss a wrap around. - * TOTO: Use a timer instead of a work queue to increase the guarantee. + * good chance we won't miss a wrap around. + * TODO: Use a timer instead of a work queue to increase the guarantee. 
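Aside: for the MLX4_EN_OVERFLOW_PERIOD define that closes this hunk, the arithmetic behind the comment above is worth spelling out (illustrative only, not part of the patch):

	/* period = MLX4_EN_WRAP_AROUND_SEC * HZ / 2
	 *        = 10 * HZ / 2 jiffies, i.e. 5 seconds
	 * The overflow work therefore samples the timecounter twice per
	 * 10 s wrap-around window, so one late run still lands inside
	 * the window.
	 */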
*/ #define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 33bbcced8105..d7da62cda821 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1072,7 +1072,8 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, 1, MLX4_MCAST_CONFIG); /* Update multicast list - we cache all addresses so they won't - * change while HW is updated holding the command semaphor */ + * change while HW is updated holding the command semaphore + */ netif_addr_lock_bh(dev); mlx4_en_cache_mclist(dev); netif_addr_unlock_bh(dev); @@ -1817,7 +1818,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_en_set_rss_steer_rules(priv)) mlx4_warn(mdev, "Failed setting steering rules\n"); - /* Attach rx QP to bradcast address */ + /* Attach rx QP to broadcast address */ eth_broadcast_addr(&mc_list[10]); mc_list[5] = priv->port; /* needed for B0 steering support */ if (mlx4_multicast_attach(mdev->dev, priv->rss_map.indir_qp, mc_list, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index a09b6e05337d..eac49657bd07 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -762,7 +762,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* Drop packet on bad receive or bad checksum */ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_ERROR)) { - en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n", + en_err(priv, "CQE completed in error - vendor syndrome:%d syndrome:%d\n", ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome, ((struct mlx4_err_cqe *)cqe)->syndrome); goto next; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 65cb63f6c465..1ddb11cb25f9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -992,7 +992,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) tx_info->ts_requested = 1; } - /* Prepare ctrl segement apart opcode+ownership, which depends on + /* Prepare ctrl segment apart opcode+ownership, which depends on * whether LSO is used */ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 6598b10a9ff4..9572a45f6143 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -210,7 +210,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1); s_eqe->slave_id = slave; - /* ensure all information is written before setting the ownersip bit */ + /* ensure all information is written before setting the ownership bit */ dma_wmb(); s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 
0x0 : 0x80; ++slave_eq->prod; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h index 954b86faac29..40ca29bb928c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h @@ -44,7 +44,7 @@ /* Default supported priorities for VPP allocation */ #define MLX4_DEFAULT_QOS_PRIO (0) -/* Derived from FW feature definition, 0 is the default vport fo all QPs */ +/* Derived from FW feature definition, 0 is the default vport for all QPs */ #define MLX4_VPP_DEFAULT_VPORT (0) struct mlx4_vport_qos_param { @@ -98,7 +98,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, u16 *available_vpp, u8 *vpp_p_up); /** - * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities. + * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among different priorities. * The total number of VPPs assigned to all for a port must not exceed * the value reported by available_vpp in mlx4_ALLOCATE_VPP_get. * VPP allocation is allowed only after the port type has been set, @@ -113,7 +113,7 @@ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, int mlx4_ALLOCATE_VPP_set(struct mlx4_dev *dev, u8 port, u8 *vpp_p_up); /** - * mlx4_SET_VPORT_QOS_get - Query QoS proporties of a Vport. + * mlx4_SET_VPORT_QOS_get - Query QoS properties of a Vport. * Each priority allowed for the Vport is assigned with a share of the BW, * and a BW limitation. This commands query the current QoS values. * @@ -128,7 +128,7 @@ int mlx4_SET_VPORT_QOS_get(struct mlx4_dev *dev, u8 port, u8 vport, struct mlx4_vport_qos_param *out_param); /** - * mlx4_SET_VPORT_QOS_set - Set QoS proporties of a Vport. + * mlx4_SET_VPORT_QOS_set - Set QoS properties of a Vport. * QoS parameters can be modified at any time, but must be initialized * before any QP is associated with the VPort. 
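Aside: the two VPort QoS calls documented in this hunk form a query/modify/apply pair. A hedged usage sketch follows; the get prototype appears above, while the set prototype and the MLX4_NUM_UP array size are assumed by symmetry with the surrounding mlx4 code, not shown in this hunk:

	struct mlx4_vport_qos_param qos[MLX4_NUM_UP];	/* one entry per priority (assumed) */
	int err;

	err = mlx4_SET_VPORT_QOS_get(dev, port, vport, qos);
	if (err)
		return err;
	/* ... adjust the BW-share / rate-limit fields here ... */
	err = mlx4_SET_VPORT_QOS_set(dev, port, vport, qos);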
* diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2581226836b5..7b02ff61126d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -129,7 +129,7 @@ static const struct mlx4_profile default_profile = { .num_cq = 1 << 16, .num_mcg = 1 << 13, .num_mpt = 1 << 19, - .num_mtt = 1 << 20, /* It is really num mtt segements */ + .num_mtt = 1 << 20, /* It is really num mtt segments */ }; static const struct mlx4_profile low_mem_profile = { @@ -1508,7 +1508,7 @@ static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) priv->v2p.port1 = port1; priv->v2p.port2 = port2; } else { - mlx4_err(dev, "Failed to change port mape: %d\n", err); + mlx4_err(dev, "Failed to change port map: %d\n", err); } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h index e9cd4bb6f83d..d3d9ec042d2c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h @@ -112,7 +112,7 @@ struct mlx4_en_stat_out_flow_control_mbox { __be64 tx_pause_duration; /* Number of transmitter transitions from XOFF state to XON state */ __be64 tx_pause_transition; - /* Reserverd */ + /* Reserved */ __be64 reserved[2]; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 256a06b3c096..4e43f4a7d246 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -2118,7 +2118,7 @@ static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset) * @data: output buffer to put the requested data into. * * Reads cable module eeprom data, puts the outcome data into - * data pointer paramer. + * data pointer parameter. * Returns num of read bytes on success or a negative error * code. 
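Aside: the kernel-doc corrected just above belongs to mlx4's cable-module EEPROM read helper in port.c (mlx4_get_module_info(); the hunk context elides the function name, so treat it as an assumption). A short usage sketch based on the documented return convention:

	u8 buf[16];
	int ret;

	/* read 16 bytes of module EEPROM starting at offset 0 */
	ret = mlx4_get_module_info(dev, port, 0, sizeof(buf), buf);
	if (ret < 0)
		return ret;	/* negative error code */
	/* on success, ret is the number of bytes actually read */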
*/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index cf0477f53dc4..47e7c2639774 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -210,7 +210,7 @@ static bool is_dpll_supported(struct mlx5_core_dev *dev) return false; if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) { - mlx5_core_warn(dev, "Missing SyncE capability\n"); + mlx5_core_dbg(dev, "Missing SyncE capability\n"); return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c index 18fed2b34fb1..c9c7fddb246f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c @@ -41,6 +41,7 @@ struct mlx5_dpll_synce_status { enum mlx5_msees_oper_status oper_status; bool ho_acq; bool oper_freq_measure; + enum mlx5_msees_failure_reason failure_reason; s32 frequency_diff; }; @@ -60,6 +61,7 @@ mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev, synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status); synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq); synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure); + synce_status->failure_reason = MLX5_GET(msees_reg, out, failure_reason); synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff); return 0; } @@ -99,6 +101,26 @@ mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status) } } +static enum dpll_lock_status_error +mlx5_dpll_lock_status_error_get(struct mlx5_dpll_synce_status *synce_status) +{ + switch (synce_status->oper_status) { + case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER: + fallthrough; + case MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING: + switch (synce_status->failure_reason) { + case MLX5_MSEES_FAILURE_REASON_PORT_DOWN: + return DPLL_LOCK_STATUS_ERROR_MEDIA_DOWN; + case MLX5_MSEES_FAILURE_REASON_TOO_HIGH_FREQUENCY_DIFF: + return DPLL_LOCK_STATUS_ERROR_FRACTIONAL_FREQUENCY_OFFSET_TOO_HIGH; + default: + return DPLL_LOCK_STATUS_ERROR_UNDEFINED; + } + default: + return DPLL_LOCK_STATUS_ERROR_NONE; + } +} + static enum dpll_pin_state mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status) { @@ -118,10 +140,11 @@ mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status, return 0; } -static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, - void *priv, - enum dpll_lock_status *status, - struct netlink_ext_ack *extack) +static int +mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, void *priv, + enum dpll_lock_status *status, + enum dpll_lock_status_error *status_error, + struct netlink_ext_ack *extack) { struct mlx5_dpll_synce_status synce_status; struct mlx5_dpll *mdpll = priv; @@ -131,6 +154,7 @@ static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll, if (err) return err; *status = mlx5_dpll_lock_status_get(&synce_status); + *status_error = mlx5_dpll_lock_status_error_get(&synce_status); return 0; } @@ -389,7 +413,7 @@ static void mlx5_dpll_remove(struct auxiliary_device *adev) struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev); struct mlx5_core_dev *mdev = mdpll->mdev; - cancel_delayed_work(&mdpll->work); + cancel_delayed_work_sync(&mdpll->work); mlx5_dpll_mdev_netdev_untrack(mdpll, mdev); destroy_workqueue(mdpll->wq); dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 
5d213a9886f1..5757f4f10c12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -240,11 +240,14 @@ static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params, return xsk->headroom + hw_mtu; } -static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk) +static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room) { - /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */ - u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL); u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); + u16 headroom; + + if (no_head_tail_room) + return SKB_DATA_ALIGN(hw_mtu); + headroom = mlx5e_get_linear_rq_headroom(params, NULL); return MLX5_SKB_FRAG_SZ(headroom + hw_mtu); } @@ -254,6 +257,7 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev, struct mlx5e_xsk_param *xsk, bool mpwqe) { + bool no_head_tail_room; u32 sz; /* XSK frames are mapped as individual pages, because frames may come in @@ -262,7 +266,13 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev, if (xsk) return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE; - sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false)); + no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk); + + /* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations. + * no_head_tail_room should be set in the case of XDP with Striding RQ + * when SKB is not linear. This is because another page is allocated for the linear part. + */ + sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room)); /* XDP in mlx5e doesn't support multiple packets per page. * Do not assume sz <= PAGE_SIZE if params->xdp_prog is set. @@ -289,7 +299,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev, if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) return false; - /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data + /* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set + * to exclude headroom and tailroom from calculations. + * no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs + * since packet data buffers don't have headroom and tailroom reserved for the SKB. + * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data * must fit into a CPU page.
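Aside: after this hunk mlx5e_rx_get_linear_sz_skb() has exactly two sizing modes, which the PAGE_SIZE check just below relies on. A condensed sketch (names from the patch; MLX5_SKB_FRAG_SZ is assumed, per mlx5 convention, to also reserve the skb_shared_info tailroom):

	/* Sketch of the two sizing modes; not a drop-in copy of the driver code */
	static u32 linear_sz_skb_sketch(u32 hw_mtu, u16 headroom, bool no_head_tail_room)
	{
		if (no_head_tail_room)				/* XDP + striding RQ, non-linear SKB */
			return SKB_DATA_ALIGN(hw_mtu);		/* packet data only */

		return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);	/* data plus head/tailroom */
	}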
*/ if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 078f56a3cbb2..fd4ef6431142 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -935,6 +935,7 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c) if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { mlx5e_ptp_rx_set_fs(c->priv); mlx5e_activate_rq(&c->rq); + netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, &c->napi); } mlx5e_trigger_napi_sched(&c->napi); } @@ -943,8 +944,10 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c) { int tc; - if (test_bit(MLX5E_PTP_STATE_RX, c->state)) + if (test_bit(MLX5E_PTP_STATE_RX, c->state)) { + netif_queue_set_napi(c->netdev, c->rq.ix, NETDEV_QUEUE_TYPE_RX, NULL); mlx5e_deactivate_rq(&c->rq); + } if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { for (tc = 0; tc < c->num_tc; tc++) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 05612d9c6080..c54fd01ea635 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -984,21 +984,41 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x) queue_work(sa_entry->ipsec->wq, &work->work); } -static void mlx5e_xfrm_update_curlft(struct xfrm_state *x) +static void mlx5e_xfrm_update_stats(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule; + struct net *net = dev_net(x->xso.dev); u64 packets, bytes, lastuse; lockdep_assert(lockdep_is_held(&x->lock) || - lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex)); + lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) || + lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock)); if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) return; + if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) { + mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse); + x->stats.integrity_failed += packets; + XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets); + + mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse); + XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets); + } + + if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) + return; + mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse); x->curlft.packets += packets; x->curlft.bytes += bytes; + + if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) { + mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse); + x->stats.replay += packets; + XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets); + } } static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev, @@ -1156,7 +1176,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = { .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, - .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft, + .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats, .xdo_dev_policy_add = mlx5e_xfrm_add_policy, .xdo_dev_policy_delete = mlx5e_xfrm_del_policy, .xdo_dev_policy_free = mlx5e_xfrm_free_policy, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index adaea3493193..7d943e93cf6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -137,7 +137,6 @@ struct mlx5e_ipsec_hw_stats { struct mlx5e_ipsec_sw_stats { atomic64_t ipsec_rx_drop_sp_alloc; atomic64_t ipsec_rx_drop_sadb_miss; - atomic64_t ipsec_rx_drop_syndrome; atomic64_t ipsec_tx_drop_bundle; atomic64_t ipsec_tx_drop_no_state; atomic64_t ipsec_tx_drop_not_ip; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 51a144246ea6..727fa7c18523 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -304,12 +304,6 @@ drop: return false; } -enum { - MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED, - MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED, - MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER, -}; - void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, u32 ipsec_meta_data) @@ -343,20 +337,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, xo = xfrm_offload(skb); xo->flags = CRYPTO_DONE; - - switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) { - case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED: - xo->status = CRYPTO_SUCCESS; - break; - case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED: - xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; - break; - case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER: - xo->status = CRYPTO_INVALID_PACKET_SYNTAX; - break; - default: - atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome); - } + xo->status = CRYPTO_SUCCESS; } int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata) @@ -374,8 +355,6 @@ int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metada return err; } - *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id, - MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED); - + *metadata = ipsec_obj_id; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 2ed99772f168..82064614846f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -43,7 +43,6 @@ #define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1) #define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0)) #define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0)) -#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24)) struct mlx5e_accel_tx_ipsec_state { struct xfrm_offload *xo; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index e0e36a09721c..dd36b04e30a0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -51,7 +51,6 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = { static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) }, - { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) }, diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c8e8f512803e..be809556b2e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1806,6 +1806,7 @@ void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); + netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, sq->cq.napi); } void mlx5e_tx_disable_queue(struct netdev_queue *txq) @@ -1819,6 +1820,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) { struct mlx5_wq_cyc *wq = &sq->wq; + netif_queue_set_napi(sq->netdev, sq->txq_ix, NETDEV_QUEUE_TYPE_TX, NULL); clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */ @@ -2560,6 +2562,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix); netif_napi_add(netdev, &c->napi, mlx5e_napi_poll); + netif_napi_set_irq(&c->napi, irq); err = mlx5e_open_queues(c, params, cparam); if (unlikely(err)) @@ -2602,12 +2605,16 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) mlx5e_activate_xsk(c); else mlx5e_activate_rq(&c->rq); + + netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, &c->napi); } static void mlx5e_deactivate_channel(struct mlx5e_channel *c) { int tc; + netif_queue_set_napi(c->netdev, c->ix, NETDEV_QUEUE_TYPE_RX, NULL); + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) mlx5e_deactivate_xsk(c); else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 58f4c0d0fafa..e7faf7e73ca4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -366,18 +366,18 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev) return -EIO; } - mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED); + mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED); /* Loop until device state turns to disable */ end = jiffies + msecs_to_jiffies(delay_ms); do { - if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) break; cond_resched(); } while (!time_after(jiffies, end)); - if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) { + if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) { dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n", mlx5_get_nic_state(dev), delay_ms); return -EIO; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 8ff6dc9bc803..9463ede84d8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -116,9 +116,9 @@ u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev) return MLX5_SENSOR_PCI_COMM_ERR; if (pci_channel_offline(dev->pdev)) return MLX5_SENSOR_PCI_ERR; - if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) return MLX5_SENSOR_NIC_DISABLED; - if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET) + if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET) return MLX5_SENSOR_NIC_SW_RESET; if (sensor_fw_synd_rfr(dev)) return MLX5_SENSOR_FW_SYND_RFR; @@ -185,7 +185,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev) /* Write the NIC interface field to initiate 
the reset, the command * interface address also resides here, don't overwrite it. */ - mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET); + mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET); return true; } @@ -246,13 +246,13 @@ recover_from_sw_reset: /* Recover from SW reset */ end = jiffies + msecs_to_jiffies(delay_ms); do { - if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED) + if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) break; msleep(20); } while (!time_after(jiffies, end)); - if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) { + if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) { dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n", mlx5_get_nic_state(dev), delay_ms); } @@ -272,26 +272,26 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev) u8 nic_interface = mlx5_get_nic_state(dev); switch (nic_interface) { - case MLX5_NIC_IFC_FULL: + case MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER: mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n"); break; - case MLX5_NIC_IFC_DISABLED: + case MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED: mlx5_core_warn(dev, "starting teardown\n"); break; - case MLX5_NIC_IFC_NO_DRAM_NIC: + case MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC: mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n"); break; - case MLX5_NIC_IFC_SW_RESET: + case MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET: /* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases: * 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded * and this is a VF), this is not recoverable by SW reset. * Logging of this is handled elsewhere. * 2. FW reset has been issued by another function, driver can * be reloaded to recover after the mode switches to - * MLX5_NIC_IFC_DISABLED. + * MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED. 
*/ if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR) mlx5_core_warn(dev, "NIC SW reset in progress\n"); @@ -555,12 +555,17 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work) &fw_reporter_ctx); } -static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = { +static const struct devlink_health_reporter_ops mlx5_fw_reporter_pf_ops = { .name = "fw", .diagnose = mlx5_fw_reporter_diagnose, .dump = mlx5_fw_reporter_dump, }; +static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = { + .name = "fw", + .diagnose = mlx5_fw_reporter_diagnose, +}; + static int mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter, void *priv_ctx, @@ -646,12 +651,17 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work) } } -static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = { +static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = { .name = "fw_fatal", .recover = mlx5_fw_fatal_reporter_recover, .dump = mlx5_fw_fatal_reporter_dump, }; +static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = { + .name = "fw_fatal", + .recover = mlx5_fw_fatal_reporter_recover, +}; + #define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000 #define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000 #define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000 @@ -659,10 +669,14 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = { void mlx5_fw_reporters_create(struct mlx5_core_dev *dev) { + const struct devlink_health_reporter_ops *fw_fatal_ops; struct mlx5_core_health *health = &dev->priv.health; + const struct devlink_health_reporter_ops *fw_ops; struct devlink *devlink = priv_to_devlink(dev); u64 grace_period; + fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops; + fw_ops = &mlx5_fw_reporter_pf_ops; if (mlx5_core_is_ecpf(dev)) { grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD; } else if (mlx5_core_is_pf(dev)) { @@ -670,18 +684,19 @@ void mlx5_fw_reporters_create(struct mlx5_core_dev *dev) } else { /* VF or SF */ grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD; + fw_fatal_ops = &mlx5_fw_fatal_reporter_ops; + fw_ops = &mlx5_fw_reporter_ops; } health->fw_reporter = - devl_health_reporter_create(devlink, &mlx5_fw_reporter_ops, - 0, dev); + devl_health_reporter_create(devlink, fw_ops, 0, dev); if (IS_ERR(health->fw_reporter)) mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n", PTR_ERR(health->fw_reporter)); health->fw_fatal_reporter = devl_health_reporter_create(devlink, - &mlx5_fw_fatal_reporter_ops, + fw_fatal_ops, grace_period, dev); if (IS_ERR(health->fw_fatal_reporter)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index bccf6e53556c..c2593625c09a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -187,31 +187,36 @@ static struct mlx5_profile profile[] = { }; static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili, - u32 warn_time_mili) + u32 warn_time_mili, const char *init_state) { unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili); unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili); u32 fw_initializing; - int err = 0; do { fw_initializing = ioread32be(&dev->iseg->initializing); if (!(fw_initializing >> 31)) break; - if (time_after(jiffies, end) || - test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) { - err = -EBUSY; - break; + if (time_after(jiffies, end)) { + mlx5_core_err(dev, "Firmware 
over %u MS in %s state, aborting\n", + max_wait_mili, init_state); + return -ETIMEDOUT; + } + if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) { + mlx5_core_warn(dev, "device is being removed, stop waiting for FW %s\n", + init_state); + return -ENODEV; } if (warn_time_mili && time_after(jiffies, warn)) { - mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n", - jiffies_to_msecs(end - warn) / 1000, fw_initializing); + mlx5_core_warn(dev, "Waiting for FW %s, timeout abort in %ds (0x%x)\n", + init_state, jiffies_to_msecs(end - warn) / 1000, + fw_initializing); warn = jiffies + msecs_to_jiffies(warn_time_mili); } msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT)); } while (true); - return err; + return 0; } static void mlx5_set_driver_version(struct mlx5_core_dev *dev) @@ -1151,12 +1156,10 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou /* wait for firmware to accept initialization segments configurations */ err = wait_fw_init(dev, timeout, - mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL)); - if (err) { - mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n", - timeout); + mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL), + "pre-initializing"); + if (err) return err; - } err = mlx5_cmd_enable(dev); if (err) { @@ -1166,12 +1169,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou mlx5_tout_query_iseg(dev); - err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0); - if (err) { - mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n", - mlx5_tout_ms(dev, FW_INIT)); + err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0, "initializing"); + if (err) goto err_cmd_cleanup; - } dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index a79b7959361b..58732f44940f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -312,13 +312,6 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev) return ret; } -enum { - MLX5_NIC_IFC_FULL = 0, - MLX5_NIC_IFC_DISABLED = 1, - MLX5_NIC_IFC_NO_DRAM_NIC = 2, - MLX5_NIC_IFC_SW_RESET = 7 -}; - u8 mlx5_get_nic_state(struct mlx5_core_dev *dev); void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index c93492b67788..99219ea52c4b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -74,7 +74,8 @@ static void mlx5_sf_dev_release(struct device *device) kfree(sf_dev); } -static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev) +static void mlx5_sf_dev_remove_aux(struct mlx5_core_dev *dev, + struct mlx5_sf_dev *sf_dev) { int id; @@ -138,7 +139,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id, return; xa_err: - mlx5_sf_dev_remove(dev, sf_dev); + mlx5_sf_dev_remove_aux(dev, sf_dev); add_err: mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n", sf_index, sfnum, err); @@ -149,7 +150,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; xa_erase(&table->devices, sf_index); - mlx5_sf_dev_remove(dev, sf_dev); + 
mlx5_sf_dev_remove_aux(dev, sf_dev); } static int @@ -367,7 +368,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table) xa_for_each(&table->devices, index, sf_dev) { xa_erase(&table->devices, index); - mlx5_sf_dev_remove(table->dev, sf_dev); + mlx5_sf_dev_remove_aux(table->dev, sf_dev); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 169c2c68ed5c..bc863e1f062e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -95,24 +95,29 @@ mdev_err: static void mlx5_sf_dev_remove(struct auxiliary_device *adev) { struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); - struct devlink *devlink = priv_to_devlink(sf_dev->mdev); + struct mlx5_core_dev *mdev = sf_dev->mdev; + struct devlink *devlink; - mlx5_drain_health_wq(sf_dev->mdev); + devlink = priv_to_devlink(mdev); + set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state); + mlx5_drain_health_wq(mdev); devlink_unregister(devlink); - if (mlx5_dev_is_lightweight(sf_dev->mdev)) - mlx5_uninit_one_light(sf_dev->mdev); + if (mlx5_dev_is_lightweight(mdev)) + mlx5_uninit_one_light(mdev); else - mlx5_uninit_one(sf_dev->mdev); - iounmap(sf_dev->mdev->iseg); - mlx5_mdev_uninit(sf_dev->mdev); + mlx5_uninit_one(mdev); + iounmap(mdev->iseg); + mlx5_mdev_uninit(mdev); mlx5_devlink_free(devlink); } static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev) { struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); + struct mlx5_core_dev *mdev = sf_dev->mdev; - mlx5_unload_one(sf_dev->mdev, false); + set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state); + mlx5_unload_one(mdev, false); } static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c index 7e36e1062139..64f4cc284aea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c @@ -54,6 +54,107 @@ enum dr_dump_rec_type { DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425, }; +static struct mlx5dr_dbg_dump_buff * +mlx5dr_dbg_dump_data_init_new_buff(struct mlx5dr_dbg_dump_data *dump_data) +{ + struct mlx5dr_dbg_dump_buff *new_buff; + + new_buff = kzalloc(sizeof(*new_buff), GFP_KERNEL); + if (!new_buff) + return NULL; + + new_buff->buff = kvzalloc(MLX5DR_DEBUG_DUMP_BUFF_SIZE, GFP_KERNEL); + if (!new_buff->buff) { + kfree(new_buff); + return NULL; + } + + INIT_LIST_HEAD(&new_buff->node); + list_add_tail(&new_buff->node, &dump_data->buff_list); + + return new_buff; +} + +static struct mlx5dr_dbg_dump_data * +mlx5dr_dbg_create_dump_data(void) +{ + struct mlx5dr_dbg_dump_data *dump_data; + + dump_data = kzalloc(sizeof(*dump_data), GFP_KERNEL); + if (!dump_data) + return NULL; + + INIT_LIST_HEAD(&dump_data->buff_list); + + if (!mlx5dr_dbg_dump_data_init_new_buff(dump_data)) { + kfree(dump_data); + return NULL; + } + + return dump_data; +} + +static void +mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data) +{ + struct mlx5dr_dbg_dump_buff *dump_buff, *tmp_buff; + + if (!dump_data) + return; + + list_for_each_entry_safe(dump_buff, tmp_buff, &dump_data->buff_list, node) { + kvfree(dump_buff->buff); + list_del(&dump_buff->node); + kfree(dump_buff); + } + + kfree(dump_data); +} + +static int +mlx5dr_dbg_dump_data_print(struct seq_file *file, char *str, u32 size) +{ + struct mlx5dr_domain 
*dmn = file->private; + struct mlx5dr_dbg_dump_data *dump_data; + struct mlx5dr_dbg_dump_buff *buff; + u32 buff_capacity, write_size; + int remain_size, ret; + + if (size >= MLX5DR_DEBUG_DUMP_BUFF_SIZE) + return -EINVAL; + + dump_data = dmn->dump_info.dump_data; + buff = list_last_entry(&dump_data->buff_list, + struct mlx5dr_dbg_dump_buff, node); + + buff_capacity = (MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1) - buff->index; + remain_size = buff_capacity - size; + write_size = (remain_size > 0) ? size : buff_capacity; + + if (likely(write_size)) { + ret = snprintf(buff->buff + buff->index, write_size + 1, "%s", str); + if (ret < 0) + return ret; + + buff->index += write_size; + } + + if (remain_size < 0) { + remain_size *= -1; + buff = mlx5dr_dbg_dump_data_init_new_buff(dump_data); + if (!buff) + return -ENOMEM; + + ret = snprintf(buff->buff, remain_size + 1, "%s", str + write_size); + if (ret < 0) + return ret; + + buff->index += remain_size; + } + + return 0; +} + void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl) { mutex_lock(&tbl->dmn->dump_info.dbg_mutex); @@ -109,36 +210,68 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id, { struct mlx5dr_action *action = action_mem->action; const u64 action_id = DR_DBG_PTR_TO_ID(action); + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; u64 hit_tbl_ptr, miss_tbl_ptr; u32 hit_tbl_id, miss_tbl_id; + int ret; switch (action->action_type) { case DR_ACTION_TYP_DROP: - seq_printf(file, "%d,0x%llx,0x%llx\n", - DR_DUMP_REC_TYPE_ACTION_DROP, action_id, rule_id); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_ACTION_DROP, action_id, + rule_id); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_FT: if (action->dest_tbl->is_fw_tbl) - seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_FT, action_id, - rule_id, action->dest_tbl->fw_tbl.id, - -1); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_FT, action_id, + rule_id, action->dest_tbl->fw_tbl.id, + -1); else - seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx\n", - DR_DUMP_REC_TYPE_ACTION_FT, action_id, - rule_id, action->dest_tbl->tbl->table_id, - DR_DBG_PTR_TO_ID(action->dest_tbl->tbl)); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x,0x%llx\n", + DR_DUMP_REC_TYPE_ACTION_FT, action_id, + rule_id, action->dest_tbl->tbl->table_id, + DR_DBG_PTR_TO_ID(action->dest_tbl->tbl)); + + if (ret < 0) + return ret; + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_CTR: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id, - action->ctr->ctr_id + action->ctr->offset); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id, + action->ctr->ctr_id + action->ctr->offset); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_TAG: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id, - action->flow_tag->flow_tag); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id, + action->flow_tag->flow_tag); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; 
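/* Outside the patch: every converted case in this switch follows the same
 * emit shape - format one CSV record into a bounded stack buffer with
 * snprintf(), then feed the formatted length to mlx5dr_dbg_dump_data_print(),
 * which copies it into the kvzalloc'd dump buffers. A minimal sketch of that
 * shape; the wrapper name and record fields here are illustrative, not part
 * of the driver:
 */
static int dr_dump_emit_csv(struct seq_file *file, int rec_type,
			    u64 obj_id, u64 rule_id)
{
	char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
	int ret;

	ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
		       "%d,0x%llx,0x%llx\n", rec_type, obj_id, rule_id);
	if (ret < 0)
		return ret;

	/* snprintf() returns the would-be length, so this scheme assumes a
	 * single record fits in MLX5DR_DEBUG_DUMP_BUFF_LENGTH bytes.
	 */
	return mlx5dr_dbg_dump_data_print(file, buff, ret);
}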
break; case DR_ACTION_TYP_MODIFY_HDR: { @@ -150,83 +283,171 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id, ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg; - seq_printf(file, "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x", - DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id, - rule_id, action->rewrite->index, - action->rewrite->single_action_opt, - ptrn_arg ? action->rewrite->num_of_actions : 0, - ptrn_arg ? ptrn->index : 0, - ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x", + DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id, + rule_id, action->rewrite->index, + action->rewrite->single_action_opt, + ptrn_arg ? action->rewrite->num_of_actions : 0, + ptrn_arg ? ptrn->index : 0, + ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; if (ptrn_arg) { for (i = 0; i < action->rewrite->num_of_actions; i++) { - seq_printf(file, ",0x%016llx", - be64_to_cpu(((__be64 *)rewrite_data)[i])); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + ",0x%016llx", + be64_to_cpu(((__be64 *)rewrite_data)[i])); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; } } - seq_puts(file, "\n"); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "\n"); + if (ret < 0) + return ret; + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; } case DR_ACTION_TYP_VPORT: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id, - action->vport->caps->num); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id, + action->vport->caps->num); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_TNL_L2_TO_L2: - seq_printf(file, "%d,0x%llx,0x%llx\n", - DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id, - rule_id); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id, + rule_id); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_TNL_L3_TO_L2: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id, - rule_id, - (action->rewrite->ptrn && action->rewrite->arg) ? - mlx5dr_arg_get_obj_id(action->rewrite->arg) : - action->rewrite->index); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id, + rule_id, + (action->rewrite->ptrn && action->rewrite->arg) ? 
+ mlx5dr_arg_get_obj_id(action->rewrite->arg) : + action->rewrite->index); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_L2_TO_TNL_L2: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id, - rule_id, action->reformat->id); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id, + rule_id, action->reformat->id); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_L2_TO_TNL_L3: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id, - rule_id, action->reformat->id); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id, + rule_id, action->reformat->id); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_POP_VLAN: - seq_printf(file, "%d,0x%llx,0x%llx\n", - DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id, - rule_id); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id, + rule_id); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_PUSH_VLAN: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id, - rule_id, action->push_vlan->vlan_hdr); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id, + rule_id, action->push_vlan->vlan_hdr); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_INSERT_HDR: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id, - rule_id, action->reformat->id, - action->reformat->param_0, - action->reformat->param_1); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id, + rule_id, action->reformat->id, + action->reformat->param_0, + action->reformat->param_1); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_REMOVE_HDR: - seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id, - rule_id, action->reformat->id, - action->reformat->param_0, - action->reformat->param_1); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id, + rule_id, action->reformat->id, + action->reformat->param_0, + action->reformat->param_1); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_SAMPLER: - seq_printf(file, - "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n", - DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, rule_id, - 0, 0, action->sampler->sampler_id, - action->sampler->rx_icm_addr, - action->sampler->tx_icm_addr); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id, + rule_id, 0, 0, action->sampler->sampler_id, + action->sampler->rx_icm_addr, + 
action->sampler->tx_icm_addr); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; case DR_ACTION_TYP_RANGE: if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) { @@ -247,10 +468,17 @@ dr_dump_rule_action_mem(struct seq_file *file, const u64 rule_id, DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl); } - seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n", - DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, rule_id, - hit_tbl_id, hit_tbl_ptr, miss_tbl_id, miss_tbl_ptr, - action->range->definer_id); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n", + DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id, + rule_id, hit_tbl_id, hit_tbl_ptr, miss_tbl_id, + miss_tbl_ptr, action->range->definer_id); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; break; default: return 0; @@ -263,8 +491,10 @@ static int dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste, bool is_rx, const u64 rule_id, u8 format_ver) { + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; char hw_ste_dump[DR_HEX_SIZE]; u32 mem_rec_type; + int ret; if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) { mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 : @@ -277,9 +507,16 @@ dr_dump_rule_mem(struct seq_file *file, struct mlx5dr_ste *ste, dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED); - seq_printf(file, "%d,0x%llx,0x%llx,%s\n", mem_rec_type, - dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), rule_id, - hw_ste_dump); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,%s\n", mem_rec_type, + dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)), + rule_id, hw_ste_dump); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; return 0; } @@ -309,6 +546,7 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule) { struct mlx5dr_rule_action_member *action_mem; const u64 rule_id = DR_DBG_PTR_TO_ID(rule); + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; struct mlx5dr_rule_rx_tx *rx = &rule->rx; struct mlx5dr_rule_rx_tx *tx = &rule->tx; u8 format_ver; @@ -316,8 +554,15 @@ static int dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule) format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver; - seq_printf(file, "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, rule_id, - DR_DBG_PTR_TO_ID(rule->matcher)); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE, + rule_id, DR_DBG_PTR_TO_ID(rule->matcher)); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; if (rx->nic_matcher) { ret = dr_dump_rule_rx_tx(file, rx, true, rule_id, format_ver); @@ -344,46 +589,94 @@ static int dr_dump_matcher_mask(struct seq_file *file, struct mlx5dr_match_param *mask, u8 criteria, const u64 matcher_id) { + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; char dump[DR_HEX_SIZE]; + int ret; - seq_printf(file, "%d,0x%llx,", DR_DUMP_REC_TYPE_MATCHER_MASK, - matcher_id); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "%d,0x%llx,", + DR_DUMP_REC_TYPE_MATCHER_MASK, matcher_id); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; if (criteria & DR_MATCHER_CRITERIA_OUTER) { dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer)); - seq_printf(file, "%s,", dump); 
+ ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%s,", dump); } else { - seq_puts(file, ","); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ","); } + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; + if (criteria & DR_MATCHER_CRITERIA_INNER) { dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner)); - seq_printf(file, "%s,", dump); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%s,", dump); } else { - seq_puts(file, ","); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ","); } + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; + if (criteria & DR_MATCHER_CRITERIA_MISC) { dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc)); - seq_printf(file, "%s,", dump); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%s,", dump); } else { - seq_puts(file, ","); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ","); } + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; + if (criteria & DR_MATCHER_CRITERIA_MISC2) { dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2)); - seq_printf(file, "%s,", dump); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%s,", dump); } else { - seq_puts(file, ","); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ","); } + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; + if (criteria & DR_MATCHER_CRITERIA_MISC3) { dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3)); - seq_printf(file, "%s\n", dump); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%s\n", dump); } else { - seq_puts(file, ",\n"); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",\n"); } + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; + return 0; } @@ -391,9 +684,19 @@ static int dr_dump_matcher_builder(struct seq_file *file, struct mlx5dr_ste_build *builder, u32 index, bool is_rx, const u64 matcher_id) { - seq_printf(file, "%d,0x%llx,%d,%d,0x%x\n", - DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, is_rx, - builder->lu_type); + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; + int ret; + + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,%d,%d,0x%x\n", + DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index, + is_rx, builder->lu_type); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; return 0; } @@ -403,6 +706,7 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx, struct mlx5dr_matcher_rx_tx *matcher_rx_tx, const u64 matcher_id) { + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; enum dr_dump_rec_type rec_type; u64 s_icm_addr, e_icm_addr; int i, ret; @@ -412,11 +716,19 @@ dr_dump_matcher_rx_tx(struct seq_file *file, bool is_rx, s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk); e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk); - seq_printf(file, "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n", - rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx), - matcher_id, matcher_rx_tx->num_of_builders, - dr_dump_icm_to_idx(s_icm_addr), - dr_dump_icm_to_idx(e_icm_addr)); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n", + rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx), + matcher_id, matcher_rx_tx->num_of_builders, + dr_dump_icm_to_idx(s_icm_addr), + 
dr_dump_icm_to_idx(e_icm_addr)); + + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; for (i = 0; i < matcher_rx_tx->num_of_builders; i++) { ret = dr_dump_matcher_builder(file, @@ -434,13 +746,22 @@ dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher) { struct mlx5dr_matcher_rx_tx *rx = &matcher->rx; struct mlx5dr_matcher_rx_tx *tx = &matcher->tx; + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; u64 matcher_id; int ret; matcher_id = DR_DBG_PTR_TO_ID(matcher); - seq_printf(file, "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER, - matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), matcher->prio); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER, + matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl), + matcher->prio); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; ret = dr_dump_matcher_mask(file, &matcher->mask, matcher->match_criteria, matcher_id); @@ -486,15 +807,24 @@ dr_dump_table_rx_tx(struct seq_file *file, bool is_rx, struct mlx5dr_table_rx_tx *table_rx_tx, const u64 table_id) { + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; enum dr_dump_rec_type rec_type; u64 s_icm_addr; + int ret; rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX : DR_DUMP_REC_TYPE_TABLE_TX; s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk); - seq_printf(file, "%d,0x%llx,0x%llx\n", rec_type, table_id, - dr_dump_icm_to_idx(s_icm_addr)); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx\n", rec_type, table_id, + dr_dump_icm_to_idx(s_icm_addr)); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; return 0; } @@ -503,11 +833,19 @@ static int dr_dump_table(struct seq_file *file, struct mlx5dr_table *table) { struct mlx5dr_table_rx_tx *rx = &table->rx; struct mlx5dr_table_rx_tx *tx = &table->tx; + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; int ret; - seq_printf(file, "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE, - DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn), - table->table_type, table->level); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE, + DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn), + table->table_type, table->level); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; if (rx->nic_dmn) { ret = dr_dump_table_rx_tx(file, true, rx, @@ -546,46 +884,86 @@ static int dr_dump_send_ring(struct seq_file *file, struct mlx5dr_send_ring *ring, const u64 domain_id) { - seq_printf(file, "%d,0x%llx,0x%llx,0x%x,0x%x\n", - DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, DR_DBG_PTR_TO_ID(ring), - domain_id, ring->cq->mcq.cqn, ring->qp->qpn); + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; + int ret; + + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%llx,0x%x,0x%x\n", + DR_DUMP_REC_TYPE_DOMAIN_SEND_RING, + DR_DBG_PTR_TO_ID(ring), domain_id, + ring->cq->mcq.cqn, ring->qp->qpn); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; + return 0; } -static int +static noinline_for_stack int dr_dump_domain_info_flex_parser(struct seq_file *file, const char *flex_parser_name, const u8 flex_parser_value, const u64 domain_id) { - seq_printf(file, "%d,0x%llx,%s,0x%x\n", - DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id, - flex_parser_name, flex_parser_value); + char 
buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; + int ret; + + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,%s,0x%x\n", + DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id, + flex_parser_name, flex_parser_value); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; + return 0; } -static int +static noinline_for_stack int dr_dump_domain_info_caps(struct seq_file *file, struct mlx5dr_cmd_caps *caps, const u64 domain_id) { + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; struct mlx5dr_cmd_vport_cap *vport_caps; unsigned long i, vports_num; + int ret; xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps) ; /* count the number of vports in xarray */ - seq_printf(file, "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n", - DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi, - caps->nic_rx_drop_address, caps->nic_tx_drop_address, - caps->flex_protocols, vports_num, caps->eswitch_manager); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n", + DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi, + caps->nic_rx_drop_address, caps->nic_tx_drop_address, + caps->flex_protocols, vports_num, caps->eswitch_manager); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) { vport_caps = xa_load(&caps->vports.vports_caps_xa, i); - seq_printf(file, "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n", - DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, domain_id, i, - vport_caps->vport_gvmi, vport_caps->icm_address_rx, - vport_caps->icm_address_tx); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n", + DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT, + domain_id, i, vport_caps->vport_gvmi, + vport_caps->icm_address_rx, + vport_caps->icm_address_tx); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; } return 0; } @@ -627,24 +1005,32 @@ dr_dump_domain_info(struct seq_file *file, struct mlx5dr_domain_info *info, return 0; } -static int +static noinline_for_stack int dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn) { + char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH]; u64 domain_id = DR_DBG_PTR_TO_ID(dmn); int ret; - seq_printf(file, "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n", - DR_DUMP_REC_TYPE_DOMAIN, - domain_id, dmn->type, dmn->info.caps.gvmi, - dmn->info.supp_sw_steering, - /* package version */ - LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, - LINUX_VERSION_SUBLEVEL, - pci_name(dmn->mdev->pdev), - 0, /* domain flags */ - dmn->num_buddies[DR_ICM_TYPE_STE], - dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION], - dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]); + ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, + "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n", + DR_DUMP_REC_TYPE_DOMAIN, + domain_id, dmn->type, dmn->info.caps.gvmi, + dmn->info.supp_sw_steering, + /* package version */ + LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL, + LINUX_VERSION_SUBLEVEL, + pci_name(dmn->mdev->pdev), + 0, /* domain flags */ + dmn->num_buddies[DR_ICM_TYPE_STE], + dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION], + dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]); + if (ret < 0) + return ret; + + ret = mlx5dr_dbg_dump_data_print(file, buff, ret); + if (ret) + return ret; ret = dr_dump_domain_info(file, &dmn->info, domain_id); if (ret < 0) @@ -683,11 +1069,91 @@ unlock_mutex: return ret; } -static int dr_dump_show(struct seq_file 
*file, void *priv) +static void * +dr_dump_start(struct seq_file *file, loff_t *pos) { - return dr_dump_domain_all(file, file->private); + struct mlx5dr_domain *dmn = file->private; + struct mlx5dr_dbg_dump_data *dump_data; + + if (atomic_read(&dmn->dump_info.state) != MLX5DR_DEBUG_DUMP_STATE_FREE) { + mlx5_core_warn(dmn->mdev, "Dump already in progress\n"); + return ERR_PTR(-EBUSY); + } + + atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS); + dump_data = dmn->dump_info.dump_data; + + if (dump_data) { + return seq_list_start(&dump_data->buff_list, *pos); + } else if (*pos == 0) { + dump_data = mlx5dr_dbg_create_dump_data(); + if (!dump_data) + goto exit; + + dmn->dump_info.dump_data = dump_data; + if (dr_dump_domain_all(file, dmn)) { + mlx5dr_dbg_destroy_dump_data(dump_data); + dmn->dump_info.dump_data = NULL; + goto exit; + } + + return seq_list_start(&dump_data->buff_list, *pos); + } + +exit: + atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE); + return NULL; } -DEFINE_SHOW_ATTRIBUTE(dr_dump); + +static void * +dr_dump_next(struct seq_file *file, void *v, loff_t *pos) +{ + struct mlx5dr_domain *dmn = file->private; + struct mlx5dr_dbg_dump_data *dump_data; + + dump_data = dmn->dump_info.dump_data; + + return seq_list_next(v, &dump_data->buff_list, pos); +} + +static void +dr_dump_stop(struct seq_file *file, void *v) +{ + struct mlx5dr_domain *dmn = file->private; + struct mlx5dr_dbg_dump_data *dump_data; + + if (v && IS_ERR(v)) + return; + + if (!v) { + dump_data = dmn->dump_info.dump_data; + if (dump_data) { + mlx5dr_dbg_destroy_dump_data(dump_data); + dmn->dump_info.dump_data = NULL; + } + } + + atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE); +} + +static int +dr_dump_show(struct seq_file *file, void *v) +{ + struct mlx5dr_dbg_dump_buff *entry; + + entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node); + seq_printf(file, "%s", entry->buff); + + return 0; +} + +static const struct seq_operations dr_dump_sops = { + .start = dr_dump_start, + .next = dr_dump_next, + .stop = dr_dump_stop, + .show = dr_dump_show, +}; +DEFINE_SEQ_ATTRIBUTE(dr_dump); void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h index def6cf853eea..57c6b363b870 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h @@ -1,10 +1,30 @@ /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
*/ +#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024) +#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512 + +enum { + MLX5DR_DEBUG_DUMP_STATE_FREE, + MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS, +}; + +struct mlx5dr_dbg_dump_buff { + char *buff; + u32 index; + struct list_head node; +}; + +struct mlx5dr_dbg_dump_data { + struct list_head buff_list; +}; + struct mlx5dr_dbg_dump_info { struct mutex dbg_mutex; /* protect dbg lists */ struct dentry *steering_debugfs; struct dentry *fdb_debugfs; + struct mlx5dr_dbg_dump_data *dump_data; + atomic_t state; }; void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index faa63ea9b83e..1915fa41c622 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -95,7 +95,7 @@ struct mlxsw_afa_set { */ has_trap:1, has_police:1; - unsigned int ref_count; + refcount_t ref_count; struct mlxsw_afa_set *next; /* Pointer to the next set. */ struct mlxsw_afa_set *prev; /* Pointer to the previous set, * note that set may have multiple @@ -120,7 +120,7 @@ struct mlxsw_afa_fwd_entry { struct rhash_head ht_node; struct mlxsw_afa_fwd_entry_ht_key ht_key; u32 kvdl_index; - unsigned int ref_count; + refcount_t ref_count; }; static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = { @@ -282,7 +282,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first) /* Need to initialize the set to pass by default */ mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0); set->ht_key.is_first = is_first; - set->ref_count = 1; + refcount_set(&set->ref_count, 1); return set; } @@ -330,7 +330,7 @@ static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa, static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa, struct mlxsw_afa_set *set) { - if (--set->ref_count) + if (!refcount_dec_and_test(&set->ref_count)) return; if (set->shared) mlxsw_afa_set_unshare(mlxsw_afa, set); @@ -350,7 +350,7 @@ static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa, set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key, mlxsw_afa_set_ht_params); if (set) { - set->ref_count++; + refcount_inc(&set->ref_count); mlxsw_afa_set_put(mlxsw_afa, orig_set); } else { set = orig_set; @@ -564,7 +564,7 @@ mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u16 local_port) if (!fwd_entry) return ERR_PTR(-ENOMEM); fwd_entry->ht_key.local_port = local_port; - fwd_entry->ref_count = 1; + refcount_set(&fwd_entry->ref_count, 1); err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node, @@ -607,7 +607,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port) fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key, mlxsw_afa_fwd_entry_ht_params); if (fwd_entry) { - fwd_entry->ref_count++; + refcount_inc(&fwd_entry->ref_count); return fwd_entry; } return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port); @@ -616,7 +616,7 @@ mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u16 local_port) static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa, struct mlxsw_afa_fwd_entry *fwd_entry) { - if (--fwd_entry->ref_count) + if (!refcount_dec_and_test(&fwd_entry->ref_count)) return; mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 0d5e6f9b466e..947500f8ed71 
100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -5,6 +5,7 @@ #include <linux/slab.h> #include <linux/list.h> #include <linux/errno.h> +#include <linux/refcount.h> #include "item.h" #include "core_acl_flex_keys.h" @@ -107,7 +108,7 @@ EXPORT_SYMBOL(mlxsw_afk_destroy); struct mlxsw_afk_key_info { struct list_head list; - unsigned int ref_count; + refcount_t ref_count; unsigned int blocks_count; int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value * is index inside "blocks" @@ -334,7 +335,7 @@ mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk, if (err) goto err_picker; list_add(&key_info->list, &mlxsw_afk->key_info_list); - key_info->ref_count = 1; + refcount_set(&key_info->ref_count, 1); return key_info; err_picker: @@ -356,7 +357,7 @@ mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk, key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage); if (key_info) { - key_info->ref_count++; + refcount_inc(&key_info->ref_count); return key_info; } return mlxsw_afk_key_info_create(mlxsw_afk, elusage); @@ -365,7 +366,7 @@ EXPORT_SYMBOL(mlxsw_afk_key_info_get); void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info) { - if (--key_info->ref_count) + if (!refcount_dec_and_test(&key_info->ref_count)) return; mlxsw_afk_key_info_destroy(key_info); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index 6b98c3287b49..f0ceb196a6ce 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -708,7 +708,6 @@ static const struct i2c_device_id mlxsw_m_i2c_id[] = { static struct i2c_driver mlxsw_m_i2c_driver = { .driver.name = "mlxsw_minimal", - .class = I2C_CLASS_HWMON, .id_table = mlxsw_m_i2c_id, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5d3413636a62..ecde2086c703 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2695,23 +2695,18 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp) { char sgcr_pl[MLXSW_REG_SGCR_LEN]; - u16 max_lag; int err; if (mlxsw_core_lag_mode(mlxsw_sp->core) != MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) return 0; - err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); - if (err) - return err; - /* In DDD mode, which we by default use, each LAG entry is 8 PGT * entries. The LAG table address needs to be 8-aligned, but that ought * to be the case, since the LAG table is allocated first. 
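* A worked example (numbers illustrative, not from the patch): with
* max_lag = 128, the range allocated below spans 128 * 8 = 1024 PGT
* entries, and the WARN_ON_ONCE() right after the allocation verifies
* that lag_pgt_base is indeed 8-aligned.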
*/ err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base, - max_lag * 8); + mlxsw_sp->max_lag * 8); if (err) return err; if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) { @@ -2728,33 +2723,31 @@ static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp) err_mid_alloc_range: mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, - max_lag * 8); + mlxsw_sp->max_lag * 8); return err; } static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp) { - u16 max_lag; - int err; - if (mlxsw_core_lag_mode(mlxsw_sp->core) != MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW) return; - err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); - if (err) - return; - mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base, - max_lag * 8); + mlxsw_sp->max_lag * 8); } #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe +struct mlxsw_sp_lag { + struct net_device *dev; + refcount_t ref_count; + u16 lag_id; +}; + static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) { char slcr_pl[MLXSW_REG_SLCR_LEN]; - u16 max_lag; u32 seed; int err; @@ -2773,7 +2766,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) if (err) return err; - err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); + err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag); if (err) return err; @@ -2784,7 +2777,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) if (err) return err; - mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper), + mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag), GFP_KERNEL); if (!mlxsw_sp->lags) { err = -ENOMEM; @@ -4269,19 +4262,48 @@ mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, } } -static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +static struct mlxsw_sp_lag * +mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, + struct netlink_ext_ack *extack) { char sldr_pl[MLXSW_REG_SLDR_LEN]; + struct mlxsw_sp_lag *lag; + u16 lag_id; + int i, err; + + for (i = 0; i < mlxsw_sp->max_lag; i++) { + if (!mlxsw_sp->lags[i].dev) + break; + } + + if (i == mlxsw_sp->max_lag) { + NL_SET_ERR_MSG_MOD(extack, + "Exceeded number of supported LAG devices"); + return ERR_PTR(-EBUSY); + } + lag_id = i; mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); + if (err) + return ERR_PTR(err); + + lag = &mlxsw_sp->lags[lag_id]; + lag->lag_id = lag_id; + lag->dev = lag_dev; + refcount_set(&lag->ref_count, 1); + + return lag; } -static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id) +static int +mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) { char sldr_pl[MLXSW_REG_SLDR_LEN]; - mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id); + lag->dev = NULL; + + mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl); } @@ -4329,34 +4351,44 @@ static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl); } -static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, - struct net_device *lag_dev, - u16 *p_lag_id) +static struct mlxsw_sp_lag * +mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev) { - struct mlxsw_sp_upper *lag; - int free_lag_id = -1; - u16 max_lag; - int err, i; + int i; - err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag); - if (err) - return err; + for (i = 0; i < mlxsw_sp->max_lag; 
i++) { + if (!mlxsw_sp->lags[i].dev) + continue; - for (i = 0; i < max_lag; i++) { - lag = mlxsw_sp_lag_get(mlxsw_sp, i); - if (lag->ref_count) { - if (lag->dev == lag_dev) { - *p_lag_id = i; - return 0; - } - } else if (free_lag_id < 0) { - free_lag_id = i; - } + if (mlxsw_sp->lags[i].dev == lag_dev) + return &mlxsw_sp->lags[i]; } - if (free_lag_id < 0) - return -EBUSY; - *p_lag_id = free_lag_id; - return 0; + + return NULL; +} + +static struct mlxsw_sp_lag * +mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_lag *lag; + + lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev); + if (lag) { + refcount_inc(&lag->ref_count); + return lag; + } + + return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack); +} + +static void +mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag) +{ + if (!refcount_dec_and_test(&lag->ref_count)) + return; + + mlxsw_sp_lag_destroy(mlxsw_sp, lag); } static bool @@ -4365,12 +4397,6 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, struct netdev_lag_upper_info *lag_upper_info, struct netlink_ext_ack *extack) { - u16 lag_id; - - if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { - NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices"); - return false; - } if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); return false; @@ -4482,22 +4508,16 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_upper *lag; + struct mlxsw_sp_lag *lag; u16 lag_id; u8 port_index; int err; - err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id); - if (err) - return err; - lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); - if (!lag->ref_count) { - err = mlxsw_sp_lag_create(mlxsw_sp, lag_id); - if (err) - return err; - lag->dev = lag_dev; - } + lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack); + if (IS_ERR(lag)) + return PTR_ERR(lag); + lag_id = lag->lag_id; err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index); if (err) return err; @@ -4515,7 +4535,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->local_port); mlxsw_sp_port->lag_id = lag_id; mlxsw_sp_port->lagged = 1; - lag->ref_count++; err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port); if (err) @@ -4542,7 +4561,6 @@ err_replay: err_router_join: mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); err_fid_port_join_lag: - lag->ref_count--; mlxsw_sp_port->lagged = 0; mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, mlxsw_sp_port->local_port); @@ -4550,8 +4568,7 @@ err_fid_port_join_lag: err_col_port_add: mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev); err_lag_uppers_bridge_join: - if (!lag->ref_count) - mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); + mlxsw_sp_lag_put(mlxsw_sp, lag); return err; } @@ -4560,12 +4577,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u16 lag_id = mlxsw_sp_port->lag_id; - struct mlxsw_sp_upper *lag; + struct mlxsw_sp_lag *lag; if (!mlxsw_sp_port->lagged) return; - lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); - WARN_ON(lag->ref_count == 0); + lag = &mlxsw_sp->lags[lag_id]; mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); @@ -4579,13 +4595,11 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port); - if 
(lag->ref_count == 1) - mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); + mlxsw_sp_lag_put(mlxsw_sp, lag); mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, mlxsw_sp_port->local_port); mlxsw_sp_port->lagged = 0; - lag->ref_count--; /* Make sure untagged frames are allowed to ingress */ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a0c9775fa955..898d24232935 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -78,11 +78,6 @@ struct mlxsw_sp_span_entry; enum mlxsw_sp_l3proto; union mlxsw_sp_l3addr; -struct mlxsw_sp_upper { - struct net_device *dev; - unsigned int ref_count; -}; - enum mlxsw_sp_rif_type { MLXSW_SP_RIF_TYPE_SUBPORT, MLXSW_SP_RIF_TYPE_VLAN, @@ -136,6 +131,7 @@ struct mlxsw_sp_span_ops; struct mlxsw_sp_qdisc_state; struct mlxsw_sp_mall_entry; struct mlxsw_sp_pgt; +struct mlxsw_sp_lag; struct mlxsw_sp_port_mapping { u8 module; @@ -164,7 +160,8 @@ struct mlxsw_sp { const struct mlxsw_bus_info *bus_info; unsigned char base_mac[ETH_ALEN]; const unsigned char *mac_mask; - struct mlxsw_sp_upper *lags; + struct mlxsw_sp_lag *lags; + u16 max_lag; struct mlxsw_sp_port_mapping *port_mapping; struct mlxsw_sp_port_mapping_events port_mapping_events; struct rhashtable sample_trigger_ht; @@ -257,12 +254,6 @@ struct mlxsw_sp_fid_core_ops { void (*fini)(struct mlxsw_sp *mlxsw_sp); }; -static inline struct mlxsw_sp_upper * -mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id) -{ - return &mlxsw_sp->lags[lag_id]; -} - struct mlxsw_sp_port_pcpu_stats { u64 rx_packets; u64 rx_bytes; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 7c59c8a13584..b01b000bc71c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -9,6 +9,7 @@ #include <linux/rhashtable.h> #include <linux/netdevice.h> #include <linux/mutex.h> +#include <linux/refcount.h> #include <net/net_namespace.h> #include <net/tc_act/tc_vlan.h> @@ -55,7 +56,7 @@ struct mlxsw_sp_acl_ruleset { struct rhash_head ht_node; /* Member of acl HT */ struct mlxsw_sp_acl_ruleset_ht_key ht_key; struct rhashtable rule_ht; - unsigned int ref_count; + refcount_t ref_count; unsigned int min_prio; unsigned int max_prio; unsigned long priv[]; @@ -99,7 +100,7 @@ static bool mlxsw_sp_acl_ruleset_is_singular(const struct mlxsw_sp_acl_ruleset *ruleset) { /* We hold a reference on ruleset ourselves */ - return ruleset->ref_count == 2; + return refcount_read(&ruleset->ref_count) == 2; } int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, @@ -176,7 +177,7 @@ mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp, ruleset = kzalloc(alloc_size, GFP_KERNEL); if (!ruleset) return ERR_PTR(-ENOMEM); - ruleset->ref_count = 1; + refcount_set(&ruleset->ref_count, 1); ruleset->ht_key.block = block; ruleset->ht_key.chain_index = chain_index; ruleset->ht_key.ops = ops; @@ -222,13 +223,13 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset) { - ruleset->ref_count++; + refcount_inc(&ruleset->ref_count); } static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset) { - if (--ruleset->ref_count) + if (!refcount_dec_and_test(&ruleset->ref_count)) return; mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); } diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 50ea1eff02b2..f20052776b3f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -9,6 +9,7 @@ #include <linux/rhashtable.h> #include <linux/netdevice.h> #include <linux/mutex.h> +#include <linux/refcount.h> #include <net/devlink.h> #include <trace/events/mlxsw.h> @@ -155,7 +156,7 @@ struct mlxsw_sp_acl_tcam_vregion { struct mlxsw_sp_acl_tcam_rehash_ctx ctx; } rehash; struct mlxsw_sp *mlxsw_sp; - unsigned int ref_count; + refcount_t ref_count; }; struct mlxsw_sp_acl_tcam_vchunk; @@ -176,7 +177,7 @@ struct mlxsw_sp_acl_tcam_vchunk { unsigned int priority; /* Priority within the vregion and group */ struct mlxsw_sp_acl_tcam_vgroup *vgroup; struct mlxsw_sp_acl_tcam_vregion *vregion; - unsigned int ref_count; + refcount_t ref_count; }; struct mlxsw_sp_acl_tcam_entry { @@ -769,7 +770,7 @@ mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp, vregion->tcam = tcam; vregion->mlxsw_sp = mlxsw_sp; vregion->vgroup = vgroup; - vregion->ref_count = 1; + refcount_set(&vregion->ref_count, 1); vregion->key_info = mlxsw_afk_key_info_get(afk, elusage); if (IS_ERR(vregion->key_info)) { @@ -856,7 +857,7 @@ mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp, */ return ERR_PTR(-EOPNOTSUPP); } - vregion->ref_count++; + refcount_inc(&vregion->ref_count); return vregion; } @@ -871,7 +872,7 @@ static void mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_vregion *vregion) { - if (--vregion->ref_count) + if (!refcount_dec_and_test(&vregion->ref_count)) return; mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion); } @@ -924,7 +925,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp, INIT_LIST_HEAD(&vchunk->ventry_list); vchunk->priority = priority; vchunk->vgroup = vgroup; - vchunk->ref_count = 1; + refcount_set(&vchunk->ref_count, 1); vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup, priority, elusage); @@ -1008,7 +1009,7 @@ mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp, if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info, elusage))) return ERR_PTR(-EINVAL); - vchunk->ref_count++; + refcount_inc(&vchunk->ref_count); return vchunk; } return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup, @@ -1019,7 +1020,7 @@ static void mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_vchunk *vchunk) { - if (--vchunk->ref_count) + if (!refcount_dec_and_test(&vchunk->ref_count)) return; mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 7164f9e6370f..87617df694ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -501,7 +501,7 @@ struct mlxsw_sp_rt6 { struct mlxsw_sp_lpm_tree { u8 id; /* tree ID */ - unsigned int ref_count; + refcount_t ref_count; enum mlxsw_sp_l3proto proto; unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; struct mlxsw_sp_prefix_usage prefix_usage; @@ -578,7 +578,7 @@ mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp) for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) { lpm_tree = &mlxsw_sp->router->lpm.trees[i]; - if (lpm_tree->ref_count == 0) + if (refcount_read(&lpm_tree->ref_count) == 0) return lpm_tree; } return NULL; @@ -654,7 +654,7 @@ mlxsw_sp_lpm_tree_create(struct 
mlxsw_sp *mlxsw_sp, sizeof(lpm_tree->prefix_usage)); memset(&lpm_tree->prefix_ref_count, 0, sizeof(lpm_tree->prefix_ref_count)); - lpm_tree->ref_count = 1; + refcount_set(&lpm_tree->ref_count, 1); return lpm_tree; err_left_struct_set: @@ -678,7 +678,7 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) { lpm_tree = &mlxsw_sp->router->lpm.trees[i]; - if (lpm_tree->ref_count != 0 && + if (refcount_read(&lpm_tree->ref_count) && lpm_tree->proto == proto && mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, prefix_usage)) { @@ -691,14 +691,15 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree) { - lpm_tree->ref_count++; + refcount_inc(&lpm_tree->ref_count); } static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lpm_tree *lpm_tree) { - if (--lpm_tree->ref_count == 0) - mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); + if (!refcount_dec_and_test(&lpm_tree->ref_count)) + return; + mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); } #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 6c749c148148..6397ff0dc951 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -61,7 +61,7 @@ struct mlxsw_sp_bridge_port { struct mlxsw_sp_bridge_device *bridge_device; struct list_head list; struct list_head vlans_list; - unsigned int ref_count; + refcount_t ref_count; u8 stp_state; unsigned long flags; bool mrouter; @@ -495,7 +495,7 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, BR_MCAST_FLOOD; INIT_LIST_HEAD(&bridge_port->vlans_list); list_add(&bridge_port->list, &bridge_device->ports_list); - bridge_port->ref_count = 1; + refcount_set(&bridge_port->ref_count, 1); err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev, NULL, NULL, NULL, false, extack); @@ -531,7 +531,7 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge, bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev); if (bridge_port) { - bridge_port->ref_count++; + refcount_inc(&bridge_port->ref_count); return bridge_port; } @@ -558,7 +558,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge, { struct mlxsw_sp_bridge_device *bridge_device; - if (--bridge_port->ref_count != 0) + if (!refcount_dec_and_test(&bridge_port->ref_count)) return; bridge_device = bridge_port->bridge_device; mlxsw_sp_bridge_port_destroy(bridge_port); diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c index 2e0fe16a4082..443128adbcb6 100644 --- a/drivers/net/ethernet/microchip/encx24j600-regmap.c +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -464,7 +464,7 @@ static struct regmap_config regcfg = { .val_bits = 16, .max_register = 0xee, .reg_stride = 2, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .val_format_endian = REGMAP_ENDIAN_LITTLE, .readable_reg = encx24j600_regmap_readable, .writeable_reg = encx24j600_regmap_writeable, @@ -485,7 +485,7 @@ static struct regmap_config phycfg = { .reg_bits = 8, .val_bits = 16, .max_register = 0x1f, - .cache_type = REGCACHE_RBTREE, + .cache_type = REGCACHE_MAPLE, .val_format_endian = REGMAP_ENDIAN_LITTLE, .readable_reg = encx24j600_phymap_readable, .writeable_reg = encx24j600_phymap_writeable, diff --git 
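
The encx24j600 hunk above only swaps the regmap cache backend from the rbtree to the maple-tree implementation; everything else in the config is untouched. A reduced sketch of a regmap configuration using the maple cache (register layout values are illustrative, not the device's real map):

#include <linux/regmap.h>

/* Illustrative 16-bit register map; only .cache_type matters here.
 * REGCACHE_MAPLE is the maple-tree backed cache that new code is
 * expected to prefer over REGCACHE_RBTREE.
 */
static const struct regmap_config example_regcfg = {
	.reg_bits = 8,
	.val_bits = 16,
	.reg_stride = 2,
	.max_register = 0xee,
	.cache_type = REGCACHE_MAPLE,
	.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
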
a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index a2b3f4433ca8..8a6ae171e375 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1055,7 +1055,7 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev, } static int lan743x_ethtool_get_eee(struct net_device *netdev, - struct ethtool_eee *eee) + struct ethtool_keee *eee) { struct lan743x_adapter *adapter = netdev_priv(netdev); struct phy_device *phydev = netdev->phydev; @@ -1092,7 +1092,7 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev, } static int lan743x_ethtool_set_eee(struct net_device *netdev, - struct ethtool_eee *eee) + struct ethtool_keee *eee) { struct lan743x_adapter *adapter; struct phy_device *phydev; diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 45e209a7d083..bd8aa83b47e5 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1196,7 +1196,7 @@ static int lan743x_sgmii_config(struct lan743x_adapter *adapter) ret = lan743x_is_sgmii_2_5G_mode(adapter, &status); if (ret < 0) { netif_err(adapter, drv, adapter->netdev, - "erro %d SGMII get mode failed\n", ret); + "error %d SGMII get mode failed\n", ret); return ret; } diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 2f04bc77a118..2801f08bf1c9 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -1712,13 +1712,13 @@ bool lan743x_ptp_request_tx_timestamp(struct lan743x_adapter *adapter) struct lan743x_ptp *ptp = &adapter->ptp; bool result = false; - spin_lock_bh(&ptp->tx_ts_lock); + spin_lock(&ptp->tx_ts_lock); if (ptp->pending_tx_timestamps < LAN743X_PTP_NUMBER_OF_TX_TIMESTAMPS) { /* request granted */ ptp->pending_tx_timestamps++; result = true; } - spin_unlock_bh(&ptp->tx_ts_lock); + spin_unlock(&ptp->tx_ts_lock); return result; } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c index 41fa2523d91d..5f2cd9a8cf8f 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c @@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x) /* Now, set PGIDs for each active LAG */ for (lag = 0; lag < lan966x->num_phys_ports; ++lag) { - struct net_device *bond = lan966x->ports[lag]->bond; + struct lan966x_port *port = lan966x->ports[lag]; int num_active_ports = 0; + struct net_device *bond; unsigned long bond_mask; u8 aggr_idx[16]; - if (!bond || (visited & BIT(lag))) + if (!port || !port->bond || (visited & BIT(lag))) continue; + bond = port->bond; bond_mask = lan966x_lag_get_mask(lan966x, bond); for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) { struct lan966x_port *port = lan966x->ports[p]; + if (!port) + continue; + lan_wr(ANA_PGID_PGID_SET(bond_mask), lan966x, ANA_PGID(p)); if (port->lag_tx_active) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c index 92108d354051..2e83bbb9477e 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c @@ -168,9 +168,10 @@ static void lan966x_port_link_up(struct lan966x_port *port) lan966x_taprio_speed_set(port, config->speed); /* Also the GIGA_MODE_ENA(1) 
needs to be set regardless of the - * port speed for QSGMII ports. + * port speed for QSGMII or SGMII ports. */ - if (phy_interface_num_ports(config->portmode) == 4) + if (phy_interface_num_ports(config->portmode) == 4 || + config->portmode == PHY_INTERFACE_MODE_SGMII) mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA_SET(1); lan_wr(config->duplex | mode, diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c index ac525ff1503e..3a01e13bd10b 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c @@ -25,6 +25,8 @@ static void lan966x_vcap_is1_port_keys(struct lan966x_port *port, for (int l = 0; l < admin->lookups; ++l) { out->prf(out->dst, "\n Lookup %d: ", l); + val = lan_rd(lan966x, ANA_VCAP_S1_CFG(port->chip_port, l)); + out->prf(out->dst, "\n other: "); switch (ANA_VCAP_S1_CFG_KEY_OTHER_CFG_GET(val)) { case VCAP_IS1_PS_OTHER_NORMAL: diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index d1f7fc8b1b71..3c066b62e689 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -757,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sparx5); sparx5->pdev = pdev; sparx5->dev = &pdev->dev; + spin_lock_init(&sparx5->tx_lock); /* Do switch core reset if available */ reset = devm_reset_control_get_optional_shared(&pdev->dev, "switch"); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 6f565c0c0c3d..316fed5f2735 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -280,6 +280,7 @@ struct sparx5 { int xtr_irq; /* Frame DMA */ int fdma_irq; + spinlock_t tx_lock; /* lock for frame transmission */ struct sparx5_rx rx; struct sparx5_tx tx; /* PTP */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index 6db6ac6a3bbc..ac7e1cffbcec 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -244,10 +244,12 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) } skb_tx_timestamp(skb); + spin_lock(&sparx5->tx_lock); if (sparx5->fdma_irq > 0) ret = sparx5_fdma_xmit(sparx5, ifh, skb); else ret = sparx5_inject(sparx5, ifh, skb, dev); + spin_unlock(&sparx5->tx_lock); if (ret == -EBUSY) goto busy; diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index d33b27214539..1332db9a08eb 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1249,15 +1249,47 @@ void mana_gd_free_res_map(struct gdma_resource *r) r->size = 0; } +static int irq_setup(unsigned int *irqs, unsigned int len, int node) +{ + const struct cpumask *next, *prev = cpu_none_mask; + cpumask_var_t cpus __free(free_cpumask_var); + int cpu, weight; + + if (!alloc_cpumask_var(&cpus, GFP_KERNEL)) + return -ENOMEM; + + rcu_read_lock(); + for_each_numa_hop_mask(next, node) { + weight = cpumask_weight_andnot(next, prev); + while (weight > 0) { + cpumask_andnot(cpus, next, prev); + for_each_cpu(cpu, cpus) { + if (len-- == 0) + goto done; + irq_set_affinity_and_hint(*irqs++, 
topology_sibling_cpumask(cpu)); + cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); + --weight; + } + } + prev = next; + } +done: + rcu_read_unlock(); + return 0; +} + static int mana_gd_setup_irqs(struct pci_dev *pdev) { - unsigned int max_queues_per_port = num_online_cpus(); struct gdma_context *gc = pci_get_drvdata(pdev); + unsigned int max_queues_per_port; struct gdma_irq_context *gic; unsigned int max_irqs, cpu; - int nvec, irq; + int start_irq_index = 1; + int nvec, *irqs, irq; int err, i = 0, j; + cpus_read_lock(); + max_queues_per_port = num_online_cpus(); if (max_queues_per_port > MANA_MAX_NUM_QUEUES) max_queues_per_port = MANA_MAX_NUM_QUEUES; @@ -1265,8 +1297,18 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) max_irqs = max_queues_per_port + 1; nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX); - if (nvec < 0) + if (nvec < 0) { + cpus_read_unlock(); return nvec; + } + if (nvec <= num_online_cpus()) + start_irq_index = 0; + + irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL); + if (!irqs) { + err = -ENOMEM; + goto free_irq_vector; + } gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context), GFP_KERNEL); @@ -1294,17 +1336,41 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) goto free_irq; } - err = request_irq(irq, mana_gd_intr, 0, gic->name, gic); - if (err) - goto free_irq; - - cpu = cpumask_local_spread(i, gc->numa_node); - irq_set_affinity_and_hint(irq, cpumask_of(cpu)); + if (!i) { + err = request_irq(irq, mana_gd_intr, 0, gic->name, gic); + if (err) + goto free_irq; + + /* If the number of IRQs is one more than the number of online CPUs, + * then we need to assign IRQ0 (the hwc IRQ) and IRQ1 to + * the same CPU. + * Otherwise we will use different CPUs for IRQ0 and IRQ1. + * Also we use cpumask_local_spread instead of + * cpumask_first for the node, because the node can be + * memory-only.
+ */ + if (start_irq_index) { + cpu = cpumask_local_spread(i, gc->numa_node); + irq_set_affinity_and_hint(irq, cpumask_of(cpu)); + } else { + irqs[start_irq_index] = irq; + } + } else { + irqs[i - start_irq_index] = irq; + err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0, + gic->name, gic); + if (err) + goto free_irq; + } } + err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node); + if (err) + goto free_irq; + gc->max_num_msix = nvec; gc->num_msix_usable = nvec; - + cpus_read_unlock(); return 0; free_irq: @@ -1317,8 +1383,10 @@ free_irq: } kfree(gc->irq_contexts); + kfree(irqs); gc->irq_contexts = NULL; free_irq_vector: + cpus_read_unlock(); pci_free_irq_vectors(pdev); return err; } diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 2194f2a7ab27..ed2fb44500b0 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -3078,5 +3078,5 @@ void ocelot_deinit_port(struct ocelot *ocelot, int port) } EXPORT_SYMBOL(ocelot_deinit_port); -MODULE_DESCRIPTION("Microsemi Ocelot (VSC7514) Switch driver"); +MODULE_DESCRIPTION("Microsemi Ocelot switch family library"); MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 2b383d92d7f5..2c3f62907958 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -460,7 +460,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, set_tun->ttl = ip6_dst_hoplimit(dst); dst_release(dst); } else { - set_tun->ttl = net->ipv6.devconf_all->hop_limit; + set_tun->ttl = READ_ONCE(net->ipv6.devconf_all->hop_limit); } #endif } else { diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 2967bab72505..15180538b80a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -1424,10 +1424,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_ mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask); return; + /* Both struct tcphdr and struct udphdr start with + * __be16 source; + * __be16 dest; + * so we can use the same code for both. + */ case FLOW_ACT_MANGLE_HDR_TYPE_TCP: case FLOW_ACT_MANGLE_HDR_TYPE_UDP: - mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val); - mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask); + if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) { + mangle_action->mangle.val = + (__force u32)cpu_to_be32(mangle_action->mangle.val << 16); + /* The mask of the mangle action is an inverse mask, + * so clear the dest tp port with 0xFFFF + * instead of using a rotate-left operation.
+ */ + mangle_action->mangle.mask = + (__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF); + } + if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) { + mangle_action->mangle.offset = 0; + mangle_action->mangle.val = + (__force u32)cpu_to_be32(mangle_action->mangle.val); + mangle_action->mangle.mask = + (__force u32)cpu_to_be32(mangle_action->mangle.mask); + } return; default: @@ -1864,10 +1884,30 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv, { struct flow_rule *rule = flow_cls_offload_flow_rule(flow); struct nfp_fl_ct_flow_entry *ct_entry; + struct flow_action_entry *ct_goto; struct nfp_fl_ct_zone_entry *zt; + struct flow_action_entry *act; bool wildcarded = false; struct flow_match_ct ct; - struct flow_action_entry *ct_goto; + int i; + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_REDIRECT_INGRESS: + case FLOW_ACTION_MIRRED: + case FLOW_ACTION_MIRRED_INGRESS: + if (act->dev->rtnl_link_ops && + !strcmp(act->dev->rtnl_link_ops->kind, "openvswitch")) { + NL_SET_ERR_MSG_MOD(extack, + "unsupported offload: out port is openvswitch internal port"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + } flow_rule_match_ct(rule, &ct); if (!ct.mask->ct_zone) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index e522845c7c21..0d7d138d6e0d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -1084,7 +1084,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, u16 nfp_mac_idx = 0; entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); - if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { + if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) { if (entry->bridge_count || !nfp_flower_is_supported_bridge(netdev)) { nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 3b3210d823e8..f28e769e6fda 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2776,6 +2776,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) case NFP_NFD_VER_NFD3: netdev->netdev_ops = &nfp_nfd3_netdev_ops; netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY; + netdev->xdp_features |= NETDEV_XDP_ACT_REDIRECT; break; case NFP_NFD_VER_NFDK: netdev->netdev_ops = &nfp_nfdk_netdev_ops; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 33b4c2856316..3f10c5365c80 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) const u32 barcfg_msix_general = NFP_PCIE_BAR_PCIE2CPP_MapType( NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) | - NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT; + NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT); const u32 barcfg_msix_xpb = NFP_PCIE_BAR_PCIE2CPP_MapType( NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) | - NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) | NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress( NFP_CPP_TARGET_ISLAND_XPB); const 
u32 barcfg_explicit[4] = { diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 7a549b834e97..31f896c4aa26 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1761,7 +1761,7 @@ static void nv_get_stats(int cpu, struct fe_priv *np, /* * nv_get_stats64: dev->ndo_get_stats64 function * Get latest stats value from the nic. - * Called with read_lock(&dev_base_lock) held for read - + * Called with rcu_read_lock() held - * only synchronized against unregister_netdevice. */ static void @@ -3090,7 +3090,7 @@ static void set_bufsize(struct net_device *dev) /* * nv_change_mtu: dev->change_mtu function - * Called with dev_base_lock held for read. + * Called with RTNL held for read. */ static int nv_change_mtu(struct net_device *dev, int new_mtu) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index c49aa358e424..6ba8d4aca0a0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -93,6 +93,7 @@ static void ionic_unmap_bars(struct ionic *ionic) bars[i].len = 0; } } + ionic->num_bars = 0; } void __iomem *ionic_bus_map_dbpage(struct ionic *ionic, int page_num) @@ -215,15 +216,17 @@ out: static void ionic_clear_pci(struct ionic *ionic) { - ionic->idev.dev_info_regs = NULL; - ionic->idev.dev_cmd_regs = NULL; - ionic->idev.intr_status = NULL; - ionic->idev.intr_ctrl = NULL; - - ionic_unmap_bars(ionic); - pci_release_regions(ionic->pdev); + if (ionic->num_bars) { + ionic->idev.dev_info_regs = NULL; + ionic->idev.dev_cmd_regs = NULL; + ionic->idev.intr_status = NULL; + ionic->idev.intr_ctrl = NULL; + + ionic_unmap_bars(ionic); + pci_release_regions(ionic->pdev); + } - if (atomic_read(&ionic->pdev->enable_cnt) > 0) + if (pci_is_enabled(ionic->pdev)) pci_disable_device(ionic->pdev); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 1e7c71f7f081..746072b4dbd0 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -319,22 +319,32 @@ do_check_time: u8 ionic_dev_cmd_status(struct ionic_dev *idev) { + if (!idev->dev_cmd_regs) + return (u8)PCI_ERROR_RESPONSE; return ioread8(&idev->dev_cmd_regs->comp.comp.status); } bool ionic_dev_cmd_done(struct ionic_dev *idev) { + if (!idev->dev_cmd_regs) + return false; return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE; } void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp) { + if (!idev->dev_cmd_regs) + return; memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp)); } void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd) { idev->opcode = cmd->cmd.opcode; + + if (!idev->dev_cmd_regs) + return; + memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd)); iowrite32(0, &idev->dev_cmd_regs->done); iowrite32(1, &idev->dev_cmd_regs->doorbell); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 2667e1cde16b..bfcfc2d7bcbd 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -8,6 +8,7 @@ #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/skbuff.h> +#include <linux/bpf_trace.h> #include "ionic_if.h" #include "ionic_regs.h" @@ -195,6 +196,11 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q, #define 
IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\ __GFP_COMP | __GFP_MEMALLOC) +#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \ + (VLAN_ETH_HLEN + \ + XDP_PACKET_HEADROOM + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))) + struct ionic_buf_info { struct page *page; dma_addr_t dma_addr; @@ -222,6 +228,8 @@ struct ionic_desc_info { struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1]; ionic_desc_cb cb; void *cb_arg; + struct xdp_frame *xdpf; + enum xdp_action act; }; #define IONIC_QUEUE_NAME_MAX_SZ 16 @@ -256,6 +264,9 @@ struct ionic_queue { struct ionic_txq_sg_desc *txq_sgl; struct ionic_rxq_sg_desc *rxq_sgl; }; + struct xdp_rxq_info *xdp_rxq_info; + struct ionic_queue *partner; + bool xdp_flush; dma_addr_t base_pa; dma_addr_t cmb_base_pa; dma_addr_t sg_base_pa; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index cd3c0b01402e..91183965a6b7 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -90,18 +90,23 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) { struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_dev *idev; unsigned int offset; unsigned int size; regs->version = IONIC_DEV_CMD_REG_VERSION; + idev = &lif->ionic->idev; + if (!idev->dev_info_regs) + return; + offset = 0; size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32); memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size); offset += size; size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32); - memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size); + memcpy_fromio(p + offset, idev->dev_cmd_regs->words, size); } static void ionic_get_link_ext_stats(struct net_device *netdev, @@ -721,6 +726,11 @@ static int ionic_set_channels(struct net_device *netdev, ionic_init_queue_params(lif, &qparam); + if ((ch->rx_count || ch->tx_count) && lif->xdp_prog) { + netdev_info(lif->netdev, "Split Tx/Rx interrupts not available when using XDP\n"); + return -EOPNOTSUPP; + } + if (ch->rx_count != ch->tx_count) { netdev_info(netdev, "The rx and tx count must be equal\n"); return -EINVAL; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c index 5f40324cd243..3c209c1a2337 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_fw.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c @@ -109,6 +109,11 @@ int ionic_firmware_update(struct ionic_lif *lif, const struct firmware *fw, dl = priv_to_devlink(ionic); devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); + if (!idev->dev_cmd_regs) { + err = -ENXIO; + goto err_out; + } + buf_sz = sizeof(idev->dev_cmd_regs->data); netdev_dbg(netdev, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index cf2d5ad7b68c..1496893c28be 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -46,6 +46,9 @@ static int ionic_start_queues(struct ionic_lif *lif); static void ionic_stop_queues(struct ionic_lif *lif); static void ionic_lif_queue_identify(struct ionic_lif *lif); +static int ionic_xdp_queues_config(struct ionic_lif *lif); +static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q); + static void ionic_dim_work(struct work_struct *work) { struct dim *dim = container_of(work, struct dim, work); @@ -422,6 +425,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq) 
qcq->sg_base_pa = 0; } + ionic_xdp_unregister_rxq_info(&qcq->q); ionic_qcq_intr_free(lif, qcq); vfree(qcq->cq.info); @@ -862,8 +866,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) .type = q->type, .ver = lif->qtype_info[q->type].version, .index = cpu_to_le32(q->index), - .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | - IONIC_QINIT_F_SG), + .flags = cpu_to_le16(IONIC_QINIT_F_IRQ), .intr_index = cpu_to_le16(cq->bound_intr->index), .pid = cpu_to_le16(q->pid), .ring_size = ilog2(q->num_descs), @@ -875,6 +878,13 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) }; int err; + q->partner = &lif->txqcqs[q->index]->q; + q->partner->partner = q; + + if (!lif->xdp_prog || + (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags)) + ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG); + if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) { ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB); ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa); @@ -1640,6 +1650,12 @@ static int ionic_init_nic_features(struct ionic_lif *lif) netdev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG | + NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_NDO_XMIT_SG; + return 0; } @@ -1777,6 +1793,21 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif) return err; } +static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu, + struct bpf_prog *xdp_prog) +{ + if (!xdp_prog) + return true; + + if (mtu <= IONIC_XDP_MAX_LINEAR_MTU) + return true; + + if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags) + return true; + + return false; +} + static int ionic_change_mtu(struct net_device *netdev, int new_mtu) { struct ionic_lif *lif = netdev_priv(netdev); @@ -1789,8 +1820,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu) .mtu = cpu_to_le32(new_mtu), }, }; + struct bpf_prog *xdp_prog; int err; + xdp_prog = READ_ONCE(lif->xdp_prog); + if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog)) + return -EINVAL; + err = ionic_adminq_post_wait(lif, &ctx); if (err) return err; @@ -2166,6 +2202,10 @@ static int ionic_txrx_enable(struct ionic_lif *lif) int derr = 0; int i, err; + err = ionic_xdp_queues_config(lif); + if (err) + return err; + for (i = 0; i < lif->nxqs; i++) { if (!(lif->rxqcqs[i] && lif->txqcqs[i])) { dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i); @@ -2211,6 +2251,8 @@ err_out: derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr); } + ionic_xdp_queues_config(lif); + return err; } @@ -2668,11 +2710,151 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif) ionic_vf_start(ionic); } +static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q) +{ + struct xdp_rxq_info *xi; + + if (!q->xdp_rxq_info) + return; + + xi = q->xdp_rxq_info; + q->xdp_rxq_info = NULL; + + xdp_rxq_info_unreg(xi); + kfree(xi); +} + +static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id) +{ + struct xdp_rxq_info *rxq_info; + int err; + + rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL); + if (!rxq_info) + return -ENOMEM; + + err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id); + if (err) { + dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n", + q->index, err); + goto err_out; + } + + err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL); + if (err) { + dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n", + q->index, err); + xdp_rxq_info_unreg(rxq_info); + 
goto err_out; + } + + q->xdp_rxq_info = rxq_info; + + return 0; + +err_out: + kfree(rxq_info); + return err; +} + +static int ionic_xdp_queues_config(struct ionic_lif *lif) +{ + unsigned int i; + int err; + + if (!lif->rxqcqs) + return 0; + + /* There's no need to rework memory if not going to/from NULL program. + * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info + * This way we don't need to keep an *xdp_prog in every queue struct. + */ + if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info) + return 0; + + for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) { + struct ionic_queue *q = &lif->rxqcqs[i]->q; + + if (q->xdp_rxq_info) { + ionic_xdp_unregister_rxq_info(q); + continue; + } + + err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id); + if (err) { + dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n", + i, err); + goto err_out; + } + } + + return 0; + +err_out: + for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) + ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q); + + return err; +} + +static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct bpf_prog *old_prog; + u32 maxfs; + + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { +#define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts" + NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT); + netdev_info(lif->netdev, XDP_ERR_SPLIT); + return -EOPNOTSUPP; + } + + if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) { +#define XDP_ERR_MTU "MTU is too large for XDP without frags support" + NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU); + netdev_info(lif->netdev, XDP_ERR_MTU); + return -EINVAL; + } + + maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; + if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags)) + maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU); + netdev->max_mtu = maxfs; + + if (!netif_running(netdev)) { + old_prog = xchg(&lif->xdp_prog, bpf->prog); + } else { + mutex_lock(&lif->queue_lock); + ionic_stop_queues_reconfig(lif); + old_prog = xchg(&lif->xdp_prog, bpf->prog); + ionic_start_queues_reconfig(lif); + mutex_unlock(&lif->queue_lock); + } + + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf) +{ + switch (bpf->command) { + case XDP_SETUP_PROG: + return ionic_xdp_config(netdev, bpf); + default: + return -EINVAL; + } +} + static const struct net_device_ops ionic_netdev_ops = { .ndo_open = ionic_open, .ndo_stop = ionic_stop, .ndo_eth_ioctl = ionic_eth_ioctl, .ndo_start_xmit = ionic_start_xmit, + .ndo_bpf = ionic_xdp, + .ndo_xdp_xmit = ionic_xdp_xmit, .ndo_get_stats64 = ionic_get_stats64, .ndo_set_rx_mode = ionic_ndo_set_rx_mode, .ndo_set_features = ionic_set_features, @@ -2755,6 +2937,8 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b) swap(a->q.base, b->q.base); swap(a->q.base_pa, b->q.base_pa); swap(a->q.info, b->q.info); + swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info); + swap(a->q.partner, b->q.partner); swap(a->q_base, b->q_base); swap(a->q_base_pa, b->q_base_pa); swap(a->q_size, b->q_size); @@ -3391,9 +3575,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif) napi_enable(&qcq->napi); - if (qcq->flags & IONIC_QCQ_F_INTR) + if (qcq->flags & IONIC_QCQ_F_INTR) { + irq_set_affinity_hint(qcq->intr.vector, + &qcq->intr.affinity_mask); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, 
IONIC_INTR_MASK_CLEAR); + } qcq->flags |= IONIC_QCQ_F_INITED; @@ -3559,7 +3746,10 @@ int ionic_lif_init(struct ionic_lif *lif) goto err_out_notifyq_deinit; } - err = ionic_init_nic_features(lif); + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + err = ionic_set_nic_features(lif, lif->netdev->features); + else + err = ionic_init_nic_features(lif); if (err) goto err_out_notifyq_deinit; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 61548b3eea93..42006de8069d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -37,6 +37,7 @@ struct ionic_tx_stats { u64 dma_map_err; u64 hwstamp_valid; u64 hwstamp_invalid; + u64 xdp_frames; }; struct ionic_rx_stats { @@ -51,6 +52,11 @@ struct ionic_rx_stats { u64 alloc_err; u64 hwstamp_valid; u64 hwstamp_invalid; + u64 xdp_drop; + u64 xdp_aborted; + u64 xdp_pass; + u64 xdp_tx; + u64 xdp_redirect; }; #define IONIC_QCQ_F_INITED BIT(0) @@ -135,6 +141,12 @@ struct ionic_lif_sw_stats { u64 hw_rx_over_errors; u64 hw_rx_missed_errors; u64 hw_tx_aborted_errors; + u64 xdp_drop; + u64 xdp_aborted; + u64 xdp_pass; + u64 xdp_tx; + u64 xdp_redirect; + u64 xdp_frames; }; enum ionic_lif_state_flags { @@ -230,6 +242,7 @@ struct ionic_lif { struct ionic_phc *phc; struct dentry *dentry; + struct bpf_prog *xdp_prog; }; struct ionic_phc { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 165ab08ad2dd..2f479de329fe 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -416,6 +416,9 @@ static void ionic_dev_cmd_clean(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; + if (!idev->dev_cmd_regs) + return; + iowrite32(0, &idev->dev_cmd_regs->doorbell); memset_io(&idev->dev_cmd_regs->cmd, 0, sizeof(idev->dev_cmd_regs->cmd)); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c index 1f6022fb7679..0107599a9dd4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c @@ -27,6 +27,12 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = { IONIC_LIF_STAT_DESC(hw_rx_over_errors), IONIC_LIF_STAT_DESC(hw_rx_missed_errors), IONIC_LIF_STAT_DESC(hw_tx_aborted_errors), + IONIC_LIF_STAT_DESC(xdp_drop), + IONIC_LIF_STAT_DESC(xdp_aborted), + IONIC_LIF_STAT_DESC(xdp_pass), + IONIC_LIF_STAT_DESC(xdp_tx), + IONIC_LIF_STAT_DESC(xdp_redirect), + IONIC_LIF_STAT_DESC(xdp_frames), }; static const struct ionic_stat_desc ionic_port_stats_desc[] = { @@ -135,6 +141,7 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = { IONIC_TX_STAT_DESC(csum_none), IONIC_TX_STAT_DESC(csum), IONIC_TX_STAT_DESC(vlan_inserted), + IONIC_TX_STAT_DESC(xdp_frames), }; static const struct ionic_stat_desc ionic_rx_stats_desc[] = { @@ -149,6 +156,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = { IONIC_RX_STAT_DESC(hwstamp_invalid), IONIC_RX_STAT_DESC(dropped), IONIC_RX_STAT_DESC(vlan_stripped), + IONIC_RX_STAT_DESC(xdp_drop), + IONIC_RX_STAT_DESC(xdp_aborted), + IONIC_RX_STAT_DESC(xdp_pass), + IONIC_RX_STAT_DESC(xdp_tx), + IONIC_RX_STAT_DESC(xdp_redirect), }; #define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc) @@ -171,6 +183,7 @@ static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num, stats->tx_csum += txstats->csum; stats->tx_hwstamp_valid += txstats->hwstamp_valid; 
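
The ionic_dev.c, ionic_main.c, ionic_fw.c, and ionic_ethtool.c hunks in this series all add the same NULL check before touching dev_cmd_regs, because ionic_clear_pci() clears those pointers and unmaps the BARs during PCI error teardown or FW reset. A reduced sketch of the two halves of that contract, with hypothetical example_* names standing in for the driver's:

#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical device with one MMIO-backed command register block. */
struct example_cmd_regs {
	u8 status;
};

struct example_dev {
	struct example_cmd_regs __iomem *dev_cmd_regs;
};

/* Teardown publishes "no registers" before unmapping the BARs,
 * as ionic_clear_pci() does.
 */
static void example_clear_pci(struct example_dev *idev)
{
	idev->dev_cmd_regs = NULL;
	/* ...then pci_release_regions()/unmap would follow here */
}

/* Every accessor checks the pointer and fails soft instead of
 * dereferencing a dead __iomem pointer, mirroring what
 * ionic_dev_cmd_status() now does.
 */
static u8 example_dev_cmd_status(struct example_dev *idev)
{
	if (!idev->dev_cmd_regs)
		return (u8)PCI_ERROR_RESPONSE;	/* all-ones error value */
	return ioread8(&idev->dev_cmd_regs->status);
}
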
stats->tx_hwstamp_invalid += txstats->hwstamp_invalid; + stats->xdp_frames += txstats->xdp_frames; } static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num, @@ -185,6 +198,11 @@ static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num, stats->rx_csum_error += rxstats->csum_error; stats->rx_hwstamp_valid += rxstats->hwstamp_valid; stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid; + stats->xdp_drop += rxstats->xdp_drop; + stats->xdp_aborted += rxstats->xdp_aborted; + stats->xdp_pass += rxstats->xdp_pass; + stats->xdp_tx += rxstats->xdp_tx; + stats->xdp_redirect += rxstats->xdp_redirect; } static void ionic_get_lif_stats(struct ionic_lif *lif, diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 54cd96b035d6..56a7ad5bff17 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -10,6 +10,23 @@ #include "ionic_lif.h" #include "ionic_txrx.h" +static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs); + +static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, + void *data, size_t len); + +static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, + const skb_frag_t *frag, + size_t offset, size_t len); + +static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q, + struct ionic_desc_info *desc_info); + +static void ionic_tx_clean(struct ionic_queue *q, + struct ionic_desc_info *desc_info, + struct ionic_cq_info *cq_info, + void *cb_arg); + static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { @@ -88,6 +105,21 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) return netdev_get_tx_queue(q->lif->netdev, q->index); } +static void *ionic_rx_buf_va(struct ionic_buf_info *buf_info) +{ + return page_address(buf_info->page) + buf_info->page_offset; +} + +static dma_addr_t ionic_rx_buf_pa(struct ionic_buf_info *buf_info) +{ + return buf_info->dma_addr + buf_info->page_offset; +} + +static unsigned int ionic_rx_buf_size(struct ionic_buf_info *buf_info) +{ + return min_t(u32, IONIC_MAX_BUF_LEN, IONIC_PAGE_SIZE - buf_info->page_offset); +} + static int ionic_rx_page_alloc(struct ionic_queue *q, struct ionic_buf_info *buf_info) { @@ -162,7 +194,7 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q, if (page_to_nid(buf_info->page) != numa_mem_id()) return false; - size = ALIGN(used, IONIC_PAGE_SPLIT_SZ); + size = ALIGN(used, q->xdp_rxq_info ? 
IONIC_PAGE_SIZE : IONIC_PAGE_SPLIT_SZ); buf_info->page_offset += size; if (buf_info->page_offset >= IONIC_PAGE_SIZE) return false; @@ -174,7 +206,10 @@ static bool ionic_rx_buf_recycle(struct ionic_queue *q, static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, struct ionic_desc_info *desc_info, - struct ionic_rxq_comp *comp) + unsigned int headroom, + unsigned int len, + unsigned int num_sg_elems, + bool synced) { struct net_device *netdev = q->lif->netdev; struct ionic_buf_info *buf_info; @@ -183,12 +218,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, struct sk_buff *skb; unsigned int i; u16 frag_len; - u16 len; stats = q_to_rx_stats(q); buf_info = &desc_info->bufs[0]; - len = le16_to_cpu(comp->len); prefetchw(buf_info->page); @@ -200,24 +233,26 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, return NULL; } - i = comp->num_sg_elems + 1; + i = num_sg_elems + 1; do { if (unlikely(!buf_info->page)) { dev_kfree_skb(skb); return NULL; } - frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN, - IONIC_PAGE_SIZE - buf_info->page_offset)); + if (headroom) + frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN); + else + frag_len = min_t(u16, len, ionic_rx_buf_size(buf_info)); len -= frag_len; - dma_sync_single_for_cpu(dev, - buf_info->dma_addr + buf_info->page_offset, - frag_len, DMA_FROM_DEVICE); + if (!synced) + dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info), + headroom, frag_len, DMA_FROM_DEVICE); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - buf_info->page, buf_info->page_offset, frag_len, - IONIC_PAGE_SIZE); + buf_info->page, buf_info->page_offset + headroom, + frag_len, IONIC_PAGE_SIZE); if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) { dma_unmap_page(dev, buf_info->dma_addr, @@ -225,6 +260,10 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, buf_info->page = NULL; } + /* only needed on the first buffer */ + if (headroom) + headroom = 0; + buf_info++; i--; @@ -235,19 +274,19 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info, - struct ionic_rxq_comp *comp) + unsigned int headroom, + unsigned int len, + bool synced) { struct net_device *netdev = q->lif->netdev; struct ionic_buf_info *buf_info; struct ionic_rx_stats *stats; struct device *dev = q->dev; struct sk_buff *skb; - u16 len; stats = q_to_rx_stats(q); buf_info = &desc_info->bufs[0]; - len = le16_to_cpu(comp->len); skb = napi_alloc_skb(&q_to_qcq(q)->napi, len); if (unlikely(!skb)) { @@ -262,11 +301,12 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q, return NULL; } - dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset, - len, DMA_FROM_DEVICE); - skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len); - dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset, - len, DMA_FROM_DEVICE); + if (!synced) + dma_sync_single_range_for_cpu(dev, ionic_rx_buf_pa(buf_info), + headroom, len, DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, ionic_rx_buf_va(buf_info) + headroom, len); + dma_sync_single_range_for_device(dev, ionic_rx_buf_pa(buf_info), + headroom, len, DMA_FROM_DEVICE); skb_put(skb, len); skb->protocol = eth_type_trans(skb, q->lif->netdev); @@ -274,6 +314,315 @@ static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q, return skb; } +static void ionic_xdp_tx_desc_clean(struct ionic_queue *q, + struct ionic_desc_info *desc_info) +{ + 
unsigned int nbufs = desc_info->nbufs; + struct ionic_buf_info *buf_info; + struct device *dev = q->dev; + int i; + + if (!nbufs) + return; + + buf_info = desc_info->bufs; + dma_unmap_single(dev, buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + if (desc_info->act == XDP_TX) + __free_pages(buf_info->page, 0); + buf_info->page = NULL; + + buf_info++; + for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) { + dma_unmap_page(dev, buf_info->dma_addr, + buf_info->len, DMA_TO_DEVICE); + if (desc_info->act == XDP_TX) + __free_pages(buf_info->page, 0); + buf_info->page = NULL; + } + + if (desc_info->act == XDP_REDIRECT) + xdp_return_frame(desc_info->xdpf); + + desc_info->nbufs = 0; + desc_info->xdpf = NULL; + desc_info->act = 0; +} + +static int ionic_xdp_post_frame(struct net_device *netdev, + struct ionic_queue *q, struct xdp_frame *frame, + enum xdp_action act, struct page *page, int off, + bool ring_doorbell) +{ + struct ionic_desc_info *desc_info; + struct ionic_buf_info *buf_info; + struct ionic_tx_stats *stats; + struct ionic_txq_desc *desc; + size_t len = frame->len; + dma_addr_t dma_addr; + u64 cmd; + + desc_info = &q->info[q->head_idx]; + desc = desc_info->txq_desc; + buf_info = desc_info->bufs; + stats = q_to_tx_stats(q); + + dma_addr = ionic_tx_map_single(q, frame->data, len); + if (dma_mapping_error(q->dev, dma_addr)) { + stats->dma_map_err++; + return -EIO; + } + buf_info->dma_addr = dma_addr; + buf_info->len = len; + buf_info->page = page; + buf_info->page_offset = off; + + desc_info->nbufs = 1; + desc_info->xdpf = frame; + desc_info->act = act; + + if (xdp_frame_has_frags(frame)) { + struct ionic_txq_sg_elem *elem; + struct skb_shared_info *sinfo; + struct ionic_buf_info *bi; + skb_frag_t *frag; + int i; + + bi = &buf_info[1]; + sinfo = xdp_get_shared_info_from_frame(frame); + frag = sinfo->frags; + elem = desc_info->txq_sg_desc->elems; + for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) { + dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag)); + if (dma_mapping_error(q->dev, dma_addr)) { + stats->dma_map_err++; + ionic_tx_desc_unmap_bufs(q, desc_info); + return -EIO; + } + bi->dma_addr = dma_addr; + bi->len = skb_frag_size(frag); + bi->page = skb_frag_page(frag); + + elem->addr = cpu_to_le64(bi->dma_addr); + elem->len = cpu_to_le16(bi->len); + elem++; + + desc_info->nbufs++; + } + } + + cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE, + 0, (desc_info->nbufs - 1), buf_info->dma_addr); + desc->cmd = cpu_to_le64(cmd); + desc->len = cpu_to_le16(len); + desc->csum_start = 0; + desc->csum_offset = 0; + + stats->xdp_frames++; + stats->pkts++; + stats->bytes += len; + + ionic_txq_post(q, ring_doorbell, ionic_tx_clean, NULL); + + return 0; +} + +int ionic_xdp_xmit(struct net_device *netdev, int n, + struct xdp_frame **xdp_frames, u32 flags) +{ + struct ionic_lif *lif = netdev_priv(netdev); + struct ionic_queue *txq; + struct netdev_queue *nq; + int nxmit; + int space; + int cpu; + int qi; + + if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + /* AdminQ is assumed on cpu 0, while we attempt to affinitize the + * TxRx queue pairs 0..n-1 on cpus 1..n. We try to keep with that + * affinitization here, but of course irqbalance and friends might + * have juggled things anyway, so we have to check for the 0 case. + */ + cpu = smp_processor_id(); + qi = cpu ? 
(cpu - 1) % lif->nxqs : cpu; + + txq = &lif->txqcqs[qi]->q; + nq = netdev_get_tx_queue(netdev, txq->index); + __netif_tx_lock(nq, cpu); + txq_trans_cond_update(nq); + + if (netif_tx_queue_stopped(nq) || + unlikely(ionic_maybe_stop_tx(txq, 1))) { + __netif_tx_unlock(nq); + return -EIO; + } + + space = min_t(int, n, ionic_q_space_avail(txq)); + for (nxmit = 0; nxmit < space ; nxmit++) { + if (ionic_xdp_post_frame(netdev, txq, xdp_frames[nxmit], + XDP_REDIRECT, + virt_to_page(xdp_frames[nxmit]->data), + 0, false)) { + nxmit--; + break; + } + } + + if (flags & XDP_XMIT_FLUSH) + ionic_dbell_ring(lif->kern_dbpage, txq->hw_type, + txq->dbval | txq->head_idx); + + ionic_maybe_stop_tx(txq, 4); + __netif_tx_unlock(nq); + + return nxmit; +} + +static bool ionic_run_xdp(struct ionic_rx_stats *stats, + struct net_device *netdev, + struct bpf_prog *xdp_prog, + struct ionic_queue *rxq, + struct ionic_buf_info *buf_info, + int len) +{ + u32 xdp_action = XDP_ABORTED; + struct xdp_buff xdp_buf; + struct ionic_queue *txq; + struct netdev_queue *nq; + struct xdp_frame *xdpf; + int remain_len; + int frag_len; + int err = 0; + + xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info); + frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN); + xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info), + XDP_PACKET_HEADROOM, frag_len, false); + + dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info), + XDP_PACKET_HEADROOM, len, + DMA_FROM_DEVICE); + + prefetchw(&xdp_buf.data_hard_start); + + /* We limit MTU size to one buffer if !xdp_has_frags, so + * if the recv len is bigger than one buffer + * then we know we have frag info to gather + */ + remain_len = len - frag_len; + if (remain_len) { + struct skb_shared_info *sinfo; + struct ionic_buf_info *bi; + skb_frag_t *frag; + + bi = buf_info; + sinfo = xdp_get_shared_info_from_buff(&xdp_buf); + sinfo->nr_frags = 0; + sinfo->xdp_frags_size = 0; + xdp_buff_set_frags_flag(&xdp_buf); + + do { + if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) { + err = -ENOSPC; + goto out_xdp_abort; + } + + frag = &sinfo->frags[sinfo->nr_frags]; + sinfo->nr_frags++; + bi++; + frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi)); + dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi), + 0, frag_len, DMA_FROM_DEVICE); + skb_frag_fill_page_desc(frag, bi->page, 0, frag_len); + sinfo->xdp_frags_size += frag_len; + remain_len -= frag_len; + + if (page_is_pfmemalloc(bi->page)) + xdp_buff_set_frag_pfmemalloc(&xdp_buf); + } while (remain_len > 0); + } + + xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf); + + switch (xdp_action) { + case XDP_PASS: + stats->xdp_pass++; + return false; /* false = we didn't consume the packet */ + + case XDP_DROP: + ionic_rx_page_free(rxq, buf_info); + stats->xdp_drop++; + break; + + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(&xdp_buf); + if (!xdpf) + goto out_xdp_abort; + + txq = rxq->partner; + nq = netdev_get_tx_queue(netdev, txq->index); + __netif_tx_lock(nq, smp_processor_id()); + txq_trans_cond_update(nq); + + if (netif_tx_queue_stopped(nq) || + unlikely(ionic_maybe_stop_tx(txq, 1))) { + __netif_tx_unlock(nq); + goto out_xdp_abort; + } + + dma_unmap_page(rxq->dev, buf_info->dma_addr, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + + err = ionic_xdp_post_frame(netdev, txq, xdpf, XDP_TX, + buf_info->page, + buf_info->page_offset, + true); + __netif_tx_unlock(nq); + if (err) { + netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); + goto out_xdp_abort; + } + stats->xdp_tx++; + + /* the Tx completion will free the buffers 
*/ + break; + + case XDP_REDIRECT: + /* unmap the pages before handing them to a different device */ + dma_unmap_page(rxq->dev, buf_info->dma_addr, + IONIC_PAGE_SIZE, DMA_FROM_DEVICE); + + err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog); + if (err) { + netdev_dbg(netdev, "xdp_do_redirect err %d\n", err); + goto out_xdp_abort; + } + buf_info->page = NULL; + rxq->xdp_flush = true; + stats->xdp_redirect++; + break; + + case XDP_ABORTED: + default: + goto out_xdp_abort; + } + + return true; + +out_xdp_abort: + trace_xdp_exception(netdev, xdp_prog, xdp_action); + ionic_rx_page_free(rxq, buf_info); + stats->xdp_aborted++; + + return true; +} + static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, struct ionic_cq_info *cq_info, @@ -283,7 +632,10 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_qcq *qcq = q_to_qcq(q); struct ionic_rx_stats *stats; struct ionic_rxq_comp *comp; + struct bpf_prog *xdp_prog; + unsigned int headroom; struct sk_buff *skb; + u16 len; comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp); @@ -294,13 +646,21 @@ static void ionic_rx_clean(struct ionic_queue *q, return; } + len = le16_to_cpu(comp->len); stats->pkts++; - stats->bytes += le16_to_cpu(comp->len); + stats->bytes += len; + + xdp_prog = READ_ONCE(q->lif->xdp_prog); + if (xdp_prog && + ionic_run_xdp(stats, netdev, xdp_prog, q, desc_info->bufs, len)) + return; - if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) - skb = ionic_rx_copybreak(q, desc_info, comp); + headroom = q->xdp_rxq_info ? XDP_PACKET_HEADROOM : 0; + if (len <= q->lif->rx_copybreak) + skb = ionic_rx_copybreak(q, desc_info, headroom, len, !!xdp_prog); else - skb = ionic_rx_frags(q, desc_info, comp); + skb = ionic_rx_frags(q, desc_info, headroom, len, + comp->num_sg_elems, !!xdp_prog); if (unlikely(!skb)) { stats->dropped++; @@ -367,7 +727,7 @@ static void ionic_rx_clean(struct ionic_queue *q, } } - if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak) + if (len <= q->lif->rx_copybreak) napi_gro_receive(&qcq->napi, skb); else napi_gro_frags(&qcq->napi); @@ -424,8 +784,9 @@ void ionic_rx_fill(struct ionic_queue *q) unsigned int frag_len; unsigned int nfrags; unsigned int n_fill; - unsigned int i, j; unsigned int len; + unsigned int i; + unsigned int j; n_fill = ionic_q_space_avail(q); @@ -434,9 +795,12 @@ void ionic_rx_fill(struct ionic_queue *q) if (n_fill < fill_threshold) return; - len = netdev->mtu + ETH_HLEN + VLAN_HLEN; + len = netdev->mtu + VLAN_ETH_HLEN; for (i = n_fill; i; i--) { + unsigned int headroom; + unsigned int buf_len; + nfrags = 0; remain_len = len; desc_info = &q->info[q->head_idx]; @@ -451,10 +815,18 @@ void ionic_rx_fill(struct ionic_queue *q) } } - /* fill main descriptor - buf[0] */ - desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); - frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN, - IONIC_PAGE_SIZE - buf_info->page_offset)); + /* fill main descriptor - buf[0] + * XDP uses space in the first buffer, so account for + * head room, tail room, and ip header in the first frag size. + */ + headroom = q->xdp_rxq_info ? 
XDP_PACKET_HEADROOM : 0; + if (q->xdp_rxq_info) + buf_len = IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN; + else + buf_len = ionic_rx_buf_size(buf_info); + frag_len = min_t(u16, len, buf_len); + + desc->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info) + headroom); desc->len = cpu_to_le16(frag_len); remain_len -= frag_len; buf_info++; @@ -472,10 +844,8 @@ void ionic_rx_fill(struct ionic_queue *q) } } - sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset); - frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN, - IONIC_PAGE_SIZE - - buf_info->page_offset)); + sg_elem->addr = cpu_to_le64(ionic_rx_buf_pa(buf_info)); + frag_len = min_t(u16, remain_len, ionic_rx_buf_size(buf_info)); sg_elem->len = cpu_to_le16(frag_len); remain_len -= frag_len; buf_info++; @@ -579,6 +949,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) work_done = ionic_cq_service(cq, budget, ionic_tx_service, NULL, NULL); + if (unlikely(!budget)) + return budget; + if (work_done < budget && napi_complete_done(napi, work_done)) { ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR); flags |= IONIC_INTR_CRED_UNMASK; @@ -598,6 +971,14 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) return work_done; } +static void ionic_xdp_do_flush(struct ionic_cq *cq) +{ + if (cq->bound_q->xdp_flush) { + xdp_do_flush(); + cq->bound_q->xdp_flush = false; + } +} + int ionic_rx_napi(struct napi_struct *napi, int budget) { struct ionic_qcq *qcq = napi_to_qcq(napi); @@ -607,6 +988,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) u32 work_done = 0; u32 flags = 0; + if (unlikely(!budget)) + return budget; + lif = cq->bound_q->lif; idev = &lif->ionic->idev; @@ -615,6 +999,7 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) ionic_rx_fill(cq->bound_q); + ionic_xdp_do_flush(cq); if (work_done < budget && napi_complete_done(napi, work_done)) { ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR); flags |= IONIC_INTR_CRED_UNMASK; @@ -656,11 +1041,15 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, ionic_tx_service, NULL, NULL); + if (unlikely(!budget)) + return budget; + rx_work_done = ionic_cq_service(rxcq, budget, ionic_rx_service, NULL, NULL); ionic_rx_fill(rxcq->bound_q); + ionic_xdp_do_flush(rxcq); if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) { ionic_dim_update(rxqcq, 0); flags |= IONIC_INTR_CRED_UNMASK; @@ -796,6 +1185,16 @@ static void ionic_tx_clean(struct ionic_queue *q, struct sk_buff *skb = cb_arg; u16 qi; + if (desc_info->xdpf) { + ionic_xdp_tx_desc_clean(q->partner, desc_info); + stats->clean++; + + if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q->index))) + netif_wake_subqueue(q->lif->netdev, q->index); + + return; + } + ionic_tx_desc_unmap_bufs(q, desc_info); if (!skb) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h index d7cbaad8a6fb..82fc38e0f573 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h @@ -17,4 +17,5 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev); bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); +int ionic_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **xdp, u32 flags); #endif /* _IONIC_TXRX_H_ */ diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c 
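
The ionic napi hunks above encode the NAPI contract that a poll invoked with budget == 0 (as netpoll does) may clean Tx completions but must not process Rx or call napi_complete_done(). A skeletal poll handler showing that convention (the example_* helpers are placeholders for a driver's own cleanup routines, not real functions):

#include <linux/netdevice.h>

/* Placeholder cleanup helpers standing in for the driver's own. */
static void example_clean_tx(struct napi_struct *napi) { }
static int example_clean_rx(struct napi_struct *napi, int budget)
{
	return 0;	/* pretend no Rx work was found */
}
static void example_unmask_irq(struct napi_struct *napi) { }

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	example_clean_tx(napi);	/* Tx cleanup is always allowed */

	/* netpoll calls with budget == 0: do no Rx processing and
	 * do not call napi_complete_done() in that case.
	 */
	if (unlikely(!budget))
		return 0;

	work_done = example_clean_rx(napi, budget);
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_unmask_irq(napi);

	return work_done;
}
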
b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 35ec9aab3dc7..51fa880eaf6c 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c @@ -1186,7 +1186,6 @@ static int netxen_p3_has_mn(struct netxen_adapter *adapter) { u32 capability, flashed_ver; - capability = 0; /* NX2031 always had MN */ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) @@ -1197,7 +1196,6 @@ netxen_p3_has_mn(struct netxen_adapter *adapter) flashed_ver = NETXEN_DECODE_VERSION(flashed_ver); if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) { - capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY); if (capability & NX_PEG_TUNE_MN_PRESENT) return 1; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 5a5dbbb8d8aa..9a1660a12c57 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1794,8 +1794,6 @@ qed_rdma_create_srq(void *rdma_cxt, goto err; opaque_fid = p_hwfn->hw_info.opaque_fid; - - opaque_fid = p_hwfn->hw_info.opaque_fid; init_data.opaque_fid = opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 0e240b5ab8d4..ae3ebf0cf999 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1776,7 +1776,7 @@ static int qede_get_tunable(struct net_device *dev, return 0; } -static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata) +static int qede_get_eee(struct net_device *dev, struct ethtool_keee *edata) { struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; @@ -1789,18 +1789,26 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata) return -EOPNOTSUPP; } - if (current_link.eee.adv_caps & QED_EEE_1G_ADV) - edata->advertised = ADVERTISED_1000baseT_Full; - if (current_link.eee.adv_caps & QED_EEE_10G_ADV) - edata->advertised |= ADVERTISED_10000baseT_Full; - if (current_link.sup_caps & QED_EEE_1G_ADV) - edata->supported = ADVERTISED_1000baseT_Full; - if (current_link.sup_caps & QED_EEE_10G_ADV) - edata->supported |= ADVERTISED_10000baseT_Full; - if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV) - edata->lp_advertised = ADVERTISED_1000baseT_Full; - if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV) - edata->lp_advertised |= ADVERTISED_10000baseT_Full; + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->advertised, + current_link.eee.adv_caps & QED_EEE_1G_ADV); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + edata->advertised, + current_link.eee.adv_caps & QED_EEE_10G_ADV); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->supported, + current_link.sup_caps & QED_EEE_1G_ADV); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + edata->supported, + current_link.sup_caps & QED_EEE_10G_ADV); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->lp_advertised, + current_link.eee.lp_adv_caps & QED_EEE_1G_ADV); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + edata->lp_advertised, + current_link.eee.lp_adv_caps & QED_EEE_10G_ADV); edata->tx_lpi_timer = current_link.eee.tx_lpi_timer; edata->eee_enabled = current_link.eee.enable; @@ -1810,11 +1818,14 @@ static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } -static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) +static int 
qede_set_eee(struct net_device *dev, struct ethtool_keee *edata) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {}; + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {}; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; struct qed_link_params params; + bool unsupp; if (!edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, "Link settings are not allowed to be changed\n"); @@ -1832,21 +1843,26 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) memset(&params, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG; - if (!(edata->advertised & (ADVERTISED_1000baseT_Full | - ADVERTISED_10000baseT_Full)) || - ((edata->advertised & (ADVERTISED_1000baseT_Full | - ADVERTISED_10000baseT_Full)) != - edata->advertised)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + supported); + + unsupp = linkmode_andnot(tmp, edata->advertised, supported); + if (unsupp) { DP_VERBOSE(edev, QED_MSG_DEBUG, - "Invalid advertised capabilities %d\n", - edata->advertised); + "Invalid advertised capabilities %*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, edata->advertised); return -EINVAL; } - if (edata->advertised & ADVERTISED_1000baseT_Full) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + edata->advertised)) params.eee.adv_caps = QED_EEE_1G_ADV; - if (edata->advertised & ADVERTISED_10000baseT_Full) - params.eee.adv_caps |= QED_EEE_10G_ADV; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + edata->advertised)) + params.eee.adv_caps |= QED_EEE_10G_ADV; + params.eee.enable = edata->eee_enabled; params.eee.tx_lpi_enable = edata->tx_lpi_enabled; params.eee.tx_lpi_timer = edata->tx_lpi_timer; diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c index 4292c89bd35c..6263e4cf47fa 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k.c +++ b/drivers/net/ethernet/qualcomm/qca_7k.c @@ -1,22 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* - * * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH - * - * Permission to use, copy, modify, and/or distribute this software - * for any purpose with or without fee is hereby granted, provided - * that the above copyright notice and this permission notice appear - * in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, - * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * - */ /* This module implements the Qualcomm Atheros SPI protocol for diff --git a/drivers/net/ethernet/qualcomm/qca_7k.h b/drivers/net/ethernet/qualcomm/qca_7k.h index 356de8ec5d48..828ee9c27578 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k.h +++ b/drivers/net/ethernet/qualcomm/qca_7k.h @@ -1,21 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. 
* Copyright (c) 2014, I2SE GmbH - * - * Permission to use, copy, modify, and/or distribute this software - * for any purpose with or without fee is hereby granted, provided - * that the above copyright notice and this permission notice appear - * in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, - * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - * */ /* Qualcomm Atheros SPI register definition. diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.c b/drivers/net/ethernet/qualcomm/qca_7k_common.c index 6b511f05df61..5302da587620 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k_common.c +++ b/drivers/net/ethernet/qualcomm/qca_7k_common.c @@ -1,20 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* * Copyright (c) 2011, 2012, Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH - * - * Permission to use, copy, modify, and/or distribute this software - * for any purpose with or without fee is hereby granted, provided - * that the above copyright notice and this permission notice appear - * in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, - * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Atheros ethernet framing. Every Ethernet frame is surrounded @@ -162,5 +149,5 @@ EXPORT_SYMBOL_GPL(qcafrm_fsm_decode); MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 common"); MODULE_AUTHOR("Qualcomm Atheros Communications"); -MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); +MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/qualcomm/qca_7k_common.h b/drivers/net/ethernet/qualcomm/qca_7k_common.h index 928554f11e35..44ed66fdb407 100644 --- a/drivers/net/ethernet/qualcomm/qca_7k_common.h +++ b/drivers/net/ethernet/qualcomm/qca_7k_common.h @@ -1,20 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* * Copyright (c) 2011, 2012, Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH - * - * Permission to use, copy, modify, and/or distribute this software - * for any purpose with or without fee is hereby granted, provided - * that the above copyright notice and this permission notice appear - * in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, - * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ /* Atheros Ethernet framing. Every Ethernet frame is surrounded by an atheros @@ -107,9 +94,6 @@ struct qcafrm_handle { /* Offset in buffer (borrowed for length too) */ u16 offset; - - /* Frame length as kept by this module */ - u16 len; }; u16 qcafrm_create_header(u8 *buf, u16 len); @@ -128,17 +112,6 @@ static inline void qcafrm_fsm_init_uart(struct qcafrm_handle *handle) handle->state = handle->init; } -/* Gather received bytes and try to extract a full Ethernet frame - * by following a simple state machine. - * - * Return: QCAFRM_GATHER No Ethernet frame fully received yet. - * QCAFRM_NOHEAD Header expected but not found. - * QCAFRM_INVLEN QCA7K frame length is invalid - * QCAFRM_NOTAIL Footer expected but not found. - * > 0 Number of byte in the fully received - * Ethernet frame - */ - s32 qcafrm_fsm_decode(struct qcafrm_handle *handle, u8 *buf, u16 buf_len, u8 recv_byte); #endif /* _QCA_FRAMING_H */ diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index 1822f2ad8f0d..ff3b89e9028e 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -1,20 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH - * - * Permission to use, copy, modify, and/or distribute this software - * for any purpose with or without fee is hereby granted, provided - * that the above copyright notice and this permission notice appear - * in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, - * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This file contains debugging routines for use in the QCA7K driver. @@ -255,7 +242,7 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct qcaspi *qca = netdev_priv(dev); ring->rx_max_pending = QCASPI_RX_MAX_FRAMES; - ring->tx_max_pending = TX_RING_MAX_LEN; + ring->tx_max_pending = QCASPI_TX_RING_MAX_LEN; ring->rx_pending = QCASPI_RX_MAX_FRAMES; ring->tx_pending = qca->txr.count; } @@ -275,8 +262,8 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, if (qca->spi_thread) kthread_park(qca->spi_thread); - qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); - qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN); + qca->txr.count = max_t(u32, ring->tx_pending, QCASPI_TX_RING_MIN_LEN); + qca->txr.count = min_t(u16, qca->txr.count, QCASPI_TX_RING_MAX_LEN); if (qca->spi_thread) kthread_unpark(qca->spi_thread); diff --git a/drivers/net/ethernet/qualcomm/qca_debug.h b/drivers/net/ethernet/qualcomm/qca_debug.h index 46a785844421..0d98cef3abc4 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.h +++ b/drivers/net/ethernet/qualcomm/qca_debug.h @@ -1,20 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. 
* Copyright (c) 2014, I2SE GmbH - * - * Permission to use, copy, modify, and/or distribute this software - * for any purpose with or without fee is hereby granted, provided - * that the above copyright notice and this permission notice appear - * in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, - * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This file contains debugging routines for use in the QCA7K driver. diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 5f3c11fb3fa2..5799ecc88a87 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -1,20 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH - * - * Permission to use, copy, modify, and/or distribute this software - * for any purpose with or without fee is hereby granted, provided - * that the above copyright notice and this permission notice appear - * in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, - * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This module implements the Qualcomm Atheros SPI protocol for @@ -359,7 +346,7 @@ qcaspi_receive(struct qcaspi *qca) /* Read the packet size. */ qcaspi_read_register(qca, SPI_REG_RDBUF_BYTE_AVA, &available); - netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n", + netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %04x\n", available); if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) { @@ -476,7 +463,7 @@ qcaspi_flush_tx_ring(struct qcaspi *qca) * has been replaced by netif_tx_lock_bh() and so on. 
*/ netif_tx_lock_bh(qca->net_dev); - for (i = 0; i < TX_RING_MAX_LEN; i++) { + for (i = 0; i < QCASPI_TX_RING_MAX_LEN; i++) { if (qca->txr.skb[i]) { dev_kfree_skb(qca->txr.skb[i]); qca->txr.skb[i] = NULL; @@ -687,7 +674,7 @@ static int qcaspi_netdev_open(struct net_device *dev) { struct qcaspi *qca = netdev_priv(dev); - int ret = 0; + struct task_struct *thread; if (!qca) return -EINVAL; @@ -697,23 +684,18 @@ qcaspi_netdev_open(struct net_device *dev) qca->sync = QCASPI_SYNC_UNKNOWN; qcafrm_fsm_init_spi(&qca->frm_handle); - qca->spi_thread = kthread_run((void *)qcaspi_spi_thread, - qca, "%s", dev->name); + thread = kthread_run((void *)qcaspi_spi_thread, + qca, "%s", dev->name); - if (IS_ERR(qca->spi_thread)) { + if (IS_ERR(thread)) { netdev_err(dev, "%s: unable to start kernel thread.\n", QCASPI_DRV_NAME); - return PTR_ERR(qca->spi_thread); + return PTR_ERR(thread); } - ret = request_irq(qca->spi_dev->irq, qcaspi_intr_handler, 0, - dev->name, qca); - if (ret) { - netdev_err(dev, "%s: unable to get IRQ %d (irqval=%d).\n", - QCASPI_DRV_NAME, qca->spi_dev->irq, ret); - kthread_stop(qca->spi_thread); - return ret; - } + qca->spi_thread = thread; + + enable_irq(qca->spi_dev->irq); /* SPI thread takes care of TX queue */ @@ -728,10 +710,12 @@ qcaspi_netdev_close(struct net_device *dev) netif_stop_queue(dev); qcaspi_write_register(qca, SPI_REG_INTR_ENABLE, 0, wr_verify); - free_irq(qca->spi_dev->irq, qca); + disable_irq(qca->spi_dev->irq); - kthread_stop(qca->spi_thread); - qca->spi_thread = NULL; + if (qca->spi_thread) { + kthread_stop(qca->spi_thread); + qca->spi_thread = NULL; + } qcaspi_flush_tx_ring(qca); return 0; @@ -831,8 +815,8 @@ qcaspi_netdev_init(struct net_device *dev) qca->clkspeed = qcaspi_clkspeed; qca->burst_len = qcaspi_burst_len; qca->spi_thread = NULL; - qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN + - QCAFRM_FOOTER_LEN + 4) * 4; + qca->buffer_size = (QCAFRM_MAX_MTU + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN + + QCAFRM_FOOTER_LEN + QCASPI_HW_PKT_LEN) * QCASPI_RX_MAX_FRAMES; memset(&qca->stats, 0, sizeof(struct qcaspi_stats)); @@ -881,6 +865,8 @@ qcaspi_netdev_setup(struct net_device *dev) qcaspi_set_ethtool_ops(dev); dev->watchdog_timeo = QCASPI_TX_TIMEOUT; dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->needed_tailroom = ALIGN(QCAFRM_FOOTER_LEN + QCAFRM_MIN_LEN, 4); + dev->needed_headroom = ALIGN(QCAFRM_HEADER_LEN, 4); dev->tx_queue_len = 100; /* MTU range: 46 - 1500 */ @@ -891,7 +877,7 @@ qcaspi_netdev_setup(struct net_device *dev) memset(qca, 0, sizeof(struct qcaspi)); memset(&qca->txr, 0, sizeof(qca->txr)); - qca->txr.count = TX_RING_MAX_LEN; + qca->txr.count = QCASPI_TX_RING_MAX_LEN; } static const struct of_device_id qca_spi_of_match[] = { @@ -984,6 +970,15 @@ qca_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, qcaspi_devs); + ret = devm_request_irq(&spi->dev, spi->irq, qcaspi_intr_handler, + IRQF_NO_AUTOEN, qca->net_dev->name, qca); + if (ret) { + dev_err(&spi->dev, "Unable to get IRQ %d (irqval=%d).\n", + spi->irq, ret); + free_netdev(qcaspi_devs); + return ret; + } + ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev); if (ret) { eth_hw_addr_random(qca->net_dev); @@ -998,8 +993,8 @@ qca_spi_probe(struct spi_device *spi) qcaspi_read_register(qca, SPI_REG_SIGNATURE, &signature); if (signature != QCASPI_GOOD_SIGNATURE) { - dev_err(&spi->dev, "Invalid signature (0x%04X)\n", - signature); + dev_err(&spi->dev, "Invalid signature (expected 0x%04x, read 0x%04x)\n", + QCASPI_GOOD_SIGNATURE, signature); free_netdev(qcaspi_devs); return -EFAULT; } @@ 
-1048,6 +1043,6 @@ module_spi_driver(qca_spi_driver); MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 SPI Driver"); MODULE_AUTHOR("Qualcomm Atheros Communications"); -MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); +MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(QCASPI_DRV_VERSION); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index 3067356106f0..d59cb2352cee 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -1,20 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. * Copyright (c) 2014, I2SE GmbH - * - * Permission to use, copy, modify, and/or distribute this software - * for any purpose with or without fee is hereby granted, provided - * that the above copyright notice and this permission notice appear - * in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, - * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Qualcomm Atheros SPI register definition. @@ -39,8 +26,9 @@ #define QCASPI_GOOD_SIGNATURE 0xAA55 -#define TX_RING_MAX_LEN 10 -#define TX_RING_MIN_LEN 2 +#define QCASPI_TX_RING_MAX_LEN 10 +#define QCASPI_TX_RING_MIN_LEN 2 +#define QCASPI_RX_MAX_FRAMES 4 /* sync related constants */ #define QCASPI_SYNC_UNKNOWN 0 @@ -54,7 +42,7 @@ #define QCASPI_EVENT_CPUON 1 struct tx_ring { - struct sk_buff *skb[TX_RING_MAX_LEN]; + struct sk_buff *skb[QCASPI_TX_RING_MAX_LEN]; u16 head; u16 tail; u16 size; diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c index 223321897b96..321fd8d00730 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -1,20 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* * Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc. * Copyright (c) 2017, I2SE GmbH - * - * Permission to use, copy, modify, and/or distribute this software - * for any purpose with or without fee is hereby granted, provided - * that the above copyright notice and this permission notice appear - * in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL - * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL - * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, - * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ /* This module implements the Qualcomm Atheros UART protocol for @@ -410,6 +397,6 @@ module_serdev_device_driver(qca_uart_driver); MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver"); MODULE_AUTHOR("Qualcomm Atheros Communications"); -MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>"); +MODULE_AUTHOR("Stefan Wahren <wahrenst@gmx.net>"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(QCAUART_DRV_VERSION); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 046b5f7d8e7c..9d2a9562c96f 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -98,7 +98,7 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev) { struct rmnet_priv *priv = netdev_priv(dev); - return priv->real_dev->ifindex; + return READ_ONCE(priv->real_dev->ifindex); } static int rmnet_vnd_init(struct net_device *dev) diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index 81567fcf3957..4c043052198d 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -68,6 +68,7 @@ enum mac_version { /* support for RTL_GIGA_MAC_VER_60 has been removed */ RTL_GIGA_MAC_VER_61, RTL_GIGA_MAC_VER_63, + RTL_GIGA_MAC_VER_65, RTL_GIGA_MAC_NONE }; @@ -84,3 +85,6 @@ void r8169_get_led_name(struct rtl8169_private *tp, int idx, int rtl8168_get_led_mode(struct rtl8169_private *tp); int rtl8168_led_mod_ctrl(struct rtl8169_private *tp, u16 mask, u16 val); void rtl8168_init_leds(struct net_device *ndev); +int rtl8125_get_led_mode(struct rtl8169_private *tp, int index); +int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode); +void rtl8125_init_leds(struct net_device *ndev); diff --git a/drivers/net/ethernet/realtek/r8169_leds.c b/drivers/net/ethernet/realtek/r8169_leds.c index 007d077edcad..7c5dc9d0df85 100644 --- a/drivers/net/ethernet/realtek/r8169_leds.c +++ b/drivers/net/ethernet/realtek/r8169_leds.c @@ -18,12 +18,14 @@ #define RTL8168_LED_CTRL_LINK_100 BIT(1) #define RTL8168_LED_CTRL_LINK_10 BIT(0) -#define RTL8168_NUM_LEDS 3 +#define RTL8125_LED_CTRL_ACT BIT(9) +#define RTL8125_LED_CTRL_LINK_2500 BIT(5) +#define RTL8125_LED_CTRL_LINK_1000 BIT(3) +#define RTL8125_LED_CTRL_LINK_100 BIT(1) +#define RTL8125_LED_CTRL_LINK_10 BIT(0) -#define RTL8168_SUPPORTED_MODES \ - (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK_100) | \ - BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_RX) | \ - BIT(TRIGGER_NETDEV_TX)) +#define RTL8168_NUM_LEDS 3 +#define RTL8125_NUM_LEDS 4 struct r8169_led_classdev { struct led_classdev led; @@ -33,28 +35,35 @@ struct r8169_led_classdev { #define lcdev_to_r8169_ldev(lcdev) container_of(lcdev, struct r8169_led_classdev, led) +static bool r8169_trigger_mode_is_valid(unsigned long flags) +{ + bool rx, tx; + + if (flags & BIT(TRIGGER_NETDEV_HALF_DUPLEX)) + return false; + if (flags & BIT(TRIGGER_NETDEV_FULL_DUPLEX)) + return false; + + rx = flags & BIT(TRIGGER_NETDEV_RX); + tx = flags & BIT(TRIGGER_NETDEV_TX); + + return rx == tx; +} + static int rtl8168_led_hw_control_is_supported(struct led_classdev *led_cdev, unsigned long flags) { struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev); struct rtl8169_private *tp = netdev_priv(ldev->ndev); int shift = ldev->index * 4; - bool rx, tx; - - if (flags & ~RTL8168_SUPPORTED_MODES) - goto nosupp; - rx = flags & BIT(TRIGGER_NETDEV_RX); - tx = flags & BIT(TRIGGER_NETDEV_TX); - if (rx != tx) - goto nosupp; + if 
(!r8169_trigger_mode_is_valid(flags)) { + /* Switch LED off to indicate that mode isn't supported */ + rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0); + return -EOPNOTSUPP; + } return 0; - -nosupp: - /* Switch LED off to indicate that mode isn't supported */ - rtl8168_led_mod_ctrl(tp, 0x000f << shift, 0); - return -EOPNOTSUPP; } static int rtl8168_led_hw_control_set(struct led_classdev *led_cdev, @@ -129,7 +138,6 @@ static void rtl8168_setup_ldev(struct r8169_led_classdev *ldev, r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE); led_cdev->name = led_name; - led_cdev->default_trigger = "netdev"; led_cdev->hw_control_trigger = "netdev"; led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN; led_cdev->hw_control_is_supported = rtl8168_led_hw_control_is_supported; @@ -155,3 +163,102 @@ void rtl8168_init_leds(struct net_device *ndev) for (i = 0; i < RTL8168_NUM_LEDS; i++) rtl8168_setup_ldev(leds + i, ndev, i); } + +static int rtl8125_led_hw_control_is_supported(struct led_classdev *led_cdev, + unsigned long flags) +{ + struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev); + struct rtl8169_private *tp = netdev_priv(ldev->ndev); + + if (!r8169_trigger_mode_is_valid(flags)) { + /* Switch LED off to indicate that mode isn't supported */ + rtl8125_set_led_mode(tp, ldev->index, 0); + return -EOPNOTSUPP; + } + + return 0; +} + +static int rtl8125_led_hw_control_set(struct led_classdev *led_cdev, + unsigned long flags) +{ + struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev); + struct rtl8169_private *tp = netdev_priv(ldev->ndev); + u16 mode = 0; + + if (flags & BIT(TRIGGER_NETDEV_LINK_10)) + mode |= RTL8125_LED_CTRL_LINK_10; + if (flags & BIT(TRIGGER_NETDEV_LINK_100)) + mode |= RTL8125_LED_CTRL_LINK_100; + if (flags & BIT(TRIGGER_NETDEV_LINK_1000)) + mode |= RTL8125_LED_CTRL_LINK_1000; + if (flags & BIT(TRIGGER_NETDEV_LINK_2500)) + mode |= RTL8125_LED_CTRL_LINK_2500; + if (flags & (BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX))) + mode |= RTL8125_LED_CTRL_ACT; + + return rtl8125_set_led_mode(tp, ldev->index, mode); +} + +static int rtl8125_led_hw_control_get(struct led_classdev *led_cdev, + unsigned long *flags) +{ + struct r8169_led_classdev *ldev = lcdev_to_r8169_ldev(led_cdev); + struct rtl8169_private *tp = netdev_priv(ldev->ndev); + int mode; + + mode = rtl8125_get_led_mode(tp, ldev->index); + if (mode < 0) + return mode; + + if (mode & RTL8125_LED_CTRL_LINK_10) + *flags |= BIT(TRIGGER_NETDEV_LINK_10); + if (mode & RTL8125_LED_CTRL_LINK_100) + *flags |= BIT(TRIGGER_NETDEV_LINK_100); + if (mode & RTL8125_LED_CTRL_LINK_1000) + *flags |= BIT(TRIGGER_NETDEV_LINK_1000); + if (mode & RTL8125_LED_CTRL_LINK_2500) + *flags |= BIT(TRIGGER_NETDEV_LINK_2500); + if (mode & RTL8125_LED_CTRL_ACT) + *flags |= BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX); + + return 0; +} + +static void rtl8125_setup_led_ldev(struct r8169_led_classdev *ldev, + struct net_device *ndev, int index) +{ + struct rtl8169_private *tp = netdev_priv(ndev); + struct led_classdev *led_cdev = &ldev->led; + char led_name[LED_MAX_NAME_SIZE]; + + ldev->ndev = ndev; + ldev->index = index; + + r8169_get_led_name(tp, index, led_name, LED_MAX_NAME_SIZE); + led_cdev->name = led_name; + led_cdev->hw_control_trigger = "netdev"; + led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN; + led_cdev->hw_control_is_supported = rtl8125_led_hw_control_is_supported; + led_cdev->hw_control_set = rtl8125_led_hw_control_set; + led_cdev->hw_control_get = rtl8125_led_hw_control_get; + led_cdev->hw_control_get_device = r8169_led_hw_control_get_device; + + 
/* ignore errors */ + devm_led_classdev_register(&ndev->dev, led_cdev); +} + +void rtl8125_init_leds(struct net_device *ndev) +{ + /* bind resource mgmt to netdev */ + struct device *dev = &ndev->dev; + struct r8169_led_classdev *leds; + int i; + + leds = devm_kcalloc(dev, RTL8125_NUM_LEDS, sizeof(*leds), GFP_KERNEL); + if (!leds) + return; + + for (i = 0; i < RTL8125_NUM_LEDS; i++) + rtl8125_setup_led_ldev(leds + i, ndev, i); +} diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index dd73df6b17b0..0d2cbb32c870 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -55,6 +55,7 @@ #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" #define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" #define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" +#define FIRMWARE_8126A_2 "rtl_nic/rtl8126a-2.fw" #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -136,6 +137,7 @@ static const struct { [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, /* reserve 62 for CFG_METHOD_4 in the vendor driver */ [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, + [RTL_GIGA_MAC_VER_65] = {"RTL8126A", FIRMWARE_8126A_2}, }; static const struct pci_device_id rtl8169_pci_tbl[] = { @@ -158,6 +160,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024 }, { 0x0001, 0x8168, PCI_ANY_ID, 0x2410 }, { PCI_VDEVICE(REALTEK, 0x8125) }, + { PCI_VDEVICE(REALTEK, 0x8126) }, { PCI_VDEVICE(REALTEK, 0x3000) }, {} }; @@ -327,13 +330,23 @@ enum rtl8168_registers { }; enum rtl8125_registers { + LEDSEL0 = 0x18, + INT_CFG0_8125 = 0x34, +#define INT_CFG0_ENABLE_8125 BIT(0) +#define INT_CFG0_CLKREQEN BIT(3) IntrMask_8125 = 0x38, IntrStatus_8125 = 0x3c, + INT_CFG1_8125 = 0x7a, + LEDSEL2 = 0x84, + LEDSEL1 = 0x86, TxPoll_8125 = 0x90, + LEDSEL3 = 0x96, MAC0_BKP = 0x19e0, EEE_TXIDLE_TIMER_8125 = 0x6048, }; +#define LEDSEL_MASK_8125 0x23f + #define RX_VLAN_INNER_8125 BIT(22) #define RX_VLAN_OUTER_8125 BIT(23) #define RX_VLAN_8125 (RX_VLAN_INNER_8125 | RX_VLAN_OUTER_8125) @@ -606,6 +619,7 @@ struct rtl8169_private { struct page *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ u16 cp_cmd; + u16 tx_lpi_timer; u32 irq_mask; int irq; struct clk *clk; @@ -629,7 +643,6 @@ struct rtl8169_private { struct rtl8169_counters *counters; struct rtl8169_tc_offsets tc_offset; u32 saved_wolopts; - int eee_adv; const char *fw_name; struct rtl_fw *rtl_fw; @@ -663,6 +676,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3); MODULE_FIRMWARE(FIRMWARE_8107E_2); MODULE_FIRMWARE(FIRMWARE_8125A_3); MODULE_FIRMWARE(FIRMWARE_8125B_2); +MODULE_FIRMWARE(FIRMWARE_8126A_2); static inline struct device *tp_to_dev(struct rtl8169_private *tp) { @@ -824,6 +838,51 @@ int rtl8168_get_led_mode(struct rtl8169_private *tp) return ret; } +static int rtl8125_get_led_reg(int index) +{ + static const int led_regs[] = { LEDSEL0, LEDSEL1, LEDSEL2, LEDSEL3 }; + + return led_regs[index]; +} + +int rtl8125_set_led_mode(struct rtl8169_private *tp, int index, u16 mode) +{ + int reg = rtl8125_get_led_reg(index); + struct device *dev = tp_to_dev(tp); + int ret; + u16 val; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + mutex_lock(&tp->led_lock); + val = RTL_R16(tp, reg) & ~LEDSEL_MASK_8125; + RTL_W16(tp, reg, val | mode); + mutex_unlock(&tp->led_lock); + + pm_runtime_put_sync(dev); + + return 0; 
+} + +int rtl8125_get_led_mode(struct rtl8169_private *tp, int index) +{ + int reg = rtl8125_get_led_reg(index); + struct device *dev = tp_to_dev(tp); + int ret; + + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; + + ret = RTL_R16(tp, reg); + + pm_runtime_put_sync(dev); + + return ret; +} + void r8169_get_led_name(struct rtl8169_private *tp, int idx, char *buf, int buf_len) { @@ -1140,7 +1199,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65: r8168g_mdio_write(tp, location, val); break; default: @@ -1155,7 +1214,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location) case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65: return r8168g_mdio_read(tp, location); default: return r8169_mdio_read(tp, location); @@ -1341,7 +1400,7 @@ static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable) case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26: case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30: case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65: if (enable) RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN); else @@ -1508,7 +1567,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) break; case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_65: if (wolopts) rtl_mod_config2(tp, 0, PME_SIGNAL); else @@ -1974,30 +2033,65 @@ static int rtl_set_coalesce(struct net_device *dev, return 0; } -static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) +static void rtl_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + unsigned int timer_val = READ_ONCE(tp->dev->mtu) + ETH_HLEN + 0x20; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_48: + tp->tx_lpi_timer = timer_val; + r8168_mac_ocp_write(tp, 0xe048, timer_val); + break; + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_65: + tp->tx_lpi_timer = timer_val; + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, timer_val); + break; + default: + break; + } +} + +static unsigned int r8169_get_tx_lpi_timer_us(struct rtl8169_private *tp) +{ + unsigned int speed = tp->phydev->speed; + unsigned int timer = tp->tx_lpi_timer; + + if (!timer || speed == SPEED_UNKNOWN) + return 0; + + /* tx_lpi_timer value is in bytes */ + return DIV_ROUND_CLOSEST(timer * BITS_PER_BYTE, speed); +} + +static int rtl8169_get_eee(struct net_device *dev, struct ethtool_keee *data) { struct rtl8169_private *tp = netdev_priv(dev); + int ret; if (!rtl_supports_eee(tp)) return -EOPNOTSUPP; - return phy_ethtool_get_eee(tp->phydev, data); + ret = phy_ethtool_get_eee(tp->phydev, data); + if (ret) + return ret; + + data->tx_lpi_timer = r8169_get_tx_lpi_timer_us(tp); + data->tx_lpi_enabled = data->tx_lpi_timer ? 
data->eee_enabled : false; + + return 0; } -static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) +static int rtl8169_set_eee(struct net_device *dev, struct ethtool_keee *data) { struct rtl8169_private *tp = netdev_priv(dev); - int ret; if (!rtl_supports_eee(tp)) return -EOPNOTSUPP; - ret = phy_ethtool_set_eee(tp->phydev, data); - - if (!ret) - tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, - MDIO_AN_EEE_ADV); - return ret; + return phy_ethtool_set_eee(tp->phydev, data); } static void rtl8169_get_ringparam(struct net_device *dev, @@ -2062,21 +2156,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .set_pauseparam = rtl8169_set_pauseparam, }; -static void rtl_enable_eee(struct rtl8169_private *tp) -{ - struct phy_device *phydev = tp->phydev; - int adv; - - /* respect EEE advertisement the user may have set */ - if (tp->eee_adv >= 0) - adv = tp->eee_adv; - else - adv = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE); - - if (adv >= 0) - phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); -} - static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) { /* @@ -2095,6 +2174,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) u16 val; enum mac_version ver; } mac_info[] = { + /* 8126A family. */ + { 0x7cf, 0x649, RTL_GIGA_MAC_VER_65 }, + /* 8125B family. */ { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, @@ -2250,14 +2332,8 @@ static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); } -static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) -{ - RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); -} - static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) { - rtl8125_set_eee_txidle_timer(tp); r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); } @@ -2313,9 +2389,6 @@ static void rtl8169_init_phy(struct rtl8169_private *tp) /* We may have called phy_speed_down before */ phy_speed_up(tp->phydev); - if (rtl_supports_eee(tp)) - rtl_enable_eee(tp); - genphy_soft_reset(tp->phydev); } @@ -2368,6 +2441,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); break; case RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_65: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST | RX_PAUSE_SLOT_ON); break; @@ -2554,7 +2628,7 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_61: rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); break; - case RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_63 ... RTL_GIGA_MAC_VER_65: RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); @@ -2797,7 +2871,7 @@ static void rtl_enable_exit_l1(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: rtl_eri_set_bits(tp, 0xd4, 0x0c00); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65: r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); break; default: @@ -2811,7 +2885,7 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_38: rtl_eri_clear_bits(tp, 0xd4, 0x1f00); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_65: r8168_mac_ocp_modify(tp, 0xc0ac, 0x1f80, 0); break; default: @@ -2821,6 +2895,8 @@ static void rtl_disable_exit_l1(struct rtl8169_private *tp) static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) { + u8 val8; + if (tp->mac_version < RTL_GIGA_MAC_VER_32) return; @@ -2834,11 +2910,19 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) return; rtl_mod_config5(tp, 0, ASPM_en); - rtl_mod_config2(tp, 0, ClkReqEn); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_65: + val8 = RTL_R8(tp, INT_CFG0_8125) | INT_CFG0_CLKREQEN; + RTL_W8(tp, INT_CFG0_8125, val8); + break; + default: + rtl_mod_config2(tp, 0, ClkReqEn); + break; + } switch (tp->mac_version) { case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65: /* reset ephy tx/rx disable timer */ r8168_mac_ocp_modify(tp, 0xe094, 0xff00, 0); /* chip can trigger L1.2 */ @@ -2850,14 +2934,22 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) } else { switch (tp->mac_version) { case RTL_GIGA_MAC_VER_46 ... RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65: r8168_mac_ocp_modify(tp, 0xe092, 0x00ff, 0); break; default: break; } - rtl_mod_config2(tp, ClkReqEn, 0); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_65: + val8 = RTL_R8(tp, INT_CFG0_8125) & ~INT_CFG0_CLKREQEN; + RTL_W8(tp, INT_CFG0_8125, val8); + break; + default: + rtl_mod_config2(tp, ClkReqEn, 0); + break; + } rtl_mod_config5(tp, ASPM_en, 0); } } @@ -3570,10 +3662,15 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) /* disable new tx descriptor format */ r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); - if (tp->mac_version == RTL_GIGA_MAC_VER_63) + if (tp->mac_version == RTL_GIGA_MAC_VER_65) + RTL_W8(tp, 0xD8, RTL_R8(tp, 0xD8) & ~0x02); + + if (tp->mac_version == RTL_GIGA_MAC_VER_65) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + else if (tp->mac_version == RTL_GIGA_MAC_VER_63) r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); else - r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0300); if (tp->mac_version == RTL_GIGA_MAC_VER_63) r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); @@ -3586,6 +3683,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); + if (tp->mac_version == RTL_GIGA_MAC_VER_65) + r8168_mac_ocp_modify(tp, 0xea1c, 0x0300, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); @@ -3600,10 +3701,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); - if (tp->mac_version == RTL_GIGA_MAC_VER_63) - rtl8125b_config_eee_mac(tp); - else + if (tp->mac_version == RTL_GIGA_MAC_VER_61) rtl8125a_config_eee_mac(tp); + else + rtl8125b_config_eee_mac(tp); rtl_disable_rxdvgate(tp); } @@ -3647,6 +3748,12 @@ static void rtl_hw_start_8125b(struct rtl8169_private *tp) rtl_hw_start_8125_common(tp); } +static void rtl_hw_start_8126a(struct rtl8169_private *tp) +{ + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_start_8125_common(tp); +} + static void 
rtl_hw_config(struct rtl8169_private *tp) { static const rtl_generic_fct hw_configs[] = { @@ -3689,6 +3796,7 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117, [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, + [RTL_GIGA_MAC_VER_65] = rtl_hw_start_8126a, }; if (hw_configs[tp->mac_version]) @@ -3699,9 +3807,23 @@ static void rtl_hw_start_8125(struct rtl8169_private *tp) { int i; + RTL_W8(tp, INT_CFG0_8125, 0x00); + /* disable interrupt coalescing */ - for (i = 0xa00; i < 0xb00; i += 4) - RTL_W32(tp, i, 0); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + for (i = 0xa00; i < 0xb00; i += 4) + RTL_W32(tp, i, 0); + break; + case RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_65: + for (i = 0xa00; i < 0xa80; i += 4) + RTL_W32(tp, i, 0); + RTL_W16(tp, INT_CFG1_8125, 0x0000); + break; + default: + break; + } rtl_hw_config(tp); } @@ -3744,6 +3866,8 @@ static void rtl_hw_start(struct rtl8169_private *tp) rtl_hw_aspm_clkreq_enable(tp, false); RTL_W16(tp, CPlusCmd, tp->cp_cmd); + rtl_set_eee_txidle_timer(tp); + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) rtl_hw_start_8169(tp); else if (rtl_is_8125(tp)) @@ -3777,15 +3901,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) dev->mtu = new_mtu; netdev_update_features(dev); rtl_jumbo_config(tp); - - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_61: - case RTL_GIGA_MAC_VER_63: - rtl8125_set_eee_txidle_timer(tp); - break; - default: - break; - } + rtl_set_eee_txidle_timer(tp); return 0; } @@ -3929,7 +4045,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp) RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_65: rtl_enable_rxdvgate(tp); fsleep(2000); break; @@ -4080,8 +4196,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_61: - case RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65: padto = max_t(unsigned int, padto, ETH_ZLEN); break; default: @@ -5058,7 +5173,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp) } tp->phydev->mac_managed_pm = true; - + if (rtl_supports_eee(tp)) + phy_advertise_eee_all(tp->phydev); phy_support_asym_pause(tp->phydev); /* PHY will be woken up in rtl_open() */ @@ -5108,7 +5224,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; - case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_63: + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_65: rtl_hw_init_8125(tp); break; default: @@ -5193,7 +5309,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->dev = dev; tp->pci_dev = pdev; tp->supports_gmii = ent->driver_data == RTL_CFG_NO_GBIT ? 
0 : 1; - tp->eee_adv = -1; tp->ocp_base = OCP_STD_PHY_BASE; raw_spin_lock_init(&tp->cfg9346_usage_lock); @@ -5201,11 +5316,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) raw_spin_lock_init(&tp->mac_ocp_lock); mutex_init(&tp->led_lock); - dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev, - struct pcpu_sw_netstats); - if (!dev->tstats) - return -ENOMEM; - /* Get the *optional* external "ether_clk" used on some boards */ tp->clk = devm_clk_get_optional_enabled(&pdev->dev, "ether_clk"); if (IS_ERR(tp->clk)) @@ -5320,6 +5430,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + netdev_sw_irq_coalesce_default_on(dev); /* configure chip for default features */ @@ -5356,10 +5468,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - if (IS_ENABLED(CONFIG_R8169_LEDS) && - tp->mac_version > RTL_GIGA_MAC_VER_06 && - tp->mac_version < RTL_GIGA_MAC_VER_61) - rtl8168_init_leds(dev); + if (IS_ENABLED(CONFIG_R8169_LEDS)) { + if (rtl_is_8125(tp)) + rtl8125_init_leds(dev); + else if (tp->mac_version > RTL_GIGA_MAC_VER_06) + rtl8168_init_leds(dev); + } netdev_info(dev, "%s, %pM, XID %03x, IRQ %d\n", rtl_chip_infos[chipset].name, dev->dev_addr, xid, tp->irq); diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index b50f16786c24..1f74317beb88 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -1102,6 +1102,12 @@ static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, rtl8125b_config_eee_phy(phydev); } +static void rtl8126a_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); +} + void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, enum mac_version ver) { @@ -1152,6 +1158,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config, [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, + [RTL_GIGA_MAC_VER_65] = rtl8126a_hw_phy_config, }; if (phy_configs[ver]) diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index d6136fe5c206..b03fae7a0f72 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -34,6 +34,7 @@ config RAVB select MII select MDIO_BITBANG select PHYLIB + select RESET_CONTROLLER help Renesas Ethernet AVB device driver. 
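The r8169 hunks above program the EEE tx idle timer in bytes (mtu + ETH_HLEN + 0x20) and convert it back to microseconds for ethtool via DIV_ROUND_CLOSEST(timer * BITS_PER_BYTE, speed). Below is a minimal standalone sketch of that arithmetic, assuming speed in Mbit/s as with the kernel's SPEED_* values; the helper names and main() are illustrative only, not driver code:

#include <stdio.h>

#define BITS_PER_BYTE	8
#define ETH_HLEN	14	/* Ethernet header length */

/* Round-to-nearest division for positive values, like the kernel's
 * DIV_ROUND_CLOSEST() */
static unsigned int div_round_closest(unsigned int x, unsigned int divisor)
{
	return (x + divisor / 2) / divisor;
}

/* The timer counts bytes on the wire; since 1 Mbit/s == 1 bit/us,
 * bytes * 8 / speed_mbps yields microseconds directly. */
static unsigned int tx_lpi_timer_us(unsigned int timer_bytes, unsigned int speed_mbps)
{
	if (!timer_bytes || !speed_mbps)	/* mirrors the SPEED_UNKNOWN guard */
		return 0;

	return div_round_closest(timer_bytes * BITS_PER_BYTE, speed_mbps);
}

int main(void)
{
	unsigned int timer = 1500 + ETH_HLEN + 0x20;	/* 1546 bytes for a 1500-byte MTU */

	printf("%u us at 1000 Mbit/s\n", tx_lpi_timer_us(timer, 1000));	/* prints 12 */
	printf("%u us at 100 Mbit/s\n", tx_lpi_timer_us(timer, 100));	/* prints 124 */
	return 0;
}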
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index e0f8276cffed..35e642fc4b2a 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -205,7 +205,11 @@ enum ravb_reg { TLFRCR = 0x0758, RFCR = 0x0760, MAFCR = 0x0778, - CSR0 = 0x0800, /* RZ/G2L only */ + + /* TOE registers (RZ/G2L only) */ + CSR0 = 0x0800, + CSR1 = 0x0804, + CSR2 = 0x0808, }; @@ -978,6 +982,34 @@ enum CSR0_BIT { CSR0_RPE = 0x00000020, }; +enum CSR1_BIT { + CSR1_TIP4 = 0x00000001, + CSR1_TTCP4 = 0x00000010, + CSR1_TUDP4 = 0x00000020, + CSR1_TICMP4 = 0x00000040, + CSR1_TTCP6 = 0x00100000, + CSR1_TUDP6 = 0x00200000, + CSR1_TICMP6 = 0x00400000, + CSR1_THOP = 0x01000000, + CSR1_TROUT = 0x02000000, + CSR1_TAHD = 0x04000000, + CSR1_TDHD = 0x08000000, +}; + +enum CSR2_BIT { + CSR2_RIP4 = 0x00000001, + CSR2_RTCP4 = 0x00000010, + CSR2_RUDP4 = 0x00000020, + CSR2_RICMP4 = 0x00000040, + CSR2_RTCP6 = 0x00100000, + CSR2_RUDP6 = 0x00200000, + CSR2_RICMP6 = 0x00400000, + CSR2_RHOP = 0x01000000, + CSR2_RROUT = 0x02000000, + CSR2_RAHD = 0x04000000, + CSR2_RDHD = 0x08000000, +}; + #define DBAT_ENTRY_NUM 22 #define RX_QUEUE_OFFSET 4 #define NUM_RX_QUEUE 2 @@ -1089,10 +1121,6 @@ struct ravb_private { int msg_enable; int speed; int emac_irq; - int erra_irq; - int mgmta_irq; - int rx_irqs[NUM_RX_QUEUE]; - int tx_irqs[NUM_TX_QUEUE]; unsigned no_avb_link:1; unsigned avb_link_active_low:1; @@ -1106,6 +1134,8 @@ struct ravb_private { const struct ravb_hw_info *info; struct reset_control *rstc; + + u32 gti_tiv; }; static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0e3731f50fc2..f9fb772b05c7 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -29,6 +29,7 @@ #include <linux/spinlock.h> #include <linux/reset.h> #include <linux/math64.h> +#include <net/ip.h> #include "ravb.h" @@ -38,16 +39,6 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) -static const char *ravb_rx_irqs[NUM_RX_QUEUE] = { - "ch0", /* RAVB_BE */ - "ch1", /* RAVB_NC */ -}; - -static const char *ravb_tx_irqs[NUM_TX_QUEUE] = { - "ch18", /* RAVB_BE */ - "ch19", /* RAVB_NC */ -}; - void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, u32 set) { @@ -96,13 +87,13 @@ static void ravb_set_rate_gbeth(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); switch (priv->speed) { - case 10: /* 10BASE */ + case 10: /* 10BASE */ ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR); break; - case 100: /* 100BASE */ + case 100: /* 100BASE */ ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR); break; - case 1000: /* 1000BASE */ + case 1000: /* 1000BASE */ ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR); break; } @@ -522,6 +513,36 @@ error: return -ENOMEM; } +static void ravb_csum_init_gbeth(struct net_device *ndev) +{ + bool tx_enable = ndev->features & NETIF_F_HW_CSUM; + bool rx_enable = ndev->features & NETIF_F_RXCSUM; + + if (!(tx_enable || rx_enable)) + goto done; + + ravb_write(ndev, 0, CSR0); + if (ravb_wait(ndev, CSR0, CSR0_TPE | CSR0_RPE, 0)) { + netdev_err(ndev, "Timeout enabling hardware checksum\n"); + + if (tx_enable) + ndev->features &= ~NETIF_F_HW_CSUM; + + if (rx_enable) + ndev->features &= ~NETIF_F_RXCSUM; + } else { + if (tx_enable) + ravb_write(ndev, CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4, CSR1); + + if (rx_enable) + ravb_write(ndev, CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4, + CSR2); + } + +done: + ravb_write(ndev, 
CSR0_TPE | CSR0_RPE, CSR0); +} + static void ravb_emac_init_gbeth(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); @@ -553,7 +574,8 @@ static void ravb_emac_init_gbeth(struct net_device *ndev) /* E-MAC status register clear */ ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR); - ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0); + + ravb_csum_init_gbeth(ndev); /* E-MAC interrupt enable register */ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR); @@ -734,6 +756,30 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) } } +static void ravb_rx_csum_gbeth(struct sk_buff *skb) +{ + __wsum csum_ip_hdr, csum_proto; + u8 *hw_csum; + + /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4 + * bytes appended to packet data. First 2 bytes is ip header checksum + * and last 2 bytes is protocol checksum. + */ + if (unlikely(skb->len < sizeof(__sum16) * 2)) + return; + + hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); + csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); + + hw_csum -= sizeof(__sum16); + csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); + skb_trim(skb, skb->len - 2 * sizeof(__sum16)); + + /* TODO: IPV6 Rx checksum */ + if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto) + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + static void ravb_rx_csum(struct sk_buff *skb) { u8 *hw_csum; @@ -772,29 +818,25 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) struct ravb_rx_desc *desc; struct sk_buff *skb; dma_addr_t dma_addr; + int rx_packets = 0; u8 desc_status; - int boguscnt; u16 pkt_len; u8 die_dt; int entry; int limit; + int i; entry = priv->cur_rx[q] % priv->num_rx_ring[q]; - boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; + limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; stats = &priv->stats[q]; - boguscnt = min(boguscnt, *quota); - limit = boguscnt; desc = &priv->gbeth_rx_ring[entry]; - while (desc->die_dt != DT_FEMPTY) { + for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) { /* Descriptor type must be checked before all other reads */ dma_rmb(); desc_status = desc->msc; pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; - if (--boguscnt < 0) - break; - /* We use 0-byte descriptors to mark the DMA mapping errors */ if (!pkt_len) continue; @@ -819,8 +861,10 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) skb = ravb_get_skb_gbeth(ndev, entry, desc); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); + if (ndev->features & NETIF_F_RXCSUM) + ravb_rx_csum_gbeth(skb); napi_gro_receive(&priv->napi[q], skb); - stats->rx_packets++; + rx_packets++; stats->rx_bytes += pkt_len; break; case DT_FSTART: @@ -846,9 +890,11 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) dev_kfree_skb(skb); priv->rx_1st_skb->protocol = eth_type_trans(priv->rx_1st_skb, ndev); + if (ndev->features & NETIF_F_RXCSUM) + ravb_rx_csum_gbeth(priv->rx_1st_skb); napi_gro_receive(&priv->napi[q], priv->rx_1st_skb); - stats->rx_packets++; + rx_packets++; stats->rx_bytes += pkt_len; break; } @@ -887,9 +933,9 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) desc->die_dt = DT_FEMPTY; } - *quota -= limit - (++boguscnt); - - return boguscnt <= 0; + stats->rx_packets += rx_packets; + *quota -= rx_packets; + return *quota == 0; } /* Packet receive function for Ethernet AVB */ @@ -1092,11 +1138,23 @@ static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id) { struct net_device *ndev 
= dev_id; struct ravb_private *priv = netdev_priv(ndev); + struct device *dev = &priv->pdev->dev; + irqreturn_t result = IRQ_HANDLED; + + pm_runtime_get_noresume(dev); + + if (unlikely(!pm_runtime_active(dev))) { + result = IRQ_NONE; + goto out_rpm_put; + } spin_lock(&priv->lock); ravb_emac_interrupt_unlocked(ndev); spin_unlock(&priv->lock); - return IRQ_HANDLED; + +out_rpm_put: + pm_runtime_put_noidle(dev); + return result; } /* Error interrupt handler */ @@ -1176,9 +1234,15 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) struct net_device *ndev = dev_id; struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + struct device *dev = &priv->pdev->dev; irqreturn_t result = IRQ_NONE; u32 iss; + pm_runtime_get_noresume(dev); + + if (unlikely(!pm_runtime_active(dev))) + goto out_rpm_put; + spin_lock(&priv->lock); /* Get interrupt status */ iss = ravb_read(ndev, ISS); @@ -1222,6 +1286,9 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) } spin_unlock(&priv->lock); + +out_rpm_put: + pm_runtime_put_noidle(dev); return result; } @@ -1230,9 +1297,15 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct ravb_private *priv = netdev_priv(ndev); + struct device *dev = &priv->pdev->dev; irqreturn_t result = IRQ_NONE; u32 iss; + pm_runtime_get_noresume(dev); + + if (unlikely(!pm_runtime_active(dev))) + goto out_rpm_put; + spin_lock(&priv->lock); /* Get interrupt status */ iss = ravb_read(ndev, ISS); @@ -1254,6 +1327,9 @@ static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) } spin_unlock(&priv->lock); + +out_rpm_put: + pm_runtime_put_noidle(dev); return result; } @@ -1261,8 +1337,14 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q) { struct net_device *ndev = dev_id; struct ravb_private *priv = netdev_priv(ndev); + struct device *dev = &priv->pdev->dev; irqreturn_t result = IRQ_NONE; + pm_runtime_get_noresume(dev); + + if (unlikely(!pm_runtime_active(dev))) + goto out_rpm_put; + spin_lock(&priv->lock); /* Network control/Best effort queue RX/TX */ @@ -1270,6 +1352,9 @@ static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q) result = IRQ_HANDLED; spin_unlock(&priv->lock); + +out_rpm_put: + pm_runtime_put_noidle(dev); return result; } @@ -1288,25 +1373,16 @@ static int ravb_poll(struct napi_struct *napi, int budget) struct net_device *ndev = napi->dev; struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - bool gptp = info->gptp || info->ccc_gac; - struct ravb_rx_desc *desc; unsigned long flags; int q = napi - priv->napi; int mask = BIT(q); int quota = budget; - unsigned int entry; - if (!gptp) { - entry = priv->cur_rx[q] % priv->num_rx_ring[q]; - desc = &priv->gbeth_rx_ring[entry]; - } /* Processing RX Descriptor Ring */ /* Clear RX interrupt */ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); - if (gptp || desc->die_dt != DT_FEMPTY) { - if (ravb_rx(ndev, "a, q)) - goto out; - } + if (ravb_rx(ndev, "a, q)) + goto out; /* Processing TX Descriptor Ring */ spin_lock_irqsave(&priv->lock, flags); @@ -1736,89 +1812,159 @@ static const struct ethtool_ops ravb_ethtool_ops = { .set_wol = ravb_set_wol, }; -static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, - struct net_device *ndev, struct device *dev, - const char *ch) +static int ravb_set_config_mode(struct net_device *ndev) { - char *name; + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int error; - 
name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); - if (!name) - return -ENOMEM; - error = request_irq(irq, handler, 0, name, ndev); - if (error) - netdev_err(ndev, "cannot request IRQ %s\n", name); + if (info->gptp) { + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + if (error) + return error; + /* Set CSEL value */ + ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); + } else if (info->ccc_gac) { + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); + } else { + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + } return error; } +static void ravb_set_gti(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + if (!(info->gptp || info->ccc_gac)) + return; + + ravb_write(ndev, priv->gti_tiv, GTI); + + /* Request GTI loading */ + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); +} + +static int ravb_compute_gti(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + struct device *dev = ndev->dev.parent; + unsigned long rate; + u64 inc; + + if (!(info->gptp || info->ccc_gac)) + return 0; + + if (info->gptp_ref_clk) + rate = clk_get_rate(priv->gptp_clk); + else + rate = clk_get_rate(priv->clk); + if (!rate) + return -EINVAL; + + inc = div64_ul(1000000000ULL << 20, rate); + + if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) { + dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", + inc, GTI_TIV_MIN, GTI_TIV_MAX); + return -EINVAL; + } + priv->gti_tiv = inc; + + return 0; +} + +/* Set tx and rx clock internal delay modes */ +static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + bool explicit_delay = false; + u32 delay; + + if (!priv->info->internal_delay) + return; + + if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) { + /* Valid values are 0 and 1800, according to DT bindings */ + priv->rxcidm = !!delay; + explicit_delay = true; + } + if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) { + /* Valid values are 0 and 2000, according to DT bindings */ + priv->txcidm = !!delay; + explicit_delay = true; + } + + if (explicit_delay) + return; + + /* Fall back to legacy rgmii-*id behavior */ + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) { + priv->rxcidm = 1; + priv->rgmii_override = 1; + } + + if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { + priv->txcidm = 1; + priv->rgmii_override = 1; + } +} + +static void ravb_set_delay_mode(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 set = 0; + + if (!priv->info->internal_delay) + return; + + if (priv->rxcidm) + set |= APSR_RDM; + if (priv->txcidm) + set |= APSR_TDM; + ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set); +} + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - struct platform_device *pdev = priv->pdev; - struct device *dev = &pdev->dev; + struct device *dev = &priv->pdev->dev; int error; napi_enable(&priv->napi[RAVB_BE]); if (info->nc_queues) napi_enable(&priv->napi[RAVB_NC]); - if (!info->multi_irqs) { - error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, - ndev->name, ndev); - if (error) { - netdev_err(ndev, "cannot request 
IRQ\n"); - goto out_napi_off; - } - } else { - error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev, - dev, "ch22:multi"); - if (error) - goto out_napi_off; - error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev, - dev, "ch24:emac"); - if (error) - goto out_free_irq; - error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt, - ndev, dev, "ch0:rx_be"); - if (error) - goto out_free_irq_emac; - error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt, - ndev, dev, "ch18:tx_be"); - if (error) - goto out_free_irq_be_rx; - error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt, - ndev, dev, "ch1:rx_nc"); - if (error) - goto out_free_irq_be_tx; - error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt, - ndev, dev, "ch19:tx_nc"); - if (error) - goto out_free_irq_nc_rx; - - if (info->err_mgmt_irqs) { - error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt, - ndev, dev, "err_a"); - if (error) - goto out_free_irq_nc_tx; - error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt, - ndev, dev, "mgmt_a"); - if (error) - goto out_free_irq_erra; - } - } + error = pm_runtime_resume_and_get(dev); + if (error < 0) + goto out_napi_off; + + /* Set AVB config mode */ + error = ravb_set_config_mode(ndev); + if (error) + goto out_rpm_put; + + ravb_set_delay_mode(ndev); + ravb_write(ndev, priv->desc_bat_dma, DBAT); /* Device init */ error = ravb_dmac_init(ndev); if (error) - goto out_free_irq_mgmta; + goto out_set_reset; + ravb_emac_init(ndev); + ravb_set_gti(ndev); + /* Initialise PTP Clock driver */ - if (info->gptp) + if (info->gptp || info->ccc_gac) ravb_ptp_init(ndev, priv->pdev); /* PHY control start */ @@ -1832,29 +1978,14 @@ static int ravb_open(struct net_device *ndev) out_ptp_stop: /* Stop PTP Clock driver */ - if (info->gptp) + if (info->gptp || info->ccc_gac) ravb_ptp_stop(ndev); ravb_stop_dma(ndev); -out_free_irq_mgmta: - if (!info->multi_irqs) - goto out_free_irq; - if (info->err_mgmt_irqs) - free_irq(priv->mgmta_irq, ndev); -out_free_irq_erra: - if (info->err_mgmt_irqs) - free_irq(priv->erra_irq, ndev); -out_free_irq_nc_tx: - free_irq(priv->tx_irqs[RAVB_NC], ndev); -out_free_irq_nc_rx: - free_irq(priv->rx_irqs[RAVB_NC], ndev); -out_free_irq_be_tx: - free_irq(priv->tx_irqs[RAVB_BE], ndev); -out_free_irq_be_rx: - free_irq(priv->rx_irqs[RAVB_BE], ndev); -out_free_irq_emac: - free_irq(priv->emac_irq, ndev); -out_free_irq: - free_irq(ndev->irq, ndev); +out_set_reset: + ravb_set_opmode(ndev, CCC_OPC_RESET); +out_rpm_put: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); out_napi_off: if (info->nc_queues) napi_disable(&priv->napi[RAVB_NC]); @@ -1939,6 +2070,36 @@ out_unlock: rtnl_unlock(); } +static bool ravb_can_tx_csum_gbeth(struct sk_buff *skb) +{ + struct iphdr *ip = ip_hdr(skb); + + /* TODO: Need to add support for VLAN tag 802.1Q */ + if (skb_vlan_tag_present(skb)) + return false; + + /* TODO: Need to add hardware checksum for IPv6 */ + if (skb->protocol != htons(ETH_P_IP)) + return false; + + switch (ip->protocol) { + case IPPROTO_TCP: + break; + case IPPROTO_UDP: + /* If the checksum value in the UDP header field is 0, TOE does + * not calculate checksum for UDP part of this frame as it is + * optional function as per standards. 
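+ * Returning false makes the caller fall back to skb_checksum_help(),
+ * so the UDP checksum is still filled in by software.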
+ */ + if (udp_hdr(skb)->check == 0) + return false; + break; + default: + return false; + } + + return true; +} + /* Packet transmit function for Ethernet AVB */ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { @@ -1954,6 +2115,9 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) u32 entry; u32 len; + if (skb->ip_summed == CHECKSUM_PARTIAL && !ravb_can_tx_csum_gbeth(skb)) + skb_checksum_help(skb); + spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * num_tx_desc) { @@ -2088,8 +2252,15 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; struct net_device_stats *nstats, *stats0, *stats1; + struct device *dev = &priv->pdev->dev; nstats = &ndev->stats; + + pm_runtime_get_noresume(dev); + + if (!pm_runtime_active(dev)) + goto out_rpm_put; + stats0 = &priv->stats[RAVB_BE]; if (info->tx_counters) { @@ -2131,6 +2302,8 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev) nstats->rx_over_errors += stats1->rx_over_errors; } +out_rpm_put: + pm_runtime_put_noidle(dev); return nstats; } @@ -2153,6 +2326,8 @@ static int ravb_close(struct net_device *ndev) struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; struct ravb_tstamp_skb *ts_skb, *ts_skb2; + struct device *dev = &priv->pdev->dev; + int error; netif_tx_stop_all_queues(ndev); @@ -2161,8 +2336,16 @@ static int ravb_close(struct net_device *ndev) ravb_write(ndev, 0, RIC2); ravb_write(ndev, 0, TIC); + /* PHY disconnect */ + if (ndev->phydev) { + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + } + /* Stop PTP Clock driver */ - if (info->gptp) + if (info->gptp || info->ccc_gac) ravb_ptp_stop(ndev); /* Set the config mode to stop the AVB-DMAC's processes */ @@ -2179,29 +2362,8 @@ static int ravb_close(struct net_device *ndev) } } - /* PHY disconnect */ - if (ndev->phydev) { - phy_stop(ndev->phydev); - phy_disconnect(ndev->phydev); - if (of_phy_is_fixed_link(np)) - of_phy_deregister_fixed_link(np); - } - cancel_work_sync(&priv->work); - if (info->multi_irqs) { - free_irq(priv->tx_irqs[RAVB_NC], ndev); - free_irq(priv->rx_irqs[RAVB_NC], ndev); - free_irq(priv->tx_irqs[RAVB_BE], ndev); - free_irq(priv->rx_irqs[RAVB_BE], ndev); - free_irq(priv->emac_irq, ndev); - if (info->err_mgmt_irqs) { - free_irq(priv->erra_irq, ndev); - free_irq(priv->mgmta_irq, ndev); - } - } - free_irq(ndev->irq, ndev); - if (info->nc_queues) napi_disable(&priv->napi[RAVB_NC]); napi_disable(&priv->napi[RAVB_BE]); @@ -2211,6 +2373,17 @@ static int ravb_close(struct net_device *ndev) if (info->nc_queues) ravb_ring_free(ndev, RAVB_NC); + /* Update statistics. */ + ravb_get_stats(ndev); + + /* Set reset mode. 
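+ * The AVB-DMAC is left in reset; the next ravb_open() switches it
+ * back to config mode before use.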
*/ + error = ravb_set_opmode(ndev, CCC_OPC_RESET); + if (error) + return error; + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + return 0; } @@ -2334,11 +2507,58 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable) spin_unlock_irqrestore(&priv->lock, flags); } +static int ravb_endisable_csum_gbeth(struct net_device *ndev, enum ravb_reg reg, + u32 val, u32 mask) +{ + u32 csr0 = CSR0_TPE | CSR0_RPE; + int ret; + + ravb_write(ndev, csr0 & ~mask, CSR0); + ret = ravb_wait(ndev, CSR0, mask, 0); + if (!ret) + ravb_write(ndev, val, reg); + + ravb_write(ndev, csr0, CSR0); + + return ret; +} + static int ravb_set_features_gbeth(struct net_device *ndev, netdev_features_t features) { - /* Place holder */ - return 0; + netdev_features_t changed = ndev->features ^ features; + struct ravb_private *priv = netdev_priv(ndev); + unsigned long flags; + int ret = 0; + u32 val; + + spin_lock_irqsave(&priv->lock, flags); + if (changed & NETIF_F_RXCSUM) { + if (features & NETIF_F_RXCSUM) + val = CSR2_RIP4 | CSR2_RTCP4 | CSR2_RUDP4 | CSR2_RICMP4; + else + val = 0; + + ret = ravb_endisable_csum_gbeth(ndev, CSR2, val, CSR0_RPE); + if (ret) + goto done; + } + + if (changed & NETIF_F_HW_CSUM) { + if (features & NETIF_F_HW_CSUM) + val = CSR1_TIP4 | CSR1_TTCP4 | CSR1_TUDP4; + else + val = 0; + + ret = ravb_endisable_csum_gbeth(ndev, CSR1, val, CSR0_TPE); + if (ret) + goto done; + } + +done: + spin_unlock_irqrestore(&priv->lock, flags); + + return ret; } static int ravb_set_features_rcar(struct net_device *ndev, @@ -2349,8 +2569,6 @@ static int ravb_set_features_rcar(struct net_device *ndev, if (changed & NETIF_F_RXCSUM) ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM); - ndev->features = features; - return 0; } @@ -2359,8 +2577,24 @@ static int ravb_set_features(struct net_device *ndev, { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + struct device *dev = &priv->pdev->dev; + int ret; + + pm_runtime_get_noresume(dev); + + if (pm_runtime_active(dev)) + ret = info->set_feature(ndev, features); + else + ret = 0; + + pm_runtime_put_noidle(dev); + + if (ret) + return ret; - return info->set_feature(ndev, features); + ndev->features = features; + + return 0; } static const struct net_device_ops ravb_netdev_ops = { @@ -2518,6 +2752,8 @@ static const struct ravb_hw_info gbeth_hw_info = { .emac_init = ravb_emac_init_gbeth, .gstrings_stats = ravb_gstrings_stats_gbeth, .gstrings_size = sizeof(ravb_gstrings_stats_gbeth), + .net_hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM, + .net_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM, .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN), .tccr_mask = TCCR_TSRQ0, @@ -2541,100 +2777,91 @@ static const struct of_device_id ravb_match_table[] = { }; MODULE_DEVICE_TABLE(of, ravb_match_table); -static int ravb_set_gti(struct net_device *ndev) +static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name, + const char *ch, int *irq, irq_handler_t handler) { - struct ravb_private *priv = netdev_priv(ndev); - const struct ravb_hw_info *info = priv->info; - struct device *dev = ndev->dev.parent; - unsigned long rate; - uint64_t inc; - - if (info->gptp_ref_clk) - rate = clk_get_rate(priv->gptp_clk); - else - rate = clk_get_rate(priv->clk); - if (!rate) - return -EINVAL; + struct platform_device *pdev = priv->pdev; + struct net_device *ndev = priv->ndev; + struct device *dev = &pdev->dev; + const char *dev_name; + unsigned long flags; + int error, 
irq_num; - inc = div64_ul(1000000000ULL << 20, rate); + if (irq_name) { + dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); + if (!dev_name) + return -ENOMEM; - if (inc < GTI_TIV_MIN || inc > GTI_TIV_MAX) { - dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", - inc, GTI_TIV_MIN, GTI_TIV_MAX); - return -EINVAL; + irq_num = platform_get_irq_byname(pdev, irq_name); + flags = 0; + } else { + dev_name = ndev->name; + irq_num = platform_get_irq(pdev, 0); + flags = IRQF_SHARED; } + if (irq_num < 0) + return irq_num; - ravb_write(ndev, inc, GTI); + if (irq) + *irq = irq_num; - return 0; + error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev); + if (error) + netdev_err(ndev, "cannot request IRQ %s\n", dev_name); + + return error; } -static int ravb_set_config_mode(struct net_device *ndev) +static int ravb_setup_irqs(struct ravb_private *priv) { - struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + struct net_device *ndev = priv->ndev; + const char *irq_name, *emac_irq_name; int error; - if (info->gptp) { - error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); - if (error) - return error; - /* Set CSEL value */ - ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); - } else if (info->ccc_gac) { - error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); + if (!info->multi_irqs) + return ravb_setup_irq(priv, NULL, NULL, &ndev->irq, ravb_interrupt); + + if (info->err_mgmt_irqs) { + irq_name = "dia"; + emac_irq_name = "line3"; } else { - error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + irq_name = "ch22"; + emac_irq_name = "ch24"; } - return error; -} - -/* Set tx and rx clock internal delay modes */ -static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev) -{ - struct ravb_private *priv = netdev_priv(ndev); - bool explicit_delay = false; - u32 delay; + error = ravb_setup_irq(priv, irq_name, "ch22:multi", &ndev->irq, ravb_multi_interrupt); + if (error) + return error; - if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) { - /* Valid values are 0 and 1800, according to DT bindings */ - priv->rxcidm = !!delay; - explicit_delay = true; - } - if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) { - /* Valid values are 0 and 2000, according to DT bindings */ - priv->txcidm = !!delay; - explicit_delay = true; - } + error = ravb_setup_irq(priv, emac_irq_name, "ch24:emac", &priv->emac_irq, + ravb_emac_interrupt); + if (error) + return error; - if (explicit_delay) - return; + if (info->err_mgmt_irqs) { + error = ravb_setup_irq(priv, "err_a", "err_a", NULL, ravb_multi_interrupt); + if (error) + return error; - /* Fall back to legacy rgmii-*id behavior */ - if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || - priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) { - priv->rxcidm = 1; - priv->rgmii_override = 1; + error = ravb_setup_irq(priv, "mgmt_a", "mgmt_a", NULL, ravb_multi_interrupt); + if (error) + return error; } - if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || - priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { - priv->txcidm = 1; - priv->rgmii_override = 1; - } -} + error = ravb_setup_irq(priv, "ch0", "ch0:rx_be", NULL, ravb_be_interrupt); + if (error) + return error; -static void ravb_set_delay_mode(struct net_device *ndev) -{ - struct ravb_private *priv = netdev_priv(ndev); - u32 set = 0; + error = ravb_setup_irq(priv, "ch1", "ch1:rx_nc", NULL, ravb_nc_interrupt); + if (error) + return error; - if (priv->rxcidm) - set |= 
APSR_RDM; - if (priv->txcidm) - set |= APSR_TDM; - ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set); + error = ravb_setup_irq(priv, "ch18", "ch18:tx_be", NULL, ravb_be_interrupt); + if (error) + return error; + + return ravb_setup_irq(priv, "ch19", "ch19:tx_nc", NULL, ravb_nc_interrupt); } static int ravb_probe(struct platform_device *pdev) @@ -2644,9 +2871,8 @@ static int ravb_probe(struct platform_device *pdev) struct reset_control *rstc; struct ravb_private *priv; struct net_device *ndev; - int error, irq, q; struct resource *res; - int i; + int error, q; if (!np) { dev_err(&pdev->dev, @@ -2654,7 +2880,7 @@ static int ravb_probe(struct platform_device *pdev) return -EINVAL; } - rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(rstc)) return dev_err_probe(&pdev->dev, PTR_ERR(rstc), "failed to get cpg reset\n"); @@ -2673,25 +2899,6 @@ static int ravb_probe(struct platform_device *pdev) if (error) goto out_free_netdev; - pm_runtime_enable(&pdev->dev); - error = pm_runtime_resume_and_get(&pdev->dev); - if (error < 0) - goto out_rpm_disable; - - if (info->multi_irqs) { - if (info->err_mgmt_irqs) - irq = platform_get_irq_byname(pdev, "dia"); - else - irq = platform_get_irq_byname(pdev, "ch22"); - } else { - irq = platform_get_irq(pdev, 0); - } - if (irq < 0) { - error = irq; - goto out_release; - } - ndev->irq = irq; - SET_NETDEV_DEV(ndev, &pdev->dev); priv = netdev_priv(ndev); @@ -2706,10 +2913,43 @@ static int ravb_probe(struct platform_device *pdev) priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; } + error = ravb_setup_irqs(priv); + if (error) + goto out_reset_assert; + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + error = PTR_ERR(priv->clk); + goto out_reset_assert; + } + + if (info->gptp_ref_clk) { + priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp"); + if (IS_ERR(priv->gptp_clk)) { + error = PTR_ERR(priv->gptp_clk); + goto out_reset_assert; + } + } + + priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk"); + if (IS_ERR(priv->refclk)) { + error = PTR_ERR(priv->refclk); + goto out_reset_assert; + } + clk_prepare(priv->refclk); + + platform_set_drvdata(pdev, ndev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 100); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + error = pm_runtime_resume_and_get(&pdev->dev); + if (error < 0) + goto out_rpm_disable; + priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(priv->addr)) { error = PTR_ERR(priv->addr); - goto out_release; + goto out_rpm_put; } /* The Ether-specific entries in the device structure. 
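* ndev->base_addr is set from the ioremapped register resource above.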
*/ @@ -2720,78 +2960,12 @@ static int ravb_probe(struct platform_device *pdev) error = of_get_phy_mode(np, &priv->phy_interface); if (error && error != -ENODEV) - goto out_release; + goto out_rpm_put; priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); priv->avb_link_active_low = of_property_read_bool(np, "renesas,ether-link-active-low"); - if (info->multi_irqs) { - if (info->err_mgmt_irqs) - irq = platform_get_irq_byname(pdev, "line3"); - else - irq = platform_get_irq_byname(pdev, "ch24"); - if (irq < 0) { - error = irq; - goto out_release; - } - priv->emac_irq = irq; - for (i = 0; i < NUM_RX_QUEUE; i++) { - irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]); - if (irq < 0) { - error = irq; - goto out_release; - } - priv->rx_irqs[i] = irq; - } - for (i = 0; i < NUM_TX_QUEUE; i++) { - irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]); - if (irq < 0) { - error = irq; - goto out_release; - } - priv->tx_irqs[i] = irq; - } - - if (info->err_mgmt_irqs) { - irq = platform_get_irq_byname(pdev, "err_a"); - if (irq < 0) { - error = irq; - goto out_release; - } - priv->erra_irq = irq; - - irq = platform_get_irq_byname(pdev, "mgmt_a"); - if (irq < 0) { - error = irq; - goto out_release; - } - priv->mgmta_irq = irq; - } - } - - priv->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(priv->clk)) { - error = PTR_ERR(priv->clk); - goto out_release; - } - - priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk"); - if (IS_ERR(priv->refclk)) { - error = PTR_ERR(priv->refclk); - goto out_release; - } - clk_prepare_enable(priv->refclk); - - if (info->gptp_ref_clk) { - priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp"); - if (IS_ERR(priv->gptp_clk)) { - error = PTR_ERR(priv->gptp_clk); - goto out_disable_refclk; - } - clk_prepare_enable(priv->gptp_clk); - } - ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; @@ -2806,25 +2980,11 @@ static int ravb_probe(struct platform_device *pdev) ndev->netdev_ops = &ravb_netdev_ops; ndev->ethtool_ops = &ravb_ethtool_ops; - /* Set AVB config mode */ - error = ravb_set_config_mode(ndev); + error = ravb_compute_gti(ndev); if (error) - goto out_disable_gptp_clk; + goto out_rpm_put; - if (info->gptp || info->ccc_gac) { - /* Set GTI value */ - error = ravb_set_gti(ndev); - if (error) - goto out_disable_gptp_clk; - - /* Request GTI loading */ - ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); - } - - if (info->internal_delay) { - ravb_parse_delay_mode(np, ndev); - ravb_set_delay_mode(ndev); - } + ravb_parse_delay_mode(np, ndev); /* Allocate descriptor base address table */ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; @@ -2835,22 +2995,22 @@ static int ravb_probe(struct platform_device *pdev) "Cannot allocate desc base address table (size %d bytes)\n", priv->desc_bat_size); error = -ENOMEM; - goto out_disable_gptp_clk; + goto out_rpm_put; } for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++) priv->desc_bat[q].die_dt = DT_EOS; - ravb_write(ndev, priv->desc_bat_dma, DBAT); /* Initialise HW timestamp list */ INIT_LIST_HEAD(&priv->ts_skb_list); - /* Initialise PTP Clock driver */ - if (info->ccc_gac) - ravb_ptp_init(ndev, pdev); - /* Debug message level */ priv->msg_enable = RAVB_DEF_MSG_ENABLE; + /* Set config mode as this is needed for PHY initialization. 
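+ * This is undone again (back to CCC_OPC_RESET) right after the MDIO
+ * bus has been initialized below.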
*/ + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + if (error) + goto out_rpm_put; + /* Read and set MAC address */ ravb_read_mac_address(np, ndev); if (!is_valid_ether_addr(ndev->dev_addr)) { @@ -2863,9 +3023,14 @@ static int ravb_probe(struct platform_device *pdev) error = ravb_mdio_init(priv); if (error) { dev_err(&pdev->dev, "failed to initialize MDIO\n"); - goto out_dma_free; + goto out_reset_mode; } + /* Undo previous switch to config opmode. */ + error = ravb_set_opmode(ndev, CCC_OPC_RESET); + if (error) + goto out_mdio_release; + netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll); if (info->nc_queues) netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); @@ -2881,7 +3046,8 @@ static int ravb_probe(struct platform_device *pdev) netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); - platform_set_drvdata(pdev, ndev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); return 0; @@ -2890,22 +3056,19 @@ out_napi_del: netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); +out_mdio_release: ravb_mdio_release(priv); -out_dma_free: +out_reset_mode: + ravb_set_opmode(ndev, CCC_OPC_RESET); dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); - - /* Stop PTP Clock driver */ - if (info->ccc_gac) - ravb_ptp_stop(ndev); -out_disable_gptp_clk: - clk_disable_unprepare(priv->gptp_clk); -out_disable_refclk: - clk_disable_unprepare(priv->refclk); -out_release: +out_rpm_put: pm_runtime_put(&pdev->dev); out_rpm_disable: pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); + clk_unprepare(priv->refclk); +out_reset_assert: reset_control_assert(rstc); out_free_netdev: free_netdev(ndev); @@ -2917,6 +3080,12 @@ static void ravb_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + struct device *dev = &priv->pdev->dev; + int error; + + error = pm_runtime_resume_and_get(dev); + if (error < 0) + return; unregister_netdev(ndev); if (info->nc_queues) @@ -2925,20 +3094,13 @@ static void ravb_remove(struct platform_device *pdev) ravb_mdio_release(priv); - /* Stop PTP Clock driver */ - if (info->ccc_gac) - ravb_ptp_stop(ndev); - dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); - ravb_set_opmode(ndev, CCC_OPC_RESET); - - clk_disable_unprepare(priv->gptp_clk); - clk_disable_unprepare(priv->refclk); - - pm_runtime_put_sync(&pdev->dev); + pm_runtime_put_sync_suspend(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(dev); + clk_unprepare(priv->refclk); reset_control_assert(priv->rstc); free_netdev(ndev); platform_set_drvdata(pdev, NULL); @@ -2964,6 +3126,9 @@ static int ravb_wol_setup(struct net_device *ndev) /* Enable MagicPacket */ ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); + if (priv->info->ccc_gac) + ravb_ptp_stop(ndev); + return enable_irq_wake(priv->emac_irq); } @@ -2971,6 +3136,20 @@ static int ravb_wol_restore(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + int error; + + /* Set reset mode to rearm the WoL logic. */ + error = ravb_set_opmode(ndev, CCC_OPC_RESET); + if (error) + return error; + + /* Set AVB config mode. 
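+ * The PTP clock is re-initialized and NAPI re-enabled once config
+ * mode is entered.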
*/ + error = ravb_set_config_mode(ndev); + if (error) + return error; + + if (priv->info->ccc_gac) + ravb_ptp_init(ndev, priv->pdev); if (info->nc_queues) napi_enable(&priv->napi[RAVB_NC]); @@ -2984,102 +3163,96 @@ static int ravb_wol_restore(struct net_device *ndev) return disable_irq_wake(priv->emac_irq); } -static int __maybe_unused ravb_suspend(struct device *dev) +static int ravb_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct ravb_private *priv = netdev_priv(ndev); int ret; if (!netif_running(ndev)) - return 0; + goto reset_assert; netif_device_detach(ndev); if (priv->wol_enabled) - ret = ravb_wol_setup(ndev); - else - ret = ravb_close(ndev); + return ravb_wol_setup(ndev); - if (priv->info->ccc_gac) - ravb_ptp_stop(ndev); + ret = ravb_close(ndev); + if (ret) + return ret; - return ret; + ret = pm_runtime_force_suspend(&priv->pdev->dev); + if (ret) + return ret; + +reset_assert: + return reset_control_assert(priv->rstc); } -static int __maybe_unused ravb_resume(struct device *dev) +static int ravb_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct ravb_private *priv = netdev_priv(ndev); - const struct ravb_hw_info *info = priv->info; - int ret = 0; - - /* If WoL is enabled set reset mode to rearm the WoL logic */ - if (priv->wol_enabled) { - ret = ravb_set_opmode(ndev, CCC_OPC_RESET); - if (ret) - return ret; - } - - /* All register have been reset to default values. - * Restore all registers which where setup at probe time and - * reopen device if it was running before system suspended. - */ + int ret; - /* Set AVB config mode */ - ret = ravb_set_config_mode(ndev); + ret = reset_control_deassert(priv->rstc); if (ret) return ret; - if (info->gptp || info->ccc_gac) { - /* Set GTI value */ - ret = ravb_set_gti(ndev); + if (!netif_running(ndev)) + return 0; + + /* If WoL is enabled restore the interface. */ + if (priv->wol_enabled) { + ret = ravb_wol_restore(ndev); + if (ret) + return ret; + } else { + ret = pm_runtime_force_resume(dev); if (ret) return ret; - - /* Request GTI loading */ - ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); } - if (info->internal_delay) - ravb_set_delay_mode(ndev); + /* Reopening the interface will restore the device to the working state. */ + ret = ravb_open(ndev); + if (ret < 0) + goto out_rpm_put; - /* Restore descriptor base address table */ - ravb_write(ndev, priv->desc_bat_dma, DBAT); + ravb_set_rx_mode(ndev); + netif_device_attach(ndev); - if (priv->info->ccc_gac) - ravb_ptp_init(ndev, priv->pdev); + return 0; - if (netif_running(ndev)) { - if (priv->wol_enabled) { - ret = ravb_wol_restore(ndev); - if (ret) - return ret; - } - ret = ravb_open(ndev); - if (ret < 0) - return ret; - ravb_set_rx_mode(ndev); - netif_device_attach(ndev); +out_rpm_put: + if (!priv->wol_enabled) { + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } return ret; } -static int __maybe_unused ravb_runtime_nop(struct device *dev) +static int ravb_runtime_suspend(struct device *dev) { - /* Runtime PM callback shared between ->runtime_suspend() - * and ->runtime_resume(). Simply returns success. - * - * This driver re-initializes all registers after - * pm_runtime_get_sync() anyway so there is no need - * to save and restore registers here. 
- */ + struct net_device *ndev = dev_get_drvdata(dev); + struct ravb_private *priv = netdev_priv(ndev); + + clk_disable(priv->refclk); + return 0; } +static int ravb_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct ravb_private *priv = netdev_priv(ndev); + + return clk_enable(priv->refclk); +} + static const struct dev_pm_ops ravb_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume) - SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL) + SYSTEM_SLEEP_PM_OPS(ravb_suspend, ravb_resume) + RUNTIME_PM_OPS(ravb_runtime_suspend, ravb_runtime_resume, NULL) }; static struct platform_driver ravb_driver = { @@ -3087,7 +3260,7 @@ static struct platform_driver ravb_driver = { .remove_new = ravb_remove, .driver = { .name = "ravb", - .pm = &ravb_dev_pm_ops, + .pm = pm_ptr(&ravb_dev_pm_ops), .of_match_table = ravb_match_table, }, }; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 9e59669a93dd..755db89db909 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -32,7 +32,6 @@ #include <net/fib_rules.h> #include <net/fib_notifier.h> #include <linux/io-64-nonatomic-lo-hi.h> -#include <generated/utsrelease.h> #include "rocker_hw.h" #include "rocker.h" @@ -2227,7 +2226,6 @@ static void rocker_port_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { strscpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); - strscpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); } static struct rocker_port_stats { diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h index d14e0cfc3a6b..1458939c3bf5 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h @@ -503,7 +503,6 @@ struct sxgbe_priv_data { bool tx_path_in_lpi_mode; int lpi_irq; int eee_enabled; - int eee_active; int tx_lpi_timer; }; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c index 8ba017ec9849..4a439b34114d 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -133,22 +133,20 @@ static const struct sxgbe_stats sxgbe_gstrings_stats[] = { #define SXGBE_STATS_LEN ARRAY_SIZE(sxgbe_gstrings_stats) static int sxgbe_get_eee(struct net_device *dev, - struct ethtool_eee *edata) + struct ethtool_keee *edata) { struct sxgbe_priv_data *priv = netdev_priv(dev); if (!priv->hw_cap.eee) return -EOPNOTSUPP; - edata->eee_enabled = priv->eee_enabled; - edata->eee_active = priv->eee_active; edata->tx_lpi_timer = priv->tx_lpi_timer; return phy_ethtool_get_eee(dev->phydev, edata); } static int sxgbe_set_eee(struct net_device *dev, - struct ethtool_eee *edata) + struct ethtool_keee *edata) { struct sxgbe_priv_data *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 71439825ea4e..ecbe3994f2b1 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -130,7 +130,6 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv) if (phy_init_eee(ndev->phydev, true)) return false; - priv->eee_active = 1; timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0); priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer); add_timer(&priv->eee_ctrl_timer); diff --git 
a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 175bd9cdfdac..551f890db90a 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -595,7 +595,7 @@ void efx_stop_all(struct efx_nic *efx) efx_stop_datapath(efx); } -/* Context: process, dev_base_lock or RTNL held, non-blocking. */ +/* Context: process, rcu_read_lock or RTNL held, non-blocking. */ void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { struct efx_nic *efx = efx_netdev_priv(net_dev); diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index e001f27085c6..1cb32aedd89c 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2085,7 +2085,7 @@ int ef4_net_stop(struct net_device *net_dev) return 0; } -/* Context: process, dev_base_lock or RTNL held, non-blocking. */ +/* Context: process, rcu_read_lock or RTNL held, non-blocking. */ static void ef4_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c index e4b294b8e9ac..88e5bc347a44 100644 --- a/drivers/net/ethernet/sfc/siena/efx_common.c +++ b/drivers/net/ethernet/sfc/siena/efx_common.c @@ -605,7 +605,7 @@ static size_t efx_siena_update_stats_atomic(struct efx_nic *efx, u64 *full_stats return efx->type->update_stats(efx, full_stats, core_stats); } -/* Context: process, dev_base_lock or RTNL held, non-blocking. */ +/* Context: process, rcu_read_lock or RTNL held, non-blocking. */ void efx_siena_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) { diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 85dcda51df05..4ec61f1ee71a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -165,9 +165,9 @@ config DWMAC_STARFIVE help Support for ethernet controllers on StarFive RISC-V SoCs - This selects the StarFive platform specific glue layer support for - the stmmac device driver. This driver is used for StarFive JH7110 - ethernet controller. + This selects the StarFive platform specific glue layer support + for the stmmac device driver. This driver is used for the + StarFive JH7100 and JH7110 ethernet controllers. config DWMAC_STI tristate "STi GMAC support" diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 721c1f8e892f..a6fefe675ef1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -59,28 +59,51 @@ #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ +struct stmmac_q_tx_stats { + u64_stats_t tx_bytes; + u64_stats_t tx_set_ic_bit; + u64_stats_t tx_tso_frames; + u64_stats_t tx_tso_nfrags; +}; + +struct stmmac_napi_tx_stats { + u64_stats_t tx_packets; + u64_stats_t tx_pkt_n; + u64_stats_t poll; + u64_stats_t tx_clean; + u64_stats_t tx_set_ic_bit; +}; + struct stmmac_txq_stats { - u64 tx_bytes; - u64 tx_packets; - u64 tx_pkt_n; - u64 tx_normal_irq_n; - u64 napi_poll; - u64 tx_clean; - u64 tx_set_ic_bit; - u64 tx_tso_frames; - u64 tx_tso_nfrags; - struct u64_stats_sync syncp; + /* Updates protected by tx queue lock. */ + struct u64_stats_sync q_syncp; + struct stmmac_q_tx_stats q; + + /* Updates protected by NAPI poll logic. 
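+ * Readers must snapshot them with u64_stats_fetch_begin() /
+ * u64_stats_fetch_retry() on napi_syncp.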
*/ + struct u64_stats_sync napi_syncp; + struct stmmac_napi_tx_stats napi; } ____cacheline_aligned_in_smp; +struct stmmac_napi_rx_stats { + u64_stats_t rx_bytes; + u64_stats_t rx_packets; + u64_stats_t rx_pkt_n; + u64_stats_t poll; +}; + struct stmmac_rxq_stats { - u64 rx_bytes; - u64 rx_packets; - u64 rx_pkt_n; - u64 rx_normal_irq_n; - u64 napi_poll; - struct u64_stats_sync syncp; + /* Updates protected by NAPI poll logic. */ + struct u64_stats_sync napi_syncp; + struct stmmac_napi_rx_stats napi; } ____cacheline_aligned_in_smp; +/* Updates on each CPU protected by not allowing nested irqs. */ +struct stmmac_pcpu_stats { + struct u64_stats_sync syncp; + u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES]; + u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES]; +}; + /* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { /* Transmit errors */ @@ -202,9 +225,12 @@ struct stmmac_extra_stats { unsigned long mtl_est_hlbf; unsigned long mtl_est_btre; unsigned long mtl_est_btrlm; + unsigned long max_sdu_txq_drop[MTL_MAX_TX_QUEUES]; + unsigned long mtl_est_txq_hlbf[MTL_MAX_TX_QUEUES]; /* per queue statistics */ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; + struct stmmac_pcpu_stats __percpu *pcpu_stats; unsigned long rx_dropped; unsigned long rx_errors; unsigned long tx_dropped; @@ -216,6 +242,7 @@ struct stmmac_safety_stats { unsigned long mac_errors[32]; unsigned long mtl_errors[32]; unsigned long dma_errors[32]; + unsigned long dma_dpp_errors[32]; }; /* Number of fields in Safety Stats */ @@ -344,6 +371,7 @@ enum request_irq_err { REQ_IRQ_ERR_ALL, REQ_IRQ_ERR_TX, REQ_IRQ_ERR_RX, + REQ_IRQ_ERR_SFTY, REQ_IRQ_ERR_SFTY_UE, REQ_IRQ_ERR_SFTY_CE, REQ_IRQ_ERR_LPI, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 8f730ada71f9..6b65420e11b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -353,6 +353,10 @@ static int imx_dwmac_probe(struct platform_device *pdev) if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) plat_dat->flags |= STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY; + /* Default TX Q0 to use TSO and rest TXQ for TBS */ + for (int i = 1; i < plat_dat->tx_queues_to_use; i++) + plat_dat->tx_queues_cfg[i].tbs_en = 1; + plat_dat->host_dma_width = dwmac->ops->addr_width; plat_dat->init = imx_dwmac_init; plat_dat->exit = imx_dwmac_exit; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 31631e3f89d0..e254b21fdb59 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -106,6 +106,7 @@ struct qcom_ethqos { struct clk *link_clk; struct phy *serdes_phy; unsigned int speed; + int serdes_speed; phy_interface_t phy_mode; const struct ethqos_emac_por *por; @@ -169,6 +170,9 @@ static void rgmii_dump(void *priv) static void ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed) { + if (!phy_interface_mode_is_rgmii(ethqos->phy_mode)) + return; + switch (speed) { case SPEED_1000: ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ; @@ -606,19 +610,39 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos) */ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) { + struct net_device *dev = platform_get_drvdata(ethqos->pdev); + struct stmmac_priv *priv = netdev_priv(dev); int val; val = readl(ethqos->mac_base 
+ MAC_CTRL_REG); switch (ethqos->speed) { + case SPEED_2500: + val &= ~ETHQOS_MAC_CTRL_PORT_SEL; + rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, + RGMII_CONFIG2_RGMII_CLK_SEL_CFG, + RGMII_IO_MACRO_CONFIG2); + if (ethqos->serdes_speed != SPEED_2500) + phy_set_speed(ethqos->serdes_phy, SPEED_2500); + ethqos->serdes_speed = SPEED_2500; + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0); + break; case SPEED_1000: val &= ~ETHQOS_MAC_CTRL_PORT_SEL; rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_IO_MACRO_CONFIG2); + if (ethqos->serdes_speed != SPEED_1000) + phy_set_speed(ethqos->serdes_phy, SPEED_1000); + ethqos->serdes_speed = SPEED_1000; + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); break; case SPEED_100: val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE; + if (ethqos->serdes_speed != SPEED_1000) + phy_set_speed(ethqos->serdes_phy, SPEED_1000); + ethqos->serdes_speed = SPEED_1000; + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); break; case SPEED_10: val |= ETHQOS_MAC_CTRL_PORT_SEL; @@ -627,6 +651,10 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR, SGMII_10M_RX_CLK_DVDR), RGMII_IO_MACRO_CONFIG); + if (ethqos->serdes_speed != SPEED_1000) + phy_set_speed(ethqos->serdes_phy, ethqos->speed); + ethqos->serdes_speed = SPEED_1000; + stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); break; } @@ -728,7 +756,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) struct stmmac_resources stmmac_res; struct device *dev = &pdev->dev; struct qcom_ethqos *ethqos; - int ret; + int ret, i; ret = stmmac_get_platform_resources(pdev, &stmmac_res); if (ret) @@ -799,6 +827,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) "Failed to get serdes phy\n"); ethqos->speed = SPEED_1000; + ethqos->serdes_speed = SPEED_1000; ethqos_update_link_clk(ethqos, SPEED_1000); ethqos_set_func_clk_en(ethqos); @@ -822,6 +851,10 @@ static int qcom_ethqos_probe(struct platform_device *pdev) plat_dat->serdes_powerdown = qcom_ethqos_serdes_powerdown; } + /* Enable TSO on queue0 and enable TBS on rest of the queues */ + for (i = 1; i < plat_dat->tx_queues_to_use; i++) + plat_dat->tx_queues_cfg[i].tbs_en = 1; + return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c index 5d630affb4d1..4e1076faee0c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c @@ -15,13 +15,20 @@ #include "stmmac_platform.h" -#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1 -#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4 -#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U +#define STARFIVE_DWMAC_PHY_INFT_RGMII 0x1 +#define STARFIVE_DWMAC_PHY_INFT_RMII 0x4 +#define STARFIVE_DWMAC_PHY_INFT_FIELD 0x7U + +#define JH7100_SYSMAIN_REGISTER49_DLYCHAIN 0xc8 + +struct starfive_dwmac_data { + unsigned int gtxclk_dlychain; +}; struct starfive_dwmac { struct device *dev; struct clk *clk_tx; + const struct starfive_dwmac_data *data; }; static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) @@ -67,6 +74,8 @@ static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat) case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: mode = STARFIVE_DWMAC_PHY_INFT_RGMII; break; @@ -89,6 +98,14 @@ static int 
starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat) if (err) return dev_err_probe(dwmac->dev, err, "error setting phy mode\n"); + if (dwmac->data) { + err = regmap_write(regmap, JH7100_SYSMAIN_REGISTER49_DLYCHAIN, + dwmac->data->gtxclk_dlychain); + if (err) + return dev_err_probe(dwmac->dev, err, + "error selecting gtxclk delay chain\n"); + } + return 0; } @@ -114,6 +131,8 @@ static int starfive_dwmac_probe(struct platform_device *pdev) if (!dwmac) return -ENOMEM; + dwmac->data = device_get_match_data(&pdev->dev); + dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx"); if (IS_ERR(dwmac->clk_tx)) return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx), @@ -144,8 +163,13 @@ static int starfive_dwmac_probe(struct platform_device *pdev) return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } +static const struct starfive_dwmac_data jh7100_data = { + .gtxclk_dlychain = 4, +}; + static const struct of_device_id starfive_dwmac_match[] = { - { .compatible = "starfive,jh7110-dwmac" }, + { .compatible = "starfive,jh7100-dwmac", .data = &jh7100_data }, + { .compatible = "starfive,jh7110-dwmac" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, starfive_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 137741b94122..b21d99faa2d0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -441,8 +441,7 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; u32 v; @@ -455,9 +454,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_TX_INT) { ret |= handle_tx; - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); } if (v & EMAC_TX_DMA_STOP_INT) @@ -479,9 +478,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv, if (v & EMAC_RX_INT) { ret |= handle_rx; - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); } if (v & EMAC_RX_BUF_UA_INT) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h index 358e7dcb6a9a..17d9120db5fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h @@ -92,7 +92,7 @@ #define DMA_TBS_FTOV BIT(0) #define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV) -/* Following DMA defines are chanels oriented */ +/* Following DMA defines are channel-oriented */ #define DMA_CHAN_BASE_ADDR 0x00001100 #define DMA_CHAN_BASE_OFFSET 0x80 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 9470d3fd2ded..0d185e54eb7e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -171,8 +171,7 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, const struct dwmac4_addrs 
*dwmac4_addrs = priv->plat->dwmac4_addrs; u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan)); u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan)); - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; if (dir == DMA_DIR_RX) @@ -201,15 +200,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, } /* TX/RX NORMAL interrupts */ if (likely(intr_status & DMA_CHAN_STATUS_RI)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } if (likely(intr_status & DMA_CHAN_STATUS_TI)) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 7907d62d3437..85e18f9a22f9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -162,8 +162,7 @@ static void show_rx_process_state(unsigned int status) int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); int ret = 0; /* read the status register (CSR5) */ u32 intr_status = readl(ioaddr + DMA_STATUS); @@ -215,16 +214,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr, u32 value = readl(ioaddr + DMA_INTR_ENA); /* to schedule NAPI on real RIE event. 
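* RI can be set in DMA_STATUS even when receive interrupts are masked,
* hence the DMA_INTR_ENA_RIE check.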
*/ if (likely(value & DMA_INTR_ENA_RIE)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } } if (likely(intr_status & DMA_STATUS_TI)) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } if (unlikely(intr_status & DMA_STATUS_ERI)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 207ff1799f2c..6a2c7d22df1e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -303,6 +303,8 @@ #define XGMAC_RXCEIE BIT(4) #define XGMAC_TXCEIE BIT(0) #define XGMAC_MTL_ECC_INT_STATUS 0x000010cc +#define XGMAC_MTL_DPP_CONTROL 0x000010e0 +#define XGMAC_DPP_DISABLE BIT(0) #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) #define XGMAC_TQS GENMASK(25, 16) #define XGMAC_TQS_SHIFT 16 @@ -385,6 +387,7 @@ #define XGMAC_DCEIE BIT(1) #define XGMAC_TCEIE BIT(0) #define XGMAC_DMA_ECC_INT_STATUS 0x0000306c +#define XGMAC_DMA_DPP_INT_STATUS 0x00003074 #define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x))) #define XGMAC_SPH BIT(24) #define XGMAC_PBLx8 BIT(16) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index eb48211d9b0e..1af2f89a0504 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -830,6 +830,44 @@ static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= { { false, "UNKNOWN", "Unknown Error" }, /* 31 */ }; +#define DPP_RX_ERR "Read Rx Descriptor Parity checker Error" +#define DPP_TX_ERR "Read Tx Descriptor Parity checker Error" + +static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = { + { true, "TDPES0", DPP_TX_ERR }, + { true, "TDPES1", DPP_TX_ERR }, + { true, "TDPES2", DPP_TX_ERR }, + { true, "TDPES3", DPP_TX_ERR }, + { true, "TDPES4", DPP_TX_ERR }, + { true, "TDPES5", DPP_TX_ERR }, + { true, "TDPES6", DPP_TX_ERR }, + { true, "TDPES7", DPP_TX_ERR }, + { true, "TDPES8", DPP_TX_ERR }, + { true, "TDPES9", DPP_TX_ERR }, + { true, "TDPES10", DPP_TX_ERR }, + { true, "TDPES11", DPP_TX_ERR }, + { true, "TDPES12", DPP_TX_ERR }, + { true, "TDPES13", DPP_TX_ERR }, + { true, "TDPES14", DPP_TX_ERR }, + { true, "TDPES15", DPP_TX_ERR }, + { true, "RDPES0", DPP_RX_ERR }, + { true, "RDPES1", DPP_RX_ERR }, + { true, "RDPES2", DPP_RX_ERR }, + { true, "RDPES3", DPP_RX_ERR }, + { true, "RDPES4", DPP_RX_ERR }, + { true, "RDPES5", DPP_RX_ERR }, + { true, "RDPES6", DPP_RX_ERR }, + { true, "RDPES7", DPP_RX_ERR }, + { true, "RDPES8", DPP_RX_ERR }, + { true, "RDPES9", DPP_RX_ERR }, + { true, "RDPES10", DPP_RX_ERR }, + { true, "RDPES11", DPP_RX_ERR }, + { true, "RDPES12", DPP_RX_ERR }, + { true, "RDPES13", DPP_RX_ERR }, + { true, "RDPES14", DPP_RX_ERR }, + { true, "RDPES15", DPP_RX_ERR }, +}; + static void dwxgmac3_handle_dma_err(struct net_device *ndev, void __iomem *ioaddr, bool correctable, struct stmmac_safety_stats *stats) @@ -841,6 +879,13 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev, dwxgmac3_log_error(ndev, value, correctable, "DMA", dwxgmac3_dma_errors, STAT_OFF(dma_errors), 
stats); + + value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS); + writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS); + + dwxgmac3_log_error(ndev, value, false, "DMA_DPP", + dwxgmac3_dma_dpp_errors, + STAT_OFF(dma_dpp_errors), stats); } static int @@ -881,6 +926,12 @@ dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp, value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL); + /* 5. Enable Data Path Parity Protection */ + value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL); + /* already enabled by default, explicit enable it again */ + value &= ~XGMAC_DPP_DISABLE; + writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL); + return 0; } @@ -914,7 +965,11 @@ static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev, ret |= !corr; } - err = dma & (XGMAC_DEUIS | XGMAC_DECIS); + /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in + * DMA_Safety_Interrupt_Status, so we handle DMA Data Path + * Parity Errors here + */ + err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS); corr = dma & XGMAC_DECIS; if (err) { dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats); @@ -930,6 +985,7 @@ static const struct dwxgmac3_error { { dwxgmac3_mac_errors }, { dwxgmac3_mtl_errors }, { dwxgmac3_dma_errors }, + { dwxgmac3_dma_dpp_errors }, }; static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 3cde695fec91..dd2ab6185c40 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -337,8 +337,7 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, struct stmmac_extra_stats *x, u32 chan, u32 dir) { - struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan]; - struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan]; + struct stmmac_pcpu_stats *stats = this_cpu_ptr(priv->xstats.pcpu_stats); u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan)); u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan)); int ret = 0; @@ -367,15 +366,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, /* TX/RX NORMAL interrupts */ if (likely(intr_status & XGMAC_NIS)) { if (likely(intr_status & XGMAC_RI)) { - u64_stats_update_begin(&rxq_stats->syncp); - rxq_stats->rx_normal_irq_n++; - u64_stats_update_end(&rxq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_rx; } if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - u64_stats_update_begin(&txq_stats->syncp); - txq_stats->tx_normal_irq_n++; - u64_stats_update_end(&txq_stats->syncp); + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); ret |= handle_tx; } } diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 1bd34b2a47e8..29367105df54 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -224,7 +224,7 @@ static const struct stmmac_hwif_entry { .regs = { .ptp_off = PTP_GMAC4_OFFSET, .mmc_off = MMC_GMAC4_OFFSET, - .est_off = EST_XGMAC_OFFSET, + .est_off = EST_GMAC4_OFFSET, }, .desc = &dwmac4_desc_ops, .dma = &dwmac410_dma_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 14c9d2637dfe..dff02d75d519 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -86,10 +86,6 @@ struct stmmac_counters { unsigned int mmc_rx_discard_octets_gb; unsigned int mmc_rx_align_err_frames; - /* IPC */ - unsigned int mmc_rx_ipc_intr_mask; - unsigned int mmc_rx_ipc_intr; - /* IPv4 */ unsigned int mmc_rx_ipv4_gd; unsigned int mmc_rx_ipv4_hderr; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 8597c6abae8d..7eb477faa75a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -316,9 +316,6 @@ static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc) mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW); mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB); mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR); - /* IPC */ - mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK); - mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR); /* IPv4 */ mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD); mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f155e4841c62..dddcaa9220cc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -31,6 +31,7 @@ struct stmmac_resources { int wol_irq; int lpi_irq; int irq; + int sfty_irq; int sfty_ce_irq; int sfty_ue_irq; int rx_irq[MTL_MAX_RX_QUEUES]; @@ -298,6 +299,7 @@ struct stmmac_priv { void __iomem *ptpaddr; void __iomem *estaddr; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + int sfty_irq; int sfty_ce_irq; int sfty_ue_irq; int rx_irq[MTL_MAX_RX_QUEUES]; @@ -306,6 +308,7 @@ struct stmmac_priv { char int_name_mac[IFNAMSIZ + 9]; char int_name_wol[IFNAMSIZ + 9]; char int_name_lpi[IFNAMSIZ + 9]; + char int_name_sfty[IFNAMSIZ + 10]; char int_name_sfty_ce[IFNAMSIZ + 10]; char int_name_sfty_ue[IFNAMSIZ + 10]; char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14]; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c index 4da6ccc17c20..c9693f77e1f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_est.c @@ -81,6 +81,7 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev, u32 status, value, feqn, hbfq, hbfs, btrl, btrl_max; void __iomem *est_addr = priv->estaddr; u32 txqcnt_mask = BIT(txqcnt) - 1; + int i; status = readl(est_addr + EST_STATUS); @@ -125,6 +126,11 @@ static void est_irq_status(struct stmmac_priv *priv, struct net_device *dev, x->mtl_est_hlbf++; + for (i = 0; i < txqcnt; i++) { + if (feqn & BIT(i)) + x->mtl_est_txq_hlbf[i]++; + } + /* Clear Interrupt */ writel(feqn, est_addr + EST_FRM_SZ_ERR); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 42d27b97dd1d..e1537a57815f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -243,8 +243,6 @@ static const struct stmmac_stats stmmac_mmc[] = { STMMAC_MMC_STAT(mmc_rx_discard_frames_gb), STMMAC_MMC_STAT(mmc_rx_discard_octets_gb), STMMAC_MMC_STAT(mmc_rx_align_err_frames), - STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask), - STMMAC_MMC_STAT(mmc_rx_ipc_intr), STMMAC_MMC_STAT(mmc_rx_ipv4_gd), 
STMMAC_MMC_STAT(mmc_rx_ipv4_hderr), STMMAC_MMC_STAT(mmc_rx_ipv4_nopay), @@ -549,44 +547,79 @@ stmmac_set_pauseparam(struct net_device *netdev, } } +static u64 stmmac_get_rx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->rx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; + } + return total; +} + +static u64 stmmac_get_tx_normal_irq_n(struct stmmac_priv *priv, int q) +{ + u64 total; + int cpu; + + total = 0; + for_each_possible_cpu(cpu) { + struct stmmac_pcpu_stats *pcpu; + unsigned int start; + u64 irq_n; + + pcpu = per_cpu_ptr(priv->xstats.pcpu_stats, cpu); + do { + start = u64_stats_fetch_begin(&pcpu->syncp); + irq_n = u64_stats_read(&pcpu->tx_normal_irq_n[q]); + } while (u64_stats_fetch_retry(&pcpu->syncp, start)); + total += irq_n; + } + return total; +} + static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) { u32 tx_cnt = priv->plat->tx_queues_to_use; u32 rx_cnt = priv->plat->rx_queues_to_use; unsigned int start; - int q, stat; - char *p; + int q; for (q = 0; q < tx_cnt; q++) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; - struct stmmac_txq_stats snapshot; + u64 pkt_n; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - snapshot = *txq_stats; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + pkt_n = u64_stats_read(&txq_stats->napi.tx_pkt_n); + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); - p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n); - for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { - *data++ = (*(u64 *)p); - p += sizeof(u64); - } + *data++ = pkt_n; + *data++ = stmmac_get_tx_normal_irq_n(priv, q); } for (q = 0; q < rx_cnt; q++) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; - struct stmmac_rxq_stats snapshot; + u64 pkt_n; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - snapshot = *rxq_stats; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + pkt_n = u64_stats_read(&rxq_stats->napi.rx_pkt_n); + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); - p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n); - for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { - *data++ = (*(u64 *)p); - p += sizeof(u64); - } + *data++ = pkt_n; + *data++ = stmmac_get_rx_normal_irq_n(priv, q); } } @@ -645,39 +678,49 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, pos = j; for (i = 0; i < rx_queues_count; i++) { struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i]; - struct stmmac_rxq_stats snapshot; + struct stmmac_napi_rx_stats snapshot; + u64 n_irq; j = pos; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - snapshot = *rxq_stats; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); - - data[j++] += snapshot.rx_pkt_n; - data[j++] += snapshot.rx_normal_irq_n; - normal_irq_n += snapshot.rx_normal_irq_n; - napi_poll += snapshot.napi_poll; + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + snapshot = rxq_stats->napi; + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); + + data[j++] += u64_stats_read(&snapshot.rx_pkt_n); + n_irq = 
stmmac_get_rx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + napi_poll += u64_stats_read(&snapshot.poll); } pos = j; for (i = 0; i < tx_queues_count; i++) { struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i]; - struct stmmac_txq_stats snapshot; + struct stmmac_napi_tx_stats napi_snapshot; + struct stmmac_q_tx_stats q_snapshot; + u64 n_irq; j = pos; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - snapshot = *txq_stats; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); - - data[j++] += snapshot.tx_pkt_n; - data[j++] += snapshot.tx_normal_irq_n; - normal_irq_n += snapshot.tx_normal_irq_n; - data[j++] += snapshot.tx_clean; - data[j++] += snapshot.tx_set_ic_bit; - data[j++] += snapshot.tx_tso_frames; - data[j++] += snapshot.tx_tso_nfrags; - napi_poll += snapshot.napi_poll; + start = u64_stats_fetch_begin(&txq_stats->q_syncp); + q_snapshot = txq_stats->q; + } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + napi_snapshot = txq_stats->napi; + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); + + data[j++] += u64_stats_read(&napi_snapshot.tx_pkt_n); + n_irq = stmmac_get_tx_normal_irq_n(priv, i); + data[j++] += n_irq; + normal_irq_n += n_irq; + data[j++] += u64_stats_read(&napi_snapshot.tx_clean); + data[j++] += u64_stats_read(&q_snapshot.tx_set_ic_bit) + + u64_stats_read(&napi_snapshot.tx_set_ic_bit); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_frames); + data[j++] += u64_stats_read(&q_snapshot.tx_tso_nfrags); + napi_poll += u64_stats_read(&napi_snapshot.poll); } normal_irq_n += priv->xstats.rx_early_irq; data[j++] = normal_irq_n; @@ -852,15 +895,13 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) } static int stmmac_ethtool_op_get_eee(struct net_device *dev, - struct ethtool_eee *edata) + struct ethtool_keee *edata) { struct stmmac_priv *priv = netdev_priv(dev); if (!priv->dma_cap.eee) return -EOPNOTSUPP; - edata->eee_enabled = priv->eee_enabled; - edata->eee_active = priv->eee_active; edata->tx_lpi_timer = priv->tx_lpi_timer; edata->tx_lpi_enabled = priv->tx_lpi_enabled; @@ -868,7 +909,7 @@ static int stmmac_ethtool_op_get_eee(struct net_device *dev, } static int stmmac_ethtool_op_set_eee(struct net_device *dev, - struct ethtool_eee *edata) + struct ethtool_keee *edata) { struct stmmac_priv *priv = netdev_priv(dev); int ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index b334eb16da23..24cd80490d19 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2482,7 +2482,6 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) struct xdp_desc xdp_desc; bool work_done = true; u32 tx_set_ic_bit = 0; - unsigned long flags; /* Avoids TX time-out as we are sharing with slow path */ txq_trans_cond_update(nq); @@ -2507,6 +2506,13 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) if (!xsk_tx_peek_desc(pool, &xdp_desc)) break; + if (priv->plat->est && priv->plat->est->enable && + priv->plat->est->max_sdu[queue] && + xdp_desc.len > priv->plat->est->max_sdu[queue]) { + priv->xstats.max_sdu_txq_drop[queue]++; + continue; + } + if (likely(priv->extend_desc)) tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) @@ -2566,9 +2572,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 
queue, u32 budget) tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); entry = tx_q->cur_tx; } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_set_ic_bit += tx_set_ic_bit; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); + u64_stats_update_end(&txq_stats->napi_syncp); if (tx_desc) { stmmac_flush_tx_descriptors(priv, queue); @@ -2616,7 +2622,6 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, unsigned int bytes_compl = 0, pkts_compl = 0; unsigned int entry, xmits = 0, count = 0; u32 tx_packets = 0, tx_errors = 0; - unsigned long flags; __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); @@ -2674,7 +2679,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, } if (skb) { stmmac_get_tx_hwtstamp(priv, p, skb); - } else { + } else if (tx_q->xsk_pool && + xp_tx_metadata_enabled(tx_q->xsk_pool)) { struct stmmac_xsk_tx_complete tx_compl = { .priv = priv, .desc = p, @@ -2782,11 +2788,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, if (tx_q->dirty_tx != tx_q->cur_tx) *pending_packets = true; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_packets += tx_packets; - txq_stats->tx_pkt_n += tx_packets; - txq_stats->tx_clean++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); + u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); + u64_stats_inc(&txq_stats->napi.tx_clean); + u64_stats_update_end(&txq_stats->napi_syncp); priv->xstats.tx_errors += tx_errors; @@ -3592,6 +3598,10 @@ static void stmmac_free_irq(struct net_device *dev, if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) free_irq(priv->wol_irq, dev); fallthrough; + case REQ_IRQ_ERR_SFTY: + if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) + free_irq(priv->sfty_irq, dev); + fallthrough; case REQ_IRQ_ERR_WOL: free_irq(dev->irq, dev); fallthrough; @@ -3662,6 +3672,23 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) } } + /* Request the common Safety Feature Correctible/Uncorrectible + * Error line in case of another line is used + */ + if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { + int_name = priv->int_name_sfty; + sprintf(int_name, "%s:%s", dev->name, "safety"); + ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, + 0, int_name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: alloc sfty MSI %d (error: %d)\n", + __func__, priv->sfty_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY; + goto irq_error; + } + } + /* Request the Safety Feature Correctible Error line in * case of another line is used */ @@ -3799,6 +3826,21 @@ static int stmmac_request_irq_single(struct net_device *dev) } } + /* Request the common Safety Feature Correctible/Uncorrectible + * Error line in case of another line is used + */ + if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { + ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + netdev_err(priv->dev, + "%s: ERROR: allocating the sfty IRQ %d (%d)\n", + __func__, priv->sfty_irq, ret); + irq_err = REQ_IRQ_ERR_SFTY; + goto irq_error; + } + } + return 0; irq_error: @@ -3932,6 +3974,9 @@ static int __stmmac_open(struct net_device *dev, priv->rx_copybreak = STMMAC_RX_COPYBREAK; buf_sz = 
dma_conf->dma_buf_sz; + for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) + if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) + dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); stmmac_reset_queues_param(priv); @@ -4004,8 +4049,10 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv) { set_bit(__FPE_REMOVING, &priv->fpe_task_state); - if (priv->fpe_wq) + if (priv->fpe_wq) { destroy_workqueue(priv->fpe_wq); + priv->fpe_wq = NULL; + } netdev_info(priv->dev, "FPE workqueue stop"); } @@ -4210,7 +4257,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; u8 proto_hdr_len, hdr; - unsigned long flags; u32 pay_len, mss; dma_addr_t des; int i; @@ -4375,13 +4421,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_bytes += skb->len; - txq_stats->tx_tso_frames++; - txq_stats->tx_tso_nfrags += nfrags; + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_add(&txq_stats->q.tx_bytes, skb->len); + u64_stats_inc(&txq_stats->q.tx_tso_frames); + u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); if (set_ic) - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4480,7 +4526,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) struct stmmac_tx_queue *tx_q; bool has_vlan, set_ic; int entry, first_tx; - unsigned long flags; dma_addr_t des; tx_q = &priv->dma_conf.tx_queue[queue]; @@ -4498,6 +4543,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) return stmmac_tso_xmit(skb, dev); } + if (priv->plat->est && priv->plat->est->enable && + priv->plat->est->max_sdu[queue] && + skb->len > priv->plat->est->max_sdu[queue]){ + priv->xstats.max_sdu_txq_drop[queue]++; + goto max_sdu_err; + } + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, @@ -4650,11 +4702,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); } - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_bytes += skb->len; + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_add(&txq_stats->q.tx_bytes, skb->len); if (set_ic) - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); if (priv->sarc_type) stmmac_set_desc_sarc(priv, first, priv->sarc_type); @@ -4715,6 +4767,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dma_map_err: netdev_err(priv->dev, "Tx DMA map failed\n"); +max_sdu_err: dev_kfree_skb(skb); priv->xstats.tx_dropped++; return NETDEV_TX_OK; @@ -4871,6 +4924,13 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) return STMMAC_XDP_CONSUMED; + if (priv->plat->est && priv->plat->est->enable && + priv->plat->est->max_sdu[queue] && + xdpf->len > priv->plat->est->max_sdu[queue]) { + 
priv->xstats.max_sdu_txq_drop[queue]++; + return STMMAC_XDP_CONSUMED; + } + if (likely(priv->extend_desc)) tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); else if (tx_q->tbs & STMMAC_TBS_AVAIL) @@ -4918,12 +4978,11 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, set_ic = false; if (set_ic) { - unsigned long flags; tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, tx_desc); - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->tx_set_ic_bit++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->q_syncp); + u64_stats_inc(&txq_stats->q.tx_set_ic_bit); + u64_stats_update_end(&txq_stats->q_syncp); } stmmac_enable_dma_transmission(priv, priv->ioaddr); @@ -5073,7 +5132,6 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, unsigned int len = xdp->data_end - xdp->data; enum pkt_hash_types hash_type; int coe = priv->hw->rx_csum; - unsigned long flags; struct sk_buff *skb; u32 hash; @@ -5103,10 +5161,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, skb_record_rx_queue(skb, queue); napi_gro_receive(&ch->rxtx_napi, skb); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_pkt_n++; - rxq_stats->rx_bytes += len; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.rx_pkt_n); + u64_stats_add(&rxq_stats->napi.rx_bytes, len); + u64_stats_update_end(&rxq_stats->napi_syncp); } static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) @@ -5188,7 +5246,6 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) unsigned int desc_size; struct bpf_prog *prog; bool failure = false; - unsigned long flags; int xdp_status = 0; int status = 0; @@ -5343,9 +5400,9 @@ read_again: stmmac_finalize_xdp_rx(priv, xdp_status); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_pkt_n += count; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); + u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5383,7 +5440,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) unsigned int desc_size; struct sk_buff *skb = NULL; struct stmmac_xdp_buff ctx; - unsigned long flags; int xdp_status = 0; int buf_sz; @@ -5643,11 +5699,11 @@ drain_data: stmmac_rx_refill(priv, queue); - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->rx_packets += rx_packets; - rxq_stats->rx_bytes += rx_bytes; - rxq_stats->rx_pkt_n += count; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); + u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); + u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); + u64_stats_update_end(&rxq_stats->napi_syncp); priv->xstats.rx_dropped += rx_dropped; priv->xstats.rx_errors += rx_errors; @@ -5662,13 +5718,12 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget) struct stmmac_priv *priv = ch->priv_data; struct stmmac_rxq_stats *rxq_stats; u32 chan = ch->index; - unsigned long flags; int work_done; rxq_stats = &priv->xstats.rxq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->napi_poll++; - 
u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.poll); + u64_stats_update_end(&rxq_stats->napi_syncp); work_done = stmmac_rx(priv, budget, chan); if (work_done < budget && napi_complete_done(napi, work_done)) { @@ -5690,13 +5745,12 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget) struct stmmac_txq_stats *txq_stats; bool pending_packets = false; u32 chan = ch->index; - unsigned long flags; int work_done; txq_stats = &priv->xstats.txq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_inc(&txq_stats->napi.poll); + u64_stats_update_end(&txq_stats->napi_syncp); work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets); work_done = min(work_done, budget); @@ -5726,17 +5780,16 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget) struct stmmac_rxq_stats *rxq_stats; struct stmmac_txq_stats *txq_stats; u32 chan = ch->index; - unsigned long flags; rxq_stats = &priv->xstats.rxq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp); - rxq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags); + u64_stats_update_begin(&rxq_stats->napi_syncp); + u64_stats_inc(&rxq_stats->napi.poll); + u64_stats_update_end(&rxq_stats->napi_syncp); txq_stats = &priv->xstats.txq_stats[chan]; - flags = u64_stats_update_begin_irqsave(&txq_stats->syncp); - txq_stats->napi_poll++; - u64_stats_update_end_irqrestore(&txq_stats->syncp, flags); + u64_stats_update_begin(&txq_stats->napi_syncp); + u64_stats_inc(&txq_stats->napi.poll); + u64_stats_update_end(&txq_stats->napi_syncp); tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets); tx_done = min(tx_done, budget); @@ -6011,10 +6064,8 @@ static void stmmac_common_interrupt(struct stmmac_priv *priv) priv->tx_path_in_lpi_mode = false; } - for (queue = 0; queue < queues_count; queue++) { - status = stmmac_host_mtl_irq_status(priv, priv->hw, - queue); - } + for (queue = 0; queue < queues_count; queue++) + stmmac_host_mtl_irq_status(priv, priv->hw, queue); /* PCS link status */ if (priv->hw->pcs && @@ -6049,8 +6100,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; - /* Check if a fatal error happened */ - if (stmmac_safety_feat_interrupt(priv)) + /* Check ASP error if it isn't delivered via an individual IRQ */ + if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv)) return IRQ_HANDLED; /* To handle Common interrupts */ @@ -6067,11 +6118,6 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id) struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -6087,11 +6133,6 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id) struct net_device *dev = (struct net_device *)dev_id; struct stmmac_priv *priv = netdev_priv(dev); - if (unlikely(!dev)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -6113,11 +6154,6 @@ static irqreturn_t 
stmmac_msi_intr_tx(int irq, void *data) dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]); priv = container_of(dma_conf, struct stmmac_priv, dma_conf); - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -6144,11 +6180,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data) dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]); priv = container_of(dma_conf, struct stmmac_priv, dma_conf); - if (unlikely(!data)) { - netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); - return IRQ_NONE; - } - /* Check if adapter is up */ if (test_bit(STMMAC_DOWN, &priv->state)) return IRQ_HANDLED; @@ -7062,10 +7093,13 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 u64 tx_bytes; do { - start = u64_stats_fetch_begin(&txq_stats->syncp); - tx_packets = txq_stats->tx_packets; - tx_bytes = txq_stats->tx_bytes; - } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + start = u64_stats_fetch_begin(&txq_stats->q_syncp); + tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); + } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); + do { + start = u64_stats_fetch_begin(&txq_stats->napi_syncp); + tx_packets = u64_stats_read(&txq_stats->napi.tx_packets); + } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; @@ -7077,10 +7111,10 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 u64 rx_bytes; do { - start = u64_stats_fetch_begin(&rxq_stats->syncp); - rx_packets = rxq_stats->rx_packets; - rx_bytes = rxq_stats->rx_bytes; - } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); + rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); + rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); + } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; @@ -7474,9 +7508,16 @@ int stmmac_dvr_probe(struct device *device, priv->dev = ndev; for (i = 0; i < MTL_MAX_RX_QUEUES; i++) - u64_stats_init(&priv->xstats.rxq_stats[i].syncp); - for (i = 0; i < MTL_MAX_TX_QUEUES; i++) - u64_stats_init(&priv->xstats.txq_stats[i].syncp); + u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); + for (i = 0; i < MTL_MAX_TX_QUEUES; i++) { + u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); + u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); + } + + priv->xstats.pcpu_stats = + devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats); + if (!priv->xstats.pcpu_stats) + return -ENOMEM; stmmac_set_ethtool_ops(ndev); priv->pause = pause; @@ -7489,6 +7530,7 @@ int stmmac_dvr_probe(struct device *device, priv->dev->irq = res->irq; priv->wol_irq = res->wol_irq; priv->lpi_irq = res->lpi_irq; + priv->sfty_irq = res->sfty_irq; priv->sfty_ce_irq = res->sfty_ce_irq; priv->sfty_ue_irq = res->sfty_ue_irq; for (i = 0; i < MTL_MAX_RX_QUEUES; i++) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h index aefc121464b5..13a30e6df4c1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h @@ -110,6 +110,8 @@ static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, /* Enable and restart the Auto-Negotiation */ if (ane) value |= 
GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN; + else + value &= ~GMAC_AN_CTRL_ANE; /* In case of MAC-2-MAC connection, block is configured to operate * according to MAC conf register. diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 70eadc83ca68..54797edc9b38 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -743,6 +743,14 @@ int stmmac_get_platform_resources(struct platform_device *pdev, dev_info(&pdev->dev, "IRQ eth_lpi not found\n"); } + stmmac_res->sfty_irq = + platform_get_irq_byname_optional(pdev, "sfty"); + if (stmmac_res->sfty_irq < 0) { + if (stmmac_res->sfty_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(&pdev->dev, "IRQ sfty not found\n"); + } + stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0); return PTR_ERR_OR_ZERO(stmmac_res->addr); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 26fa33e5ec34..cce00719937d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -915,8 +915,30 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, return time; } -static int tc_setup_taprio(struct stmmac_priv *priv, - struct tc_taprio_qopt_offload *qopt) +static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) +{ + struct plat_stmmacenet_data *plat = priv->plat; + u32 num_tc = qopt->mqprio.qopt.num_tc; + u32 offset, count, i, j; + + /* QueueMaxSDU received from the driver corresponds to the Linux traffic + * class. Map queueMaxSDU per Linux traffic class to DWMAC Tx queues. + */ + for (i = 0; i < num_tc; i++) { + if (!qopt->max_sdu[i]) + continue; + + offset = qopt->mqprio.qopt.offset[i]; + count = qopt->mqprio.qopt.count[i]; + + for (j = offset; j < offset + count; j++) + plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN; + } +} + +static int tc_taprio_configure(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) { u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; struct plat_stmmacenet_data *plat = priv->plat; @@ -968,8 +990,6 @@ static int tc_setup_taprio(struct stmmac_priv *priv, if (qopt->cmd == TAPRIO_CMD_DESTROY) goto disable; - else if (qopt->cmd != TAPRIO_CMD_REPLACE) - return -EOPNOTSUPP; if (qopt->num_entries >= dep) return -EINVAL; @@ -1045,6 +1065,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv, priv->plat->est->ter = qopt->cycle_time_extension; + tc_taprio_map_maxsdu_txq(priv, qopt); + if (fpe && !priv->dma_cap.fpesel) { mutex_unlock(&priv->plat->est->lock); return -EOPNOTSUPP; @@ -1078,6 +1100,11 @@ disable: priv->plat->est->enable = false; stmmac_est_configure(priv, priv, priv->plat->est, priv->plat->clk_ptp_rate); + /* Reset taprio status */ + for (i = 0; i < priv->plat->tx_queues_to_use; i++) { + priv->xstats.max_sdu_txq_drop[i] = 0; + priv->xstats.mtl_est_txq_hlbf[i] = 0; + } mutex_unlock(&priv->plat->est->lock); } @@ -1095,6 +1122,57 @@ disable: return ret; } +static void tc_taprio_stats(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) +{ + u64 window_drops = 0; + int i = 0; + + for (i = 0; i < priv->plat->tx_queues_to_use; i++) + window_drops += priv->xstats.max_sdu_txq_drop[i] + + priv->xstats.mtl_est_txq_hlbf[i]; + qopt->stats.window_drops = window_drops; + + /* Transmission overrun doesn't happen for stmmac, hence always 0 */ + 
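The queueMaxSDU handling added in tc_taprio_map_maxsdu_txq() above is easy to sanity-check outside the kernel: each traffic class owns a contiguous [offset, offset + count) range of Tx queues, and every queue in that range inherits the class's queueMaxSDU plus the Ethernet header overhead (ETH_HLEN - ETH_TLEN, i.e. 12 bytes), since the MAC compares whole frames against the limit. A minimal user-space sketch of the same loop; struct taprio_qopt, txq_limit and the array sizes are illustrative stand-ins, not the kernel's types:

#include <stdio.h>

#define ETH_HLEN 14	/* Ethernet header length */
#define ETH_TLEN 2	/* Ethernet EtherType field length */
#define NUM_TC   2
#define NUM_TXQ  4

/* Flattened stand-in for tc_taprio_qopt_offload (names are illustrative). */
struct taprio_qopt {
	unsigned int num_tc;
	unsigned int offset[NUM_TC];	/* first Tx queue of each class */
	unsigned int count[NUM_TC];	/* queues owned by the class */
	unsigned int max_sdu[NUM_TC];	/* queueMaxSDU, 0 == unlimited */
};

/* Every Tx queue in a class's [offset, offset + count) range inherits the
 * class limit, plus the MAC header overhead the hardware sees per frame. */
static void map_maxsdu_txq(const struct taprio_qopt *q, unsigned int *txq_limit)
{
	unsigned int i, j;

	for (i = 0; i < q->num_tc; i++) {
		if (!q->max_sdu[i])
			continue;	/* 0 means "no limit" for this class */
		for (j = q->offset[i]; j < q->offset[i] + q->count[i]; j++)
			txq_limit[j] = q->max_sdu[i] + ETH_HLEN - ETH_TLEN;
	}
}

int main(void)
{
	struct taprio_qopt q = {
		.num_tc  = 2,
		.offset  = { 0, 2 },
		.count   = { 2, 2 },
		.max_sdu = { 0, 1500 },	/* cap only the second class */
	};
	unsigned int txq_limit[NUM_TXQ] = { 0 };
	unsigned int j;

	map_maxsdu_txq(&q, txq_limit);
	for (j = 0; j < NUM_TXQ; j++)
		printf("txq%u limit: %u\n", j, txq_limit[j]);	/* 0 0 1512 1512 */
	return 0;
}

With the inputs in main(), queues 2 and 3 end up capped at 1512 bytes while queues 0 and 1 stay unlimited, matching what the driver would program into plat->est->max_sdu[].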
qopt->stats.tx_overruns = 0; +} + +static void tc_taprio_queue_stats(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) +{ + struct tc_taprio_qopt_queue_stats *q_stats = &qopt->queue_stats; + int queue = qopt->queue_stats.queue; + + q_stats->stats.window_drops = priv->xstats.max_sdu_txq_drop[queue] + + priv->xstats.mtl_est_txq_hlbf[queue]; + + /* Transmission overrun doesn't happen for stmmac, hence always 0 */ + q_stats->stats.tx_overruns = 0; +} + +static int tc_setup_taprio(struct stmmac_priv *priv, + struct tc_taprio_qopt_offload *qopt) +{ + int err = 0; + + switch (qopt->cmd) { + case TAPRIO_CMD_REPLACE: + case TAPRIO_CMD_DESTROY: + err = tc_taprio_configure(priv, qopt); + break; + case TAPRIO_CMD_STATS: + tc_taprio_stats(priv, qopt); + break; + case TAPRIO_CMD_QUEUE_STATS: + tc_taprio_queue_stats(priv, qopt); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + static int tc_setup_etf(struct stmmac_priv *priv, struct tc_etf_qopt_offload *qopt) { @@ -1126,6 +1204,7 @@ static int tc_query_caps(struct stmmac_priv *priv, return -EOPNOTSUPP; caps->gate_mask_per_txq = true; + caps->supports_queue_max_sdu = true; return 0; } diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index be01450c20dc..1530d13984d4 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -189,6 +189,7 @@ config TI_ICSSG_PRUETH select TI_K3_CPPI_DESC_POOL depends on PRU_REMOTEPROC depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER + depends on PTP_1588_CLOCK_OPTIONAL help Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem. This subsystem is available starting with the AM65 platform. diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index 35fceba01ea4..d6ce2c9f0a8d 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -514,14 +514,14 @@ am65_cpsw_set_link_ksettings(struct net_device *ndev, return phylink_ethtool_ksettings_set(salve->phylink, ecmd); } -static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); return phylink_ethtool_get_eee(salve->phylink, edata); } -static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct am65_cpsw_slave_data *salve = am65_ndev_to_slave(ndev); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index ea85c6dd5484..c0a5abd8d9a8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) } } + phy->mac_managed_pm = true; + slave->phy = phy; phy_attached_info(slave->phy); diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index a557a477d039..f7b283353ba2 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -422,7 +422,7 @@ int cpsw_set_link_ksettings(struct net_device *ndev, return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd); } -int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; @@ -434,7 
+434,7 @@ int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata) return -EOPNOTSUPP; } -int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata) { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 498c50c6d1a7..087dcb67505a 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -773,6 +773,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->slave_num); return; } + + phy->mac_managed_pm = true; + slave->phy = phy; phy_attached_info(slave->phy); diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 0e27c433098d..7efa72502c86 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -496,8 +496,8 @@ int cpsw_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *ecmd); int cpsw_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *ecmd); -int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata); -int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata); +int cpsw_get_eee(struct net_device *ndev, struct ethtool_keee *edata); +int cpsw_set_eee(struct net_device *ndev, struct ethtool_keee *edata); int cpsw_nway_reset(struct net_device *ndev); void cpsw_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ering, diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index bcccf43d368b..dbbea9146040 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -638,6 +638,16 @@ static void cpts_calc_mult_shift(struct cpts *cpts) freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC)); } +static void cpts_clk_unregister(void *clk) +{ + clk_hw_unregister_mux(clk); +} + +static void cpts_clk_del_provider(void *np) +{ + of_clk_del_provider(np); +} + static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) { struct device_node *refclk_np; @@ -687,9 +697,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) goto mux_fail; } - ret = devm_add_action_or_reset(cpts->dev, - (void(*)(void *))clk_hw_unregister_mux, - clk_hw); + ret = devm_add_action_or_reset(cpts->dev, cpts_clk_unregister, clk_hw); if (ret) { dev_err(cpts->dev, "add clkmux unreg action %d", ret); goto mux_fail; @@ -699,8 +707,7 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node) if (ret) goto mux_fail; - ret = devm_add_action_or_reset(cpts->dev, - (void(*)(void *))of_clk_del_provider, + ret = devm_add_action_or_reset(cpts->dev, cpts_clk_del_provider, refclk_np); if (ret) { dev_err(cpts->dev, "add clkmux provider unreg action %d", ret); diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c index a27ec1dcc8d5..9a7dd7efcf69 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c +++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c @@ -45,7 +45,7 @@ static int emac_set_link_ksettings(struct net_device *ndev, return phy_ethtool_set_link_ksettings(ndev, ecmd); } -static int emac_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +static int emac_get_eee(struct net_device *ndev, struct ethtool_keee *edata) { if (!ndev->phydev) return -EOPNOTSUPP; @@ -53,7 +53,7 @@ static int emac_get_eee(struct net_device *ndev, struct 
ethtool_eee *edata) return phy_ethtool_get_eee(ndev->phydev, edata); } -static int emac_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +static int emac_set_eee(struct net_device *ndev, struct ethtool_keee *edata) { if (!ndev->phydev) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 411898a4f38c..cf7b73f8f450 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1489,9 +1489,6 @@ static int emac_ndo_stop(struct net_device *ndev) /* Destroying the queued work in ndo_stop() */ cancel_delayed_work_sync(&emac->stats_work); - /* stop PRUs */ - prueth_emac_stop(emac); - if (prueth->emacs_initialized == 1) icss_iep_exit(emac->iep); @@ -1502,7 +1499,6 @@ static int emac_ndo_stop(struct net_device *ndev) free_irq(emac->rx_chns.irq[rx_flow], emac); prueth_ndev_del_tx_napi(emac, emac->tx_ch_num); - prueth_cleanup_tx_chns(emac); prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows); prueth_cleanup_tx_chns(emac); diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index d5b75af163d3..5ee8e8980393 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -384,18 +384,18 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) dev_info(ctodev(card), "%s: ERROR status\n", __func__); - descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); - if (!descr->skb) { - descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */ - return -ENOMEM; - } descr->hw_regs.dmac_cmd_status = 0; descr->hw_regs.result_size = 0; descr->hw_regs.valid_size = 0; descr->hw_regs.data_error = 0; descr->hw_regs.payload.dev_addr = 0; descr->hw_regs.payload.size = 0; - descr->skb = NULL; + + descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); + if (!descr->skb) { + descr->hw_regs.payload.dev_addr = 0; /* tell DMAC don't touch memory */ + return -ENOMEM; + } offset = ((unsigned long)descr->skb->data) & (GELIC_NET_RXBUF_ALIGN - 1); @@ -698,7 +698,7 @@ gelic_card_get_next_tx_descr(struct gelic_card *card) } /** - * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field + * gelic_descr_set_tx_cmdstat - sets the tx descriptor command field * @descr: descriptor structure to fill out * @skb: packet to consider * @@ -1461,7 +1461,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev, } /** - * gelic_ether_setup_netdev - initialization of net_device + * gelic_net_setup_netdev - initialization of net_device * @netdev: net_device structure * @card: card structure * @@ -1518,14 +1518,16 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card) return 0; } +#define GELIC_ALIGN (32) + /** * gelic_alloc_card_net - allocates net_device and card structure + * @netdev: interface device structure * * returns the card structure or NULL in case of errors * * the card and net_device structures are linked to each other */ -#define GELIC_ALIGN (32) static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev) { struct gelic_card *card; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 3318b50a5911..f165616f36fe 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -539,8 +539,7 @@ static int w5300_hw_probe(struct platform_device *pdev) 
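The gelic_descr_prepare_rx() change above is an ordering fix: the old code allocated the skb first and then zeroed the descriptor fields, which also wiped the freshly assigned descr->skb; the new code quiesces the hardware-visible fields first, so a failed netdev_alloc_skb() leaves the descriptor already marked safe (dev_addr == 0) with nothing to unwind. A toy sketch of that pattern, with rx_descr and malloc() standing in for the gelic descriptor and the skb allocator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for the gelic RX descriptor; only the shape matters here. */
struct rx_descr {
	struct {
		unsigned long dev_addr;	/* 0 tells the DMA engine "hands off" */
		size_t size;
	} payload;
	void *skb;
};

/* Quiesce the hardware-visible fields first, then allocate. If the
 * allocation fails, the descriptor is already in a safe "not in use"
 * state and the caller has nothing to undo. */
static int descr_prepare_rx(struct rx_descr *d, size_t len)
{
	memset(&d->payload, 0, sizeof(d->payload));
	d->skb = malloc(len);
	if (!d->skb)
		return -1;
	d->payload.size = len;
	return 0;
}

int main(void)
{
	struct rx_descr d = { { 0, 0 }, NULL };

	if (descr_prepare_rx(&d, 2048))
		return 1;
	printf("descriptor armed, %zu-byte buffer\n", d.payload.size);
	free(d.skb);
	return 0;
}

Resetting state before the fallible step is the general rule at work here: the error path then needs no compensation code at all.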
eth_hw_addr_random(ndev); } - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, mem); + priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 765aa516aada..940452d0a4d2 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1114,8 +1114,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) ndev->irq = rc; - res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); - lp->base_addr = devm_ioremap_resource(&ofdev->dev, res); + lp->base_addr = devm_platform_get_and_ioremap_resource(ofdev, 0, &res); if (IS_ERR(lp->base_addr)) { rc = PTR_ERR(lp->base_addr); goto error; } diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 9f505cf02d96..e9bc38fd2025 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1240,9 +1240,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); SelectPage(0); PutWord(XIRCREG0_TRS, (u_short)pktlen+2); - freespace = GetWord(XIRCREG0_TSO); - okay = freespace & 0x8000; - freespace &= 0x7fff; + freespace = GetWord(XIRCREG0_TSO) & 0x7fff; /* TRS doesn't work - (indeed it is eliminated with sil-rev 1) */ okay = pktlen +2 < freespace; pr_debug("%s: avail. tx space=%u%s\n",
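A pattern that recurs throughout the stmmac portion of this section is the lock-free statistics read: u64_stats_fetch_begin()/u64_stats_fetch_retry() around each counter read, with per-CPU copies summed afterwards, as in stmmac_get_rx_normal_irq_n(). The sketch below reduces that read side to a single-threaded user-space toy; fetch_begin()/fetch_retry() and struct pcpu_stats only imitate the shape of the kernel's u64_stats API, they are not the real helpers:

#include <stdio.h>

#define NR_CPUS 4

/* Toy stand-in for the kernel's per-CPU stats plus seqcount: a writer
 * would hold seq at an odd value around an update; readers retry while
 * they observe an odd or changed value. Single-threaded here, so the
 * loop body runs exactly once. */
struct pcpu_stats {
	unsigned int seq;
	unsigned long long rx_normal_irq_n;
};

static unsigned int fetch_begin(const struct pcpu_stats *s)
{
	return s->seq;
}

static int fetch_retry(const struct pcpu_stats *s, unsigned int start)
{
	return (start & 1) || s->seq != start;
}

/* Same begin/read/retry dance as the driver, summed over all CPUs. */
static unsigned long long sum_rx_irqs(const struct pcpu_stats *stats)
{
	unsigned long long total = 0;
	unsigned long long v;
	unsigned int start;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		do {
			start = fetch_begin(&stats[cpu]);
			v = stats[cpu].rx_normal_irq_n;
		} while (fetch_retry(&stats[cpu], start));
		total += v;
	}
	return total;
}

int main(void)
{
	struct pcpu_stats stats[NR_CPUS] = {
		{ 0, 10 }, { 0, 20 }, { 0, 30 }, { 0, 40 },
	};

	printf("rx_normal_irq_n total: %llu\n", sum_rx_irqs(stats)); /* 100 */
	return 0;
}

On 64-bit kernels the real seqcount compiles away and the read loop executes once, which is why the driver can afford this dance on every ethtool query and get_stats64 call.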