Diffstat (limited to 'drivers/net/ethernet/amazon/ena')
 drivers/net/ethernet/amazon/ena/ena_admin_defs.h  |  19
 drivers/net/ethernet/amazon/ena/ena_com.c         | 124
 drivers/net/ethernet/amazon/ena/ena_com.h         |  80
 drivers/net/ethernet/amazon/ena/ena_common_defs.h |   2
 drivers/net/ethernet/amazon/ena/ena_eth_com.c     |  26
 drivers/net/ethernet/amazon/ena/ena_eth_com.h     |   7
 drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h |   6
 drivers/net/ethernet/amazon/ena/ena_ethtool.c     |  85
 drivers/net/ethernet/amazon/ena/ena_netdev.c      |  55
 drivers/net/ethernet/amazon/ena/ena_netdev.h      |  17
 drivers/net/ethernet/amazon/ena/ena_regs_defs.h   |   2
 11 files changed, 240 insertions(+), 183 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index 8baf847e8622..336742f6e3c3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -404,6 +404,10 @@ struct ena_admin_basic_stats { u32 rx_drops_low; u32 rx_drops_high; + + u32 tx_drops_low; + + u32 tx_drops_high; }; struct ena_admin_acq_get_stats_resp { @@ -764,8 +768,8 @@ enum ena_admin_os_type { ENA_ADMIN_OS_DPDK = 3, ENA_ADMIN_OS_FREEBSD = 4, ENA_ADMIN_OS_IPXE = 5, - ENA_ADMIN_OS_ESXI = 6, - ENA_ADMIN_OS_GROUPS_NUM = 6, + ENA_ADMIN_OS_ESXI = 6, + ENA_ADMIN_OS_GROUPS_NUM = 6, }; struct ena_admin_host_info { @@ -809,7 +813,8 @@ struct ena_admin_host_info { u16 reserved; - /* 1 :0 : reserved + /* 0 : reserved + * 1 : rx_offset * 2 : interrupt_moderation * 31:3 : reserved */ @@ -1017,6 +1022,10 @@ struct ena_admin_aenq_keep_alive_desc { u32 rx_drops_low; u32 rx_drops_high; + + u32 tx_drops_low; + + u32 tx_drops_high; }; struct ena_admin_ena_mmio_req_read_less_resp { @@ -1116,6 +1125,8 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3) #define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8 #define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8) +#define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT 1 +#define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1) #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2) @@ -1125,4 +1136,4 @@ struct ena_admin_ena_mmio_req_read_less_resp { /* aenq_link_change_desc */ #define ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK BIT(0) -#endif /*_ENA_ADMIN_H_ */ +#endif /* _ENA_ADMIN_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index a250046b8e18..432f143559a1 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -62,7 +62,9 @@ #define ENA_REGS_ADMIN_INTR_MASK 1 -#define ENA_POLL_MS 5 +#define ENA_MIN_ADMIN_POLL_US 100 + +#define ENA_MAX_ADMIN_POLL_US 5000 /*****************************************************************************/ /*****************************************************************************/ @@ -200,17 +202,17 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue, static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, u16 command_id, bool capture) { - if (unlikely(!queue->comp_ctx)) { - pr_err("Completion context is NULL\n"); - return NULL; - } - if (unlikely(command_id >= queue->q_depth)) { pr_err("command id is larger than the queue size. 
cmd_id: %u queue size %d\n", command_id, queue->q_depth); return NULL; } + if (unlikely(!queue->comp_ctx)) { + pr_err("Completion context is NULL\n"); + return NULL; + } + if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { pr_err("Completion context is occupied\n"); return NULL; @@ -375,7 +377,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, io_sq->bounce_buf_ctrl.next_to_use = 0; size = io_sq->bounce_buf_ctrl.buffer_size * - io_sq->bounce_buf_ctrl.buffers_num; + io_sq->bounce_buf_ctrl.buffers_num; dev_node = dev_to_node(ena_dev->dmadev); set_dev_node(ena_dev->dmadev, ctx->numa_node); @@ -523,9 +525,6 @@ static int ena_com_comp_status_to_errno(u8 comp_status) if (unlikely(comp_status != 0)) pr_err("admin command failed[%u]\n", comp_status); - if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR)) - return -EINVAL; - switch (comp_status) { case ENA_ADMIN_SUCCESS: return 0; @@ -540,7 +539,14 @@ static int ena_com_comp_status_to_errno(u8 comp_status) return -EINVAL; } - return 0; + return -EINVAL; +} + +static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us) +{ + delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us); + delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US); + usleep_range(delay_us, 2 * delay_us); } static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, @@ -549,6 +555,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c unsigned long flags = 0; unsigned long timeout; int ret; + u32 exp = 0; timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout); @@ -572,7 +579,8 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c goto err; } - msleep(ENA_POLL_MS); + ena_delay_exponential_backoff_us(exp++, + admin_queue->ena_dev->ena_min_poll_delay_us); } if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { @@ -702,8 +710,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, /* The desc list entry size should be whole multiply of 8 * This requirement comes from __iowrite64_copy() */ - pr_err("illegal entry size %d\n", - llq_info->desc_list_entry_size); + pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size); return -EINVAL; } @@ -775,7 +782,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com if (admin_queue->auto_polling) admin_queue->polling = true; } else { - pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n", + pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n", comp_ctx->cmd_opcode, comp_ctx->status); } /* Check if shifted to polling mode. @@ -943,12 +950,13 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev, static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, u16 exp_state) { - u32 val, i; + u32 val, exp = 0; + unsigned long timeout_stamp; - /* Convert timeout from resolution of 100ms to ENA_POLL_MS */ - timeout = (timeout * 100) / ENA_POLL_MS; + /* Convert timeout from resolution of 100ms to us resolution. 
*/ + timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout); - for (i = 0; i < timeout; i++) { + while (1) { val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) { @@ -960,10 +968,11 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, exp_state) return 0; - msleep(ENA_POLL_MS); - } + if (time_is_before_jiffies(timeout_stamp)) + return -ETIME; - return -ETIME; + ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us); + } } static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev, @@ -1067,16 +1076,10 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev) static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; - struct ena_admin_get_feat_resp get_resp; - int rc; - rc = ena_com_get_feature_ex(ena_dev, &get_resp, - ENA_ADMIN_RSS_HASH_FUNCTION, - ena_dev->rss.hash_key_dma_addr, - sizeof(ena_dev->rss.hash_key), 0); - if (unlikely(rc)) { + if (!ena_com_check_supported_feature_id(ena_dev, + ENA_ADMIN_RSS_HASH_FUNCTION)) return -EOPNOTSUPP; - } rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), @@ -1290,13 +1293,9 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, u16 intr_delay_resolution) { - /* Initial value of intr_delay_resolution might be 0 */ - u16 prev_intr_delay_resolution = - ena_dev->intr_delay_resolution ? - ena_dev->intr_delay_resolution : - ENA_DEFAULT_INTR_DELAY_RESOLUTION; + u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution; - if (!intr_delay_resolution) { + if (unlikely(!intr_delay_resolution)) { pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n"); intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; } @@ -1450,11 +1449,13 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) { struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; unsigned long flags = 0; + u32 exp = 0; spin_lock_irqsave(&admin_queue->q_lock, flags); while (atomic_read(&admin_queue->outstanding_cmds) != 0) { spin_unlock_irqrestore(&admin_queue->q_lock, flags); - msleep(ENA_POLL_MS); + ena_delay_exponential_backoff_us(exp++, + ena_dev->ena_min_poll_delay_us); spin_lock_irqsave(&admin_queue->q_lock, flags); } spin_unlock_irqrestore(&admin_queue->q_lock, flags); @@ -1802,6 +1803,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, if (ret) goto error; + admin_queue->ena_dev = ena_dev; admin_queue->running_state = true; return 0; @@ -2009,7 +2011,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) struct ena_admin_aenq_entry *aenq_e; struct ena_admin_aenq_common_desc *aenq_common; struct ena_com_aenq *aenq = &dev->aenq; - unsigned long long timestamp; + u64 timestamp; ena_aenq_handler handler_cb; u16 masked_head, processed = 0; u8 phase; @@ -2027,9 +2029,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) */ dma_rmb(); - timestamp = - (unsigned long long)aenq_common->timestamp_low | - ((unsigned long long)aenq_common->timestamp_high << 32); + timestamp = (u64)aenq_common->timestamp_low | + ((u64)aenq_common->timestamp_high << 32); pr_debug("AENQ! 
Group[%x] Syndrom[%x] timestamp: [%llus]\n", aenq_common->group, aenq_common->syndrom, timestamp); @@ -2059,8 +2060,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) /* write the aenq doorbell after all AENQ descriptors were read */ mb(); - writel_relaxed((u32)aenq->head, - dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); + writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); } int ena_com_dev_reset(struct ena_com_dev *ena_dev, @@ -2282,12 +2282,14 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, enum ena_admin_hash_functions func, const u8 *key, u16 key_len, u32 init_val) { - struct ena_rss *rss = &ena_dev->rss; + struct ena_admin_feature_rss_flow_hash_control *hash_key; struct ena_admin_get_feat_resp get_resp; - struct ena_admin_feature_rss_flow_hash_control *hash_key = - rss->hash_key; + enum ena_admin_hash_functions old_func; + struct ena_rss *rss = &ena_dev->rss; int rc; + hash_key = rss->hash_key; + /* Make sure size is a mult of DWs */ if (unlikely(key_len & 0x3)) return -EINVAL; @@ -2299,7 +2301,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, if (unlikely(rc)) return rc; - if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { + if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { pr_err("Flow hash function %d isn't supported\n", func); return -EOPNOTSUPP; } @@ -2325,26 +2327,27 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, return -EINVAL; } + old_func = rss->hash_func; rss->hash_func = func; rc = ena_com_set_hash_function(ena_dev); /* Restore the old function */ if (unlikely(rc)) - ena_com_get_hash_function(ena_dev, NULL, NULL); + rss->hash_func = old_func; return rc; } int ena_com_get_hash_function(struct ena_com_dev *ena_dev, - enum ena_admin_hash_functions *func, - u8 *key) + enum ena_admin_hash_functions *func) { struct ena_rss *rss = &ena_dev->rss; struct ena_admin_get_feat_resp get_resp; - struct ena_admin_feature_rss_flow_hash_control *hash_key = - rss->hash_key; int rc; + if (unlikely(!func)) + return -EINVAL; + rc = ena_com_get_feature_ex(ena_dev, &get_resp, ENA_ADMIN_RSS_HASH_FUNCTION, rss->hash_key_dma_addr, @@ -2357,8 +2360,15 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, if (rss->hash_func) rss->hash_func--; - if (func) - *func = rss->hash_func; + *func = rss->hash_func; + + return 0; +} + +int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key) +{ + struct ena_admin_feature_rss_flow_hash_control *hash_key = + ena_dev->rss.hash_key; if (key) memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); @@ -2641,10 +2651,10 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size) * ignore this error and have indirection table support only. 
*/ rc = ena_com_hash_key_allocate(ena_dev); - if (unlikely(rc) && rc != -EOPNOTSUPP) - goto err_hash_key; - else if (rc != -EOPNOTSUPP) + if (likely(!rc)) ena_com_hash_key_fill_default_key(ena_dev); + else if (rc != -EOPNOTSUPP) + goto err_hash_key; rc = ena_com_hash_ctrl_init(ena_dev); if (unlikely(rc)) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 469f298199a7..bc187adf54e4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -54,9 +54,9 @@ #undef pr_fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#define ENA_MAX_NUM_IO_QUEUES 128U +#define ENA_MAX_NUM_IO_QUEUES 128U /* We need to queues for each IO (on for Tx and one for Rx) */ -#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES)) +#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES)) #define ENA_MAX_HANDLERS 256 @@ -73,13 +73,15 @@ /*****************************************************************************/ /* ENA adaptive interrupt moderation settings */ -#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64 -#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0 -#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 +#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64 +#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0 +#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1 -#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF +#define ENA_HASH_KEY_SIZE 40 -#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1 +#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF + +#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1 struct ena_llq_configurations { enum ena_admin_llq_header_location llq_header_location; @@ -237,6 +239,7 @@ struct ena_com_stats_admin { struct ena_com_admin_queue { void *q_dmadev; + struct ena_com_dev *ena_dev; spinlock_t q_lock; /* spinlock for the admin queue */ struct ena_comp_ctx *comp_ctx; @@ -349,6 +352,8 @@ struct ena_com_dev { struct ena_intr_moder_entry *intr_moder_tbl; struct ena_com_llq_info llq_info; + + u32 ena_min_poll_delay_us; }; struct ena_com_dev_get_features_ctx { @@ -393,7 +398,7 @@ struct ena_aenq_handlers { */ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev); -/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism +/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism * @ena_dev: ENA communication layer struct * @readless_supported: readless mode (enable/disable) */ @@ -501,18 +506,6 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev); */ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling); -/* ena_com_set_admin_polling_mode - Get the admin completion queue polling mode - * @ena_dev: ENA communication layer struct - * - * Get the admin completion mode. - * If polling mode is on, ena_com_execute_admin_command will perform a - * polling on the admin completion queue for the commands completion, - * otherwise it will wait on wait event. 
- * - * @return state - */ -bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev); - /* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode * @ena_dev: ENA communication layer struct * @polling: Enable/Disable polling mode @@ -527,7 +520,7 @@ void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev, /* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler * @ena_dev: ENA communication layer struct * - * This method go over the admin completion queue and wake up all the pending + * This method goes over the admin completion queue and wakes up all the pending * threads that wait on the commands wait event. * * @note: Should be called after MSI-X interrupt. @@ -537,7 +530,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev); /* ena_com_aenq_intr_handler - AENQ interrupt handler * @ena_dev: ENA communication layer struct * - * This method go over the async event notification queue and call the proper + * This method goes over the async event notification queue and calls the proper * aenq handler. */ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data); @@ -554,14 +547,14 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); /* ena_com_wait_for_abort_completion - Wait for admin commands abort. * @ena_dev: ENA communication layer struct * - * This method wait until all the outstanding admin commands will be completed. + * This method waits until all the outstanding admin commands are completed. */ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev); /* ena_com_validate_version - Validate the device parameters * @ena_dev: ENA communication layer struct * - * This method validate the device parameters are the same as the saved + * This method verifies the device parameters are the same as the saved * parameters in ena_dev. * This method is useful after device reset, to validate the device mac address * and the device offloads are the same as before the reset. @@ -695,23 +688,32 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, */ int ena_com_set_hash_function(struct ena_com_dev *ena_dev); -/* ena_com_get_hash_function - Retrieve the hash function and the hash key - * from the device. +/* ena_com_get_hash_function - Retrieve the hash function from the device. * @ena_dev: ENA communication layer struct * @func: hash function - * @key: hash key * - * Retrieve the hash function and the hash key from the device. + * Retrieve the hash function from the device. * - * @note: If the caller called ena_com_fill_hash_function but didn't flash + * @note: If the caller called ena_com_fill_hash_function but didn't flush * it to the device, the new configuration will be lost. * * @return: 0 on Success and negative value otherwise. */ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, - enum ena_admin_hash_functions *func, - u8 *key); + enum ena_admin_hash_functions *func); +/* ena_com_get_hash_key - Retrieve the hash key + * @ena_dev: ENA communication layer struct + * @key: hash key + * + * Retrieve the hash key. + * + * @note: If the caller called ena_com_fill_hash_key but didn't flush + * it to the device, the new configuration will be lost. + * + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key); /* ena_com_fill_hash_ctrl - Fill RSS hash control * @ena_dev: ENA communication layer struct. * @proto: The protocol to configure. 
@@ -746,7 +748,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev); * * Retrieve the hash control from the device. * - * @note, If the caller called ena_com_fill_hash_ctrl but didn't flash + * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush * it to the device, the new configuration will be lost. * * @return: 0 on Success and negative value otherwise. @@ -798,7 +800,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev); * * Retrieve the RSS indirection table from the device. * - * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash + * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush * it to the device, the new configuration will be lost. * * @return: 0 on Success and negative value otherwise. @@ -824,14 +826,14 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, /* ena_com_delete_debug_area - Free the debug area resources. * @ena_dev: ENA communication layer struct * - * Free the allocate debug area. + * Free the allocated debug area. */ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev); /* ena_com_delete_host_info - Free the host info resources. * @ena_dev: ENA communication layer struct * - * Free the allocate host info. + * Free the allocated host info. */ void ena_com_delete_host_info(struct ena_com_dev *ena_dev); @@ -872,9 +874,9 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, * @cmd_completion: command completion return value. * @cmd_comp_size: command completion size. - * Submit an admin command and then wait until the device will return a + * Submit an admin command and then wait until the device returns a * completion. - * The completion will be copyed into cmd_comp. + * The completion will be copied into cmd_comp. * * @return - 0 on success, negative value on failure. */ @@ -937,7 +939,7 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev * /* ena_com_config_dev_mode - Configure the placement policy of the device. * @ena_dev: ENA communication layer struct * @llq_features: LLQ feature descriptor, retrieve via - * ena_com_get_dev_attr_feat. + * ena_com_get_dev_attr_feat. * @ena_llq_config: The default driver LLQ parameters configurations */ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, @@ -963,7 +965,7 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d * @intr_reg: interrupt register to update. * @rx_delay_interval: Rx interval in usecs * @tx_delay_interval: Tx interval in usecs - * @unmask: unask enable/disable + * @unmask: unmask enable/disable * * Prepare interrupt update register with the supplied parameters. 
*/ diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h index 23beb7e7ed7b..8a8ded0de9ac 100644 --- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h @@ -45,4 +45,4 @@ struct ena_common_mem_addr { u16 reserved16; }; -#endif /*_ENA_COMMON_H_ */ +#endif /* _ENA_COMMON_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 2845ac277724..ec8ea25e988d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -519,7 +519,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, struct ena_eth_io_rx_cdesc_base *cdesc = NULL; u16 cdesc_idx = 0; u16 nb_hw_desc; - u16 i; + u16 i = 0; WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); @@ -538,13 +538,19 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, return -ENOSPC; } - for (i = 0; i < nb_hw_desc; i++) { + cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx); + ena_rx_ctx->pkt_offset = cdesc->offset; + + do { + ena_buf[i].len = cdesc->length; + ena_buf[i].req_id = cdesc->req_id; + + if (++i >= nb_hw_desc) + break; + cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i); - ena_buf->len = cdesc->length; - ena_buf->req_id = cdesc->req_id; - ena_buf++; - } + } while (1); /* Update SQ head ptr */ io_sq->next_to_comp += nb_hw_desc; @@ -578,10 +584,10 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, desc->length = ena_buf->len; - desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK; - desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK; - desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK; - desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; + desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK | + ENA_ETH_IO_RX_DESC_LAST_MASK | + (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) | + ENA_ETH_IO_RX_DESC_COMP_REQ_MASK; desc->req_id = req_id; diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 77986c0ea52c..8b1afd3b32f2 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -73,6 +73,7 @@ struct ena_com_rx_ctx { u32 hash; u16 descs; int max_bufs; + u8 pkt_offset; }; int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, @@ -95,7 +96,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, writel(intr_reg->intr_control, io_cq->unmask_reg); } -static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq) +static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq) { u16 tail, next_to_comp, cnt; @@ -113,7 +114,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq, int temp; if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) - return ena_com_free_desc(io_sq) >= required_buffers; + return ena_com_free_q_entries(io_sq) >= required_buffers; /* This calculation doesn't need to be 100% accurate. 
So to reduce * the calculation overhead just Subtract 2 lines from the free descs @@ -122,7 +123,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq, */ temp = required_buffers / io_sq->llq_info.descs_per_entry + 2; - return ena_com_free_desc(io_sq) > temp; + return ena_com_free_q_entries(io_sq) > temp; } static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h index 00e0f056a741..d105c9c56192 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h @@ -264,7 +264,9 @@ struct ena_eth_io_rx_cdesc_base { u16 sub_qid; - u16 reserved; + u8 offset; + + u8 reserved; }; /* 8-word format */ @@ -412,4 +414,4 @@ struct ena_eth_io_numa_node_cfg_reg { #define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 #define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) -#endif /*_ENA_ETH_IO_H_ */ +#endif /* _ENA_ETH_IO_H_ */ diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 9cc28b4b2627..e340b65af08c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -83,6 +83,7 @@ static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(bad_req_id), ENA_STAT_TX_ENTRY(llq_buffer_copy), ENA_STAT_TX_ENTRY(missed_tx), + ENA_STAT_TX_ENTRY(unmask_interrupt), }; static const struct ena_stats ena_stats_rx_strings[] = { @@ -205,7 +206,7 @@ int ena_get_sset_count(struct net_device *netdev, int sset) if (sset != ETH_SS_STATS) return -EOPNOTSUPP; - return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; } @@ -259,7 +260,6 @@ static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data) for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) { ena_stats = &ena_stats_global_strings[i]; - memcpy(data, ena_stats->name, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } @@ -306,10 +306,8 @@ static int ena_get_coalesce(struct net_device *net_dev, struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; - if (!ena_com_interrupt_moderation_supported(ena_dev)) { - /* the devie doesn't support interrupt moderation */ + if (!ena_com_interrupt_moderation_supported(ena_dev)) return -EOPNOTSUPP; - } coalesce->tx_coalesce_usecs = ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) * @@ -325,7 +323,7 @@ static int ena_get_coalesce(struct net_device *net_dev, return 0; } -static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter) +static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) { unsigned int val; int i; @@ -336,7 +334,7 @@ static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter) adapter->tx_ring[i].smoothed_interval = val; } -static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter) +static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter) { unsigned int val; int i; @@ -354,24 +352,22 @@ static int ena_set_coalesce(struct net_device *net_dev, struct ena_com_dev *ena_dev = adapter->ena_dev; int rc; - if (!ena_com_interrupt_moderation_supported(ena_dev)) { - /* the devie doesn't support interrupt moderation */ + if (!ena_com_interrupt_moderation_supported(ena_dev)) return 
-EOPNOTSUPP; - } rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, coalesce->tx_coalesce_usecs); if (rc) return rc; - ena_update_tx_rings_intr_moderation(adapter); + ena_update_tx_rings_nonadaptive_intr_moderation(adapter); rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, coalesce->rx_coalesce_usecs); if (rc) return rc; - ena_update_rx_rings_intr_moderation(adapter); + ena_update_rx_rings_nonadaptive_intr_moderation(adapter); if (coalesce->use_adaptive_rx_coalesce && !ena_com_get_adaptive_moderation_enabled(ena_dev)) @@ -635,6 +631,32 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev) return ENA_HASH_KEY_SIZE; } +static int ena_indirection_table_set(struct ena_adapter *adapter, + const u32 *indir) +{ + struct ena_com_dev *ena_dev = adapter->ena_dev; + int i, rc; + + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { + rc = ena_com_indirect_table_fill_entry(ena_dev, + i, + ENA_IO_RXQ_IDX(indir[i])); + if (unlikely(rc)) { + netif_err(adapter, drv, adapter->netdev, + "Cannot fill indirect table (index is too large)\n"); + return rc; + } + } + + rc = ena_com_indirect_table_set(ena_dev); + if (rc) { + netif_err(adapter, drv, adapter->netdev, + "Cannot set indirect table\n"); + return rc == -EPERM ? -EOPNOTSUPP : rc; + } + return rc; +} + static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir) { struct ena_com_dev *ena_dev = adapter->ena_dev; @@ -672,17 +694,18 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, /* We call this function in order to check if the device * supports getting/setting the hash function. */ - rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key); + rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func); if (rc) { - if (rc == -EOPNOTSUPP) { - key = NULL; - hfunc = NULL; + if (rc == -EOPNOTSUPP) rc = 0; - } return rc; } + rc = ena_com_get_hash_key(adapter->ena_dev, key); + if (rc) + return rc; + switch (ena_func) { case ENA_ADMIN_TOEPLITZ: func = ETH_RSS_HASH_TOP; @@ -699,7 +722,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (hfunc) *hfunc = func; - return rc; + return 0; } static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, @@ -707,27 +730,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, { struct ena_adapter *adapter = netdev_priv(netdev); struct ena_com_dev *ena_dev = adapter->ena_dev; - enum ena_admin_hash_functions func; - int rc, i; + enum ena_admin_hash_functions func = 0; + int rc; if (indir) { - for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) { - rc = ena_com_indirect_table_fill_entry(ena_dev, - i, - ENA_IO_RXQ_IDX(indir[i])); - if (unlikely(rc)) { - netif_err(adapter, drv, netdev, - "Cannot fill indirect table (index is too large)\n"); - return rc; - } - } - - rc = ena_com_indirect_table_set(ena_dev); - if (rc) { - netif_err(adapter, drv, netdev, - "Cannot set indirect table\n"); - return rc == -EPERM ? 
-EOPNOTSUPP : rc; - } + rc = ena_indirection_table_set(adapter, indir); + if (rc) + return rc; } switch (hfunc) { @@ -746,7 +755,7 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir, return -EOPNOTSUPP; } - if (key) { + if (key || func) { rc = ena_com_fill_hash_function(ena_dev, func, key, ENA_HASH_KEY_SIZE, 0xFFFFFFFF); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 2cc765df8da3..a0af74c93971 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -263,7 +263,7 @@ static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring, dma_addr_t dma = 0; u32 size; - tx_info->xdpf = convert_to_xdp_frame(xdp); + tx_info->xdpf = xdp_convert_buff_to_frame(xdp); size = tx_info->xdpf->len; ena_buf = tx_info->bufs; @@ -1435,6 +1435,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, rx_info->page_offset, len, ENA_PAGE_SIZE); + /* The offset is non zero only for the first buffer */ + rx_info->page_offset = 0; netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "rx skb updated. len %d. data_len %d\n", @@ -1590,6 +1592,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, { u16 next_to_clean = rx_ring->next_to_clean; struct ena_com_rx_ctx ena_rx_ctx; + struct ena_rx_buffer *rx_info; struct ena_adapter *adapter; u32 res_budget, work_done; int rx_copybreak_pkt = 0; @@ -1606,6 +1609,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, "%s qid %d\n", __func__, rx_ring->qid); res_budget = budget; xdp.rxq = &rx_ring->xdp_rxq; + xdp.frame_sz = ENA_PAGE_SIZE; do { xdp_verdict = XDP_PASS; @@ -1613,6 +1617,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; ena_rx_ctx.max_bufs = rx_ring->sgl_size; ena_rx_ctx.descs = 0; + ena_rx_ctx.pkt_offset = 0; rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, rx_ring->ena_com_io_sq, &ena_rx_ctx); @@ -1622,6 +1627,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, if (unlikely(ena_rx_ctx.descs == 0)) break; + rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; + rx_info->page_offset = ena_rx_ctx.pkt_offset; + netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n", rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, @@ -1683,7 +1691,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, rx_ring->next_to_clean = next_to_clean; - refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq); + refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); refill_threshold = min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, ENA_RX_REFILL_THRESH_PACKET); @@ -1762,6 +1770,9 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring, tx_ring->smoothed_interval, true); + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.unmask_interrupt++; + u64_stats_update_end(&tx_ring->syncp); /* It is a shared MSI-X. * Tx and Rx CQ have pointer to it. 
* So we use one of them to reach the intr reg @@ -2231,7 +2242,7 @@ static int ena_rss_configure(struct ena_adapter *adapter) rc = ena_rss_init_default(adapter); if (rc && (rc != -EOPNOTSUPP)) { netif_err(adapter, ifup, adapter->netdev, - "Failed to init RSS rc: %d\n", rc); + "Failed to init RSS rc: %d\n", rc); return rc; } } @@ -2304,7 +2315,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) if (rc) { netif_err(adapter, ifup, adapter->netdev, "Failed to create I/O TX queue num %d rc: %d\n", - qid, rc); + qid, rc); return rc; } @@ -2453,7 +2464,7 @@ static int create_queues_with_size_backoff(struct ena_adapter *adapter) * ones due to past queue allocation failures. */ set_io_rings_size(adapter, adapter->requested_tx_ring_size, - adapter->requested_rx_ring_size); + adapter->requested_rx_ring_size); while (1) { if (ena_xdp_present(adapter)) { @@ -2494,7 +2505,7 @@ err_setup_tx: if (rc != -ENOMEM) { netif_err(adapter, ifup, adapter->netdev, "Queue creation failed with error code %d\n", - rc); + rc); return rc; } @@ -2517,7 +2528,7 @@ err_setup_tx: new_rx_ring_size = cur_rx_ring_size / 2; if (new_tx_ring_size < ENA_MIN_RING_SIZE || - new_rx_ring_size < ENA_MIN_RING_SIZE) { + new_rx_ring_size < ENA_MIN_RING_SIZE) { netif_err(adapter, ifup, adapter->netdev, "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n", ENA_MIN_RING_SIZE); @@ -3076,8 +3087,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb, return qid; } -static void ena_config_host_info(struct ena_com_dev *ena_dev, - struct pci_dev *pdev) +static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { struct ena_admin_host_info *host_info; int rc; @@ -3107,6 +3117,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, host_info->num_cpus = num_online_cpus(); host_info->driver_supported_features = + ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK; rc = ena_com_set_host_attributes(ena_dev); @@ -3169,6 +3180,7 @@ static void ena_get_stats64(struct net_device *netdev, struct ena_ring *rx_ring, *tx_ring; unsigned int start; u64 rx_drops; + u64 tx_drops; int i; if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) @@ -3203,9 +3215,11 @@ static void ena_get_stats64(struct net_device *netdev, do { start = u64_stats_fetch_begin_irq(&adapter->syncp); rx_drops = adapter->dev_stats.rx_drops; + tx_drops = adapter->dev_stats.tx_drops; } while (u64_stats_fetch_retry_irq(&adapter->syncp, start)); stats->rx_dropped = rx_drops; + stats->tx_dropped = tx_drops; stats->multicast = 0; stats->collisions = 0; @@ -3433,6 +3447,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) ena_com_mmio_reg_read_request_destroy(ena_dev); + /* return reset reason to default value */ adapter->reset_reason = ENA_REGS_RESET_NORMAL; clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); @@ -3678,8 +3693,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter) for (i = 0; i < adapter->num_io_queues; i++) { rx_ring = &adapter->rx_ring[i]; - refill_required = - ena_com_free_desc(rx_ring->ena_com_io_sq); + refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); if (unlikely(refill_required == (rx_ring->ring_size - 1))) { rx_ring->empty_rx_queue++; @@ -3817,11 +3831,11 @@ static void ena_timer_service(struct timer_list *t) mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); } -static int ena_calc_max_io_queue_num(struct pci_dev *pdev, +static 
u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, struct ena_com_dev *ena_dev, struct ena_com_dev_get_features_ctx *get_feat_ctx) { - int io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; + u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues; if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { struct ena_admin_queue_ext_feature_fields *max_queue_ext = @@ -3991,7 +4005,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter) } } - rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, + rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE, 0xFFFFFFFF); if (unlikely(rc && (rc != -EOPNOTSUPP))) { dev_err(dev, "Cannot fill hash function\n"); @@ -4107,8 +4121,8 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) */ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; + struct ena_com_dev_get_features_ctx get_feat_ctx; struct ena_llq_configurations llq_config; struct ena_com_dev *ena_dev = NULL; struct ena_adapter *adapter; @@ -4152,6 +4166,8 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_region; } + ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; + ena_dev->dmadev = &pdev->dev; rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state); @@ -4175,7 +4191,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) calc_queue_ctx.get_feat_ctx = &get_feat_ctx; calc_queue_ctx.pdev = pdev; - /* Initial Tx and RX interrupt delay. Assumes 1 usec granularity. + /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. * Updated during device initialization with the real granularity */ ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; @@ -4219,12 +4235,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->num_io_queues = max_num_io_queues; adapter->max_num_io_queues = max_num_io_queues; + adapter->last_monitored_tx_qid = 0; adapter->xdp_first_ring = 0; adapter->xdp_num_queues = 0; - adapter->last_monitored_tx_qid = 0; - adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; adapter->wd_state = wd_state; @@ -4356,6 +4371,7 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) cancel_work_sync(&adapter->reset_task); rtnl_lock(); /* lock released inside the below if-else block */ + adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN; ena_destroy_device(adapter, true); if (shutdown) { netif_device_detach(netdev); @@ -4514,14 +4530,17 @@ static void ena_keep_alive_wd(void *adapter_data, struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_admin_aenq_keep_alive_desc *desc; u64 rx_drops; + u64 tx_drops; desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e; adapter->last_keep_alive_jiffies = jiffies; rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; + tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low; u64_stats_update_begin(&adapter->syncp); adapter->dev_stats.rx_drops = rx_drops; + adapter->dev_stats.tx_drops = tx_drops; u64_stats_update_end(&adapter->syncp); } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 9e1860d81908..ba030d260940 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -50,12 +50,6 @@ #define DRV_MODULE_GEN_SUBMINOR 0 #define 
DRV_MODULE_NAME "ena" -#ifndef DRV_MODULE_GENERATION -#define DRV_MODULE_GENERATION \ - __stringify(DRV_MODULE_GEN_MAJOR) "." \ - __stringify(DRV_MODULE_GEN_MINOR) "." \ - __stringify(DRV_MODULE_GEN_SUBMINOR) "K" -#endif #define DEVICE_NAME "Elastic Network Adapter (ENA)" @@ -104,8 +98,6 @@ #define ENA_RX_RSS_TABLE_LOG_SIZE 7 #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) -#define ENA_HASH_KEY_SIZE 40 - /* The number of tx packet completions that will be handled each NAPI poll * cycle is ring_size / ENA_TX_POLL_BUDGET_DIVIDER. */ @@ -137,6 +129,8 @@ #define ENA_IO_IRQ_FIRST_IDX 1 #define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q)) +#define ENA_ADMIN_POLL_DELAY_US 100 + /* ENA device should send keep alive msg every 1 sec. * We wait for 6 sec just to be on the safe side. */ @@ -151,8 +145,9 @@ * The buffer size we share with the device is defined to be ENA_PAGE_SIZE */ -#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \ - VLAN_HLEN - XDP_PACKET_HEADROOM) +#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \ + VLAN_HLEN - XDP_PACKET_HEADROOM - \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \ ((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues)) @@ -248,6 +243,7 @@ struct ena_stats_tx { u64 bad_req_id; u64 llq_buffer_copy; u64 missed_tx; + u64 unmask_interrupt; }; struct ena_stats_rx { @@ -333,6 +329,7 @@ struct ena_stats_dev { u64 interface_down; u64 admin_q_pause; u64 rx_drops; + u64 tx_drops; }; enum ena_flags_t { diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 04fcafcc059c..b514bb1b855d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -154,4 +154,4 @@ enum ena_regs_reset_reason_types { #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16 #define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000 -#endif /*_ENA_REGS_H_ */ +#endif /* _ENA_REGS_H_ */ |
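
A note on the admin-queue polling change above: the series drops the fixed msleep(ENA_POLL_MS) and instead polls with a capped exponential backoff (ena_delay_exponential_backoff_us, bounded by ENA_MIN_ADMIN_POLL_US and ENA_MAX_ADMIN_POLL_US, with the base delay stored in ena_dev->ena_min_poll_delay_us). The standalone user-space sketch below only mirrors that clamping arithmetic so the delay progression can be inspected; the constants come from the diff, while the backoff_delay_us() helper and the main() harness are illustrative and not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define ENA_MIN_ADMIN_POLL_US 100	/* values taken from ena_com.c in the diff above */
#define ENA_MAX_ADMIN_POLL_US 5000

/*
 * Mirrors the arithmetic of ena_delay_exponential_backoff_us(): raise the
 * configured base delay to at least ENA_MIN_ADMIN_POLL_US, double it per
 * retry (1U << exp), and cap the result at ENA_MAX_ADMIN_POLL_US.  The
 * kernel then sleeps with usleep_range(delay_us, 2 * delay_us); here the
 * computed delay is simply returned.  (The kernel does this in 32-bit
 * arithmetic; a wider type is used here only to avoid overflow in the demo.)
 */
static uint32_t backoff_delay_us(uint32_t exp, uint32_t delay_us)
{
	if (delay_us < ENA_MIN_ADMIN_POLL_US)
		delay_us = ENA_MIN_ADMIN_POLL_US;

	uint64_t scaled = (uint64_t)delay_us * (1U << exp);

	return scaled > ENA_MAX_ADMIN_POLL_US ? ENA_MAX_ADMIN_POLL_US
					      : (uint32_t)scaled;
}

int main(void)
{
	/* 100 us matches ENA_ADMIN_POLL_DELAY_US, the base ena_probe() now
	 * stores in ena_dev->ena_min_poll_delay_us.
	 */
	for (uint32_t exp = 0; exp < 8; exp++)
		printf("retry %u: sleep ~%u us\n", exp, backoff_delay_us(exp, 100));

	return 0;
}

Built with any C99 compiler, this prints a 100 -> 200 -> 400 -> ... -> 5000 us progression, which is the bounded retry cadence the polling loops in ena_com_wait_and_process_admin_cq_polling(), wait_for_reset_state() and ena_com_wait_for_abort_completion() now follow instead of a flat 5 ms sleep.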