Diffstat (limited to 'drivers/net/ethernet/qlogic/qed')
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed.h | 31
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_dev.c | 334
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 28
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_hsi.h | 6
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_int.c | 152
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_int.h | 10
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_ll2.c | 30
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_ll2.h | 1
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_main.c | 79
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_mcp.c | 65
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_mcp.h | 10
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 50
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_sp.h | 4
-rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_spq.c | 47
14 files changed, 759 insertions, 88 deletions
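
Before the diff body: the core idea of the patch is a doorbell recovery list. Each doorbelling entity (the slowpath SPQ in qed_spq.c, the LL2 TX queue in qed_ll2.c) registers its doorbell address together with a pointer to the last doorbell value it wrote, and when the DORQ attention handler detects a drop/overflow it drains the doorbell FIFO, clears the sticky overflow bit and replays every registered entry. The following is a minimal user-space sketch of that shadow-and-replay pattern, not the driver code itself: db_rec_entry, db_rec_add, db_rec_execute and fake_db_bar are illustrative names; the real API is qed_db_recovery_add()/qed_db_recovery_del(), and qed_db_recovery_execute() walks the list under a spinlock with sanity checks that each address lies inside the doorbell BAR and uses DIRECT_REG_WR plus wmb() barriers.

/*
 * Minimal model of doorbell shadow-and-replay (illustrative only).
 * Build: cc -std=c99 -Wall db_rec_sketch.c -o db_rec_sketch
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct db_rec_entry {
	struct db_rec_entry *next;
	volatile uint32_t *db_addr;	/* where the doorbell is rung */
	const uint32_t *db_data;	/* shadow of the last value written */
};

static struct db_rec_entry *db_rec_list;

/* Register a doorbell: remember where it lives and where its shadow data is */
static int db_rec_add(volatile uint32_t *db_addr, const uint32_t *db_data)
{
	struct db_rec_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	e->db_addr = db_addr;
	e->db_data = db_data;
	e->next = db_rec_list;
	db_rec_list = e;
	return 0;
}

/* Replay every registered doorbell from its shadow data */
static void db_rec_execute(void)
{
	struct db_rec_entry *e;

	for (e = db_rec_list; e; e = e->next)
		*e->db_addr = *e->db_data;
}

int main(void)
{
	uint32_t fake_db_bar[2] = { 0, 0 };	/* stands in for the doorbell BAR */
	uint32_t spq_prod = 7, txq_prod = 42;	/* shadow values kept by each entity */

	db_rec_add(&fake_db_bar[0], &spq_prod);
	db_rec_add(&fake_db_bar[1], &txq_prod);

	/* Pretend the hardware dropped the doorbells, then recover them */
	db_rec_execute();
	printf("replayed doorbells: %" PRIu32 " %" PRIu32 "\n",
	       fake_db_bar[0], fake_db_bar[1]);
	return 0;
}

In the patch itself the same pairing appears as qed_db_recovery_add() when a queue is started and qed_db_recovery_del() when it is stopped, with a periodic slowpath work item (QED_SLOWPATH_PERIODIC_DB_REC) re-running the handler after an overflow.
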
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d9a03aba0e02..24a90163775e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -296,6 +296,12 @@ enum qed_wol_support { QED_WOL_SUPPORT_PME, }; +enum qed_db_rec_exec { + DB_REC_DRY_RUN, + DB_REC_REAL_DEAL, + DB_REC_ONCE, +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -425,6 +431,14 @@ struct qed_qm_info { u8 num_pf_rls; }; +struct qed_db_recovery_info { + struct list_head list; + + /* Lock to protect the doorbell recovery mechanism list */ + spinlock_t lock; + u32 db_recovery_counter; +}; + struct storm_stats { u32 address; u32 len; @@ -522,6 +536,7 @@ struct qed_simd_fp_handler { enum qed_slowpath_wq_flag { QED_SLOWPATH_MFW_TLV_REQ, + QED_SLOWPATH_PERIODIC_DB_REC, }; struct qed_hwfn { @@ -640,6 +655,9 @@ struct qed_hwfn { /* L2-related */ struct qed_l2_info *p_l2_info; + /* Mechanism for recovering from doorbell drop */ + struct qed_db_recovery_info db_recovery_info; + /* Nvm images number and attributes */ struct qed_nvm_image_info nvm_info; @@ -652,11 +670,12 @@ struct qed_hwfn { struct delayed_work iov_task; unsigned long iov_task_flags; #endif - - struct z_stream_s *stream; + struct z_stream_s *stream; + bool slowpath_wq_active; struct workqueue_struct *slowpath_wq; struct delayed_work slowpath_task; unsigned long slowpath_task_flags; + u32 periodic_db_rec_count; }; struct pci_params { @@ -897,6 +916,12 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) +/* doorbell recovery mechanism */ +void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, + enum qed_db_rec_exec db_exec); +bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); + /* Other Linux specific common definitions */ #define DP_NAME(cdev) ((cdev)->name) @@ -931,4 +956,6 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, union qed_mfw_tlv_data *tlv_data); void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc); + +void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 88a8576ca9ce..8f6551421945 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -66,6 +66,318 @@ static DEFINE_SPINLOCK(qm_lock); +/******************** Doorbell Recovery *******************/ +/* The doorbell recovery mechanism consists of a list of entries which represent + * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each + * entity needs to register with the mechanism and provide the parameters + * describing it's doorbell, including a location where last used doorbell data + * can be found. The doorbell execute function will traverse the list and + * doorbell all of the registered entries. 
+ */ +struct qed_db_recovery_entry { + struct list_head list_entry; + void __iomem *db_addr; + void *db_data; + enum qed_db_rec_width db_width; + enum qed_db_rec_space db_space; + u8 hwfn_idx; +}; + +/* Display a single doorbell recovery entry */ +static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn, + struct qed_db_recovery_entry *db_entry, + char *action) +{ + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n", + action, + db_entry, + db_entry->db_addr, + db_entry->db_data, + db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b", + db_entry->db_space == DB_REC_USER ? "user" : "kernel", + db_entry->hwfn_idx); +} + +/* Doorbell address sanity (address within doorbell bar range) */ +static bool qed_db_rec_sanity(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data) +{ + /* Make sure doorbell address is within the doorbell bar */ + if (db_addr < cdev->doorbells || + (u8 __iomem *)db_addr > + (u8 __iomem *)cdev->doorbells + cdev->db_size) { + WARN(true, + "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", + db_addr, + cdev->doorbells, + (u8 __iomem *)cdev->doorbells + cdev->db_size); + return false; + } + + /* ake sure doorbell data pointer is not null */ + if (!db_data) { + WARN(true, "Illegal doorbell data pointer: %p", db_data); + return false; + } + + return true; +} + +/* Find hwfn according to the doorbell address */ +static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev, + void __iomem *db_addr) +{ + struct qed_hwfn *p_hwfn; + + /* In CMT doorbell bar is split down the middle between engine 0 and enigne 1 */ + if (cdev->num_hwfns > 1) + p_hwfn = db_addr < cdev->hwfns[1].doorbells ? + &cdev->hwfns[0] : &cdev->hwfns[1]; + else + p_hwfn = QED_LEADING_HWFN(cdev); + + return p_hwfn; +} + +/* Add a new entry to the doorbell recovery mechanism */ +int qed_db_recovery_add(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space) +{ + struct qed_db_recovery_entry *db_entry; + struct qed_hwfn *p_hwfn; + + /* Shortcircuit VFs, for now */ + if (IS_VF(cdev)) { + DP_VERBOSE(cdev, + QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return 0; + } + + /* Sanitize doorbell address */ + if (!qed_db_rec_sanity(cdev, db_addr, db_data)) + return -EINVAL; + + /* Obtain hwfn from doorbell address */ + p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); + + /* Create entry */ + db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL); + if (!db_entry) { + DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n"); + return -ENOMEM; + } + + /* Populate entry */ + db_entry->db_addr = db_addr; + db_entry->db_data = db_data; + db_entry->db_width = db_width; + db_entry->db_space = db_space; + db_entry->hwfn_idx = p_hwfn->my_id; + + /* Display */ + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list); + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); + + return 0; +} + +/* Remove an entry from the doorbell recovery mechanism */ +int qed_db_recovery_del(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data) +{ + struct qed_db_recovery_entry *db_entry = NULL; + struct qed_hwfn *p_hwfn; + int rc = -EINVAL; + + /* Shortcircuit VFs, for now */ + if (IS_VF(cdev)) { + DP_VERBOSE(cdev, + QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return 0; + } + + /* Sanitize doorbell address */ + 
if (!qed_db_rec_sanity(cdev, db_addr, db_data)) + return -EINVAL; + + /* Obtain hwfn from doorbell address */ + p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + /* search according to db_data addr since db_addr is not unique (roce) */ + if (db_entry->db_data == db_data) { + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting"); + list_del(&db_entry->list_entry); + rc = 0; + break; + } + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); + + if (rc == -EINVAL) + + DP_NOTICE(p_hwfn, + "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n", + db_data, db_addr); + else + kfree(db_entry); + + return rc; +} + +/* Initialize the doorbell recovery mechanism */ +static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn) +{ + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n"); + + /* Make sure db_size was set in cdev */ + if (!p_hwfn->cdev->db_size) { + DP_ERR(p_hwfn->cdev, "db_size not set\n"); + return -EINVAL; + } + + INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list); + spin_lock_init(&p_hwfn->db_recovery_info.lock); + p_hwfn->db_recovery_info.db_recovery_counter = 0; + + return 0; +} + +/* Destroy the doorbell recovery mechanism */ +static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n"); + if (!list_empty(&p_hwfn->db_recovery_info.list)) { + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n"); + while (!list_empty(&p_hwfn->db_recovery_info.list)) { + db_entry = + list_first_entry(&p_hwfn->db_recovery_info.list, + struct qed_db_recovery_entry, + list_entry); + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging"); + list_del(&db_entry->list_entry); + kfree(db_entry); + } + } + p_hwfn->db_recovery_info.db_recovery_counter = 0; +} + +/* Print the content of the doorbell recovery mechanism */ +void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_NOTICE(p_hwfn, + "Displaying doorbell recovery database. Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); +} + +/* Ring the doorbell of a single doorbell recovery entry */ +static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, + struct qed_db_recovery_entry *db_entry, + enum qed_db_rec_exec db_exec) +{ + if (db_exec != DB_REC_ONCE) { + /* Print according to width */ + if (db_entry->db_width == DB_REC_WIDTH_32B) { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "%s doorbell address %p data %x\n", + db_exec == DB_REC_DRY_RUN ? + "would have rung" : "ringing", + db_entry->db_addr, + *(u32 *)db_entry->db_data); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "%s doorbell address %p data %llx\n", + db_exec == DB_REC_DRY_RUN ? 
+ "would have rung" : "ringing", + db_entry->db_addr, + *(u64 *)(db_entry->db_data)); + } + } + + /* Sanity */ + if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, + db_entry->db_data)) + return; + + /* Flush the write combined buffer. Since there are multiple doorbelling + * entities using the same address, if we don't flush, a transaction + * could be lost. + */ + wmb(); + + /* Ring the doorbell */ + if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { + if (db_entry->db_width == DB_REC_WIDTH_32B) + DIRECT_REG_WR(db_entry->db_addr, + *(u32 *)(db_entry->db_data)); + else + DIRECT_REG_WR64(db_entry->db_addr, + *(u64 *)(db_entry->db_data)); + } + + /* Flush the write combined buffer. Next doorbell may come from a + * different entity to the same address... + */ + wmb(); +} + +/* Traverse the doorbell recovery entry list and ring all the doorbells */ +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, + enum qed_db_rec_exec db_exec) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + if (db_exec != DB_REC_ONCE) { + DP_NOTICE(p_hwfn, + "Executing doorbell recovery. Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* Track amount of times recovery was executed */ + p_hwfn->db_recovery_info.db_recovery_counter++; + } + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + qed_db_recovery_ring(p_hwfn, db_entry, db_exec); + if (db_exec == DB_REC_ONCE) + break; + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); +} + +/******************** Doorbell Recovery end ****************/ + #define QED_MIN_DPIS (4) #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) @@ -194,6 +506,9 @@ void qed_resc_free(struct qed_dev *cdev) qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); + + /* Destroy doorbell recovery mechanism */ + qed_db_recovery_teardown(p_hwfn); } } @@ -969,6 +1284,11 @@ int qed_resc_alloc(struct qed_dev *cdev) struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u32 n_eqes, num_cons; + /* Initialize the doorbell recovery mechanism */ + rc = qed_db_recovery_setup(p_hwfn); + if (rc) + goto alloc_err; + /* First allocate the context manager structure */ rc = qed_cxt_mngr_alloc(p_hwfn); if (rc) @@ -1468,6 +1788,14 @@ enum QED_ROCE_EDPM_MODE { QED_ROCE_EDPM_MODE_DISABLE = 2, }; +bool qed_edpm_enabled(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) + return false; + + return true; +} + static int qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -1537,13 +1865,13 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->wid_count = (u16) n_cpus; DP_INFO(p_hwfn, - "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", + "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, - ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? - "disabled" : "enabled"); + (!qed_edpm_enabled(p_hwfn)) ? 
+ "disabled" : "enabled", PAGE_SIZE); if (rc) { DP_ERR(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index defdda1ffaa2..acccd85170aa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -472,6 +472,34 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); +/** + * @brief db_recovery_add - add doorbell information to the doorbell + * recovery mechanism. + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address of where db_data is stored + * @param db_width - doorbell is 32b pr 64b + * @param db_space - doorbell recovery addresses are user or kernel space + */ +int qed_db_recovery_add(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space); + +/** + * @brief db_recovery_del - remove doorbell information from the doorbell + * recovery mechanism. db_data serves as key (db_addr is not unique). + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address where db_data is stored. Serves as key for the + * entry to delete. + */ +int qed_db_recovery_del(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data); + const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b38e12c9de9d..b13cfb449d8f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12655,6 +12655,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 #define DRV_MB_PARAM_NVM_LEN_OFFSET 24 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 @@ -12814,6 +12815,11 @@ struct public_drv_mb { union drv_union_data union_data; }; +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 + enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LINK_CHANGE, MFW_DRV_MSG_FLR_FW_ACK_FAILED, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index b22f464ea3fa..92340919d852 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -361,29 +361,147 @@ static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) return 0; } -#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) -#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) -#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) -#define QED_DORQ_ATTENTION_SIZE_SHIFT (16) +#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) +#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) +#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) +#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) +#define QED_DORQ_ATTENTION_SIZE_SHIFT (16) + +#define QED_DB_REC_COUNT 1000 +#define QED_DB_REC_INTERVAL 100 + +static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 count = QED_DB_REC_COUNT; + u32 usage = 1; + + /* wait for usage to zero or count to run out. This is necessary since + * EDPM doorbell transactions can take multiple 64b cycles, and as such + * can "split" over the pci. 
Possibly, the doorbell drop can happen with + * half an EDPM in the queue and other half dropped. Another EDPM + * doorbell to the same address (from doorbell recovery mechanism or + * from the doorbelling entity) could have first half dropped and second + * half interpreted as continuation of the first. To prevent such + * malformed doorbells from reaching the device, flush the queue before + * releasing the overflow sticky indication. + */ + while (count-- && usage) { + usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); + udelay(QED_DB_REC_INTERVAL); + } + + /* should have been depleted by now */ + if (usage) { + DP_NOTICE(p_hwfn->cdev, + "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n", + QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); + return -EBUSY; + } + + return 0; +} + +int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 overflow; + int rc; + + overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow); + if (!overflow) { + qed_db_recovery_execute(p_hwfn, DB_REC_ONCE); + return 0; + } + + if (qed_edpm_enabled(p_hwfn)) { + rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); + if (rc) + return rc; + } + + /* Flush any pending (e)dpm as they may never arrive */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); + + /* Release overflow sticky indication (stop silently dropping everything) */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); + + /* Repeat all last doorbells (doorbell drop recovery) */ + qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); + + return 0; +} + static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) { - u32 reason; + u32 int_sts, first_drop_reason, details, address, all_drops_reason; + struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + int rc; - reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) & - QED_DORQ_ATTENTION_REASON_MASK; - if (reason) { - u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - DORQ_REG_DB_DROP_DETAILS); + int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); + DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); - DP_INFO(p_hwfn->cdev, - "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", - qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - DORQ_REG_DB_DROP_DETAILS_ADDRESS), - (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK), - GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, - reason); + /* int_sts may be zero since all PFs were interrupted for doorbell + * overflow but another one already handled it. Can abort here. If + * This PF also requires overflow recovery we will be interrupted again. + * The masked almost full indication may also be set. Ignoring. 
+ */ + if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) + return 0; + + /* check if db_drop or overflow happened */ + if (int_sts & (DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { + /* Obtain data about db drop/overflow */ + first_drop_reason = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_REASON) & + QED_DORQ_ATTENTION_REASON_MASK; + details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS); + address = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_ADDRESS); + all_drops_reason = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_REASON); + + /* Log info */ + DP_NOTICE(p_hwfn->cdev, + "Doorbell drop occurred\n" + "Address\t\t0x%08x\t(second BAR address)\n" + "FID\t\t0x%04x\t\t(Opaque FID)\n" + "Size\t\t0x%04x\t\t(in bytes)\n" + "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" + "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", + address, + GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE), + GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, + first_drop_reason, all_drops_reason); + + rc = qed_db_rec_handler(p_hwfn, p_ptt); + qed_periodic_db_rec_start(p_hwfn); + if (rc) + return rc; + + /* Clear the doorbell drop details and prepare for next drop */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); + + /* Mark interrupt as handled (note: even if drop was due to a different + * reason than overflow we mark as handled) + */ + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_INT_STS_WR, + DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); + + /* If there are no indications other than drop indications, success */ + if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | + DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) + return 0; } + /* Some other indication was present - non recoverable */ + DP_INFO(p_hwfn, "DORQ fatal attention\n"); + return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 54b4ee0acfd7..d81a62ebd524 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -190,6 +190,16 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, */ void qed_int_disable_post_isr_release(struct qed_dev *cdev); +/** + * @brief - Doorbell Recovery handler. + * Run DB_REAL_DEAL doorbell recovery in case of PF overflow + * (and flush DORQ if needed), otherwise run DB_REC_ONCE. 
+ * + * @param p_hwfn + * @param p_ptt + */ +int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + #define QED_CAU_DEF_RX_TIMER_RES 0 #define QED_CAU_DEF_TX_TIMER_RES 0 diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c6f4bab67a5f..90afd514ffe1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1085,7 +1085,14 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; - return qed_spq_post(p_hwfn, p_ent, NULL); + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr, + &p_tx->db_msg, DB_REC_WIDTH_32B, + DB_REC_KERNEL); + return rc; } static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, @@ -1119,9 +1126,11 @@ static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -1542,6 +1551,13 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells + qed_db_addr(p_ll2_conn->cid, DQ_DEMS_LEGACY); + /* prepare db data */ + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_TX_BD_PROD_CMD); + p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); if (rc) @@ -1780,7 +1796,6 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_ll2_tx_packet *p_pkt = NULL; - struct core_db_data db_msg = { 0, 0, 0 }; u16 bd_prod; /* If there are missing BDs, don't do anything now */ @@ -1809,24 +1824,19 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, list_move_tail(&p_pkt->list_entry, &p_tx->active_descq); } - SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); - SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); - SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, - DQ_XCM_CORE_TX_BD_PROD_CMD); - db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; - db_msg.spq_prod = cpu_to_le16(bd_prod); + p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod); /* Make sure the BDs data is updated before ringing the doorbell */ wmb(); - DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg)); + DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg)); DP_VERBOSE(p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", p_ll2_conn->queue_id, p_ll2_conn->cid, - p_ll2_conn->input.conn_type, db_msg.spq_prod); + p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod); } int qed_ll2_prepare_tx_packet(void *cxt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 1a5c1ae01474..5f01fbd3c073 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -103,6 +103,7 @@ struct qed_ll2_tx_queue { struct qed_ll2_tx_packet 
cur_completing_packet; u16 cur_completing_bd_idx; void __iomem *doorbell_addr; + struct core_db_data db_msg; u16 bds_idx; u16 cur_send_frag_num; u16 cur_completing_frag_num; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index fff7f04d4525..6adf5bda9811 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -966,9 +966,47 @@ static void qed_update_pf_params(struct qed_dev *cdev, } } +#define QED_PERIODIC_DB_REC_COUNT 100 +#define QED_PERIODIC_DB_REC_INTERVAL_MS 100 +#define QED_PERIODIC_DB_REC_INTERVAL \ + msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) +#define QED_PERIODIC_DB_REC_WAIT_COUNT 10 +#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \ + (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT) + +static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, + enum qed_slowpath_wq_flag wq_flag, + unsigned long delay) +{ + if (!hwfn->slowpath_wq_active) + return -EINVAL; + + /* Memory barrier for setting atomic bit */ + smp_mb__before_atomic(); + set_bit(wq_flag, &hwfn->slowpath_task_flags); + smp_mb__after_atomic(); + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); + + return 0; +} + +void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) +{ + /* Reset periodic Doorbell Recovery counter */ + p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; + + /* Don't schedule periodic Doorbell Recovery if already scheduled */ + if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &p_hwfn->slowpath_task_flags)) + return; + + qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, + QED_PERIODIC_DB_REC_INTERVAL); +} + static void qed_slowpath_wq_stop(struct qed_dev *cdev) { - int i; + int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT; if (IS_VF(cdev)) return; @@ -977,6 +1015,15 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev) if (!cdev->hwfns[i].slowpath_wq) continue; + /* Stop queuing new delayed works */ + cdev->hwfns[i].slowpath_wq_active = false; + + /* Wait until the last periodic doorbell recovery is executed */ + while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &cdev->hwfns[i].slowpath_task_flags) && + sleep_count--) + msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL); + flush_workqueue(cdev->hwfns[i].slowpath_wq); destroy_workqueue(cdev->hwfns[i].slowpath_wq); } @@ -989,7 +1036,10 @@ static void qed_slowpath_task(struct work_struct *work) struct qed_ptt *ptt = qed_ptt_acquire(hwfn); if (!ptt) { - queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + if (hwfn->slowpath_wq_active) + queue_delayed_work(hwfn->slowpath_wq, + &hwfn->slowpath_task, 0); + return; } @@ -997,6 +1047,15 @@ static void qed_slowpath_task(struct work_struct *work) &hwfn->slowpath_task_flags)) qed_mfw_process_tlv_req(hwfn, ptt); + if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &hwfn->slowpath_task_flags)) { + qed_db_rec_handler(hwfn, ptt); + if (hwfn->periodic_db_rec_count--) + qed_slowpath_delayed_work(hwfn, + QED_SLOWPATH_PERIODIC_DB_REC, + QED_PERIODIC_DB_REC_INTERVAL); + } + qed_ptt_release(hwfn, ptt); } @@ -1023,6 +1082,7 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev) } INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); + hwfn->slowpath_wq_active = true; } return 0; @@ -1939,21 +1999,30 @@ exit: * 0B | 0x3 [command index] | * 4B | b'0: check_response? 
| b'1-31 reserved | * 8B | File-type | reserved | + * 12B | Image length in bytes | * \----------------------------------------------------------------------/ * Start a new file of the provided type */ static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, const u8 **data, bool *check_resp) { + u32 file_type, file_size = 0; int rc; *data += 4; *check_resp = !!(**data & BIT(0)); *data += 4; + file_type = **data; DP_VERBOSE(cdev, NETIF_MSG_DRV, - "About to start a new file of type %02x\n", **data); - rc = qed_mcp_nvm_put_file_begin(cdev, **data); + "About to start a new file of type %02x\n", file_type); + if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { + *data += 4; + file_size = *((u32 *)(*data)); + } + + rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, + (u8 *)(&file_size), 4); *data += 4; return rc; @@ -2315,6 +2384,8 @@ const struct qed_common_ops qed_common_ops_pass = { .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, .update_wol = &qed_update_wol, + .db_recovery_add = &qed_db_recovery_add, + .db_recovery_del = &qed_db_recovery_del, .read_module_eeprom = &qed_read_module_eeprom, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index a96364df4320..e7f18e34ff0d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1619,7 +1619,7 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_sp_pf_update_stag(p_hwfn); } - DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", + DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); /* Acknowledge the MFW */ @@ -1641,7 +1641,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> OEM_CFG_CHANNEL_TYPE_OFFSET; if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) - DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val); + DP_NOTICE(p_hwfn, + "Incorrect UFP Channel type %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; if (val == OEM_CFG_SCHED_TYPE_ETS) { @@ -1650,7 +1652,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; } else { p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; - DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val); + DP_NOTICE(p_hwfn, + "Unknown UFP scheduling mode %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); } qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); @@ -1665,13 +1669,15 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; } else { p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; - DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val); + DP_NOTICE(p_hwfn, + "Unknown Host priority control %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); } DP_NOTICE(p_hwfn, - "UFP shmem config: mode = %d tc = %d pri_type = %d\n", - p_hwfn->ufp_info.mode, - p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type); + "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n", + p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, + p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn)); } static int @@ -2739,24 +2745,6 @@ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf) return 0; } -int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr) -{ - struct qed_hwfn *p_hwfn = 
QED_LEADING_HWFN(cdev); - struct qed_ptt *p_ptt; - u32 resp, param; - int rc; - - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) - return -EBUSY; - rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, - &resp, ¶m); - cdev->mcp_nvm_resp = resp; - qed_ptt_release(p_hwfn, p_ptt); - - return rc; -} - int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len) { @@ -2770,6 +2758,9 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, return -EBUSY; switch (cmd) { + case QED_PUT_FILE_BEGIN: + nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN; + break; case QED_PUT_FILE_DATA: nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; break; @@ -2782,10 +2773,14 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, goto out; } + buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); while (buf_idx < len) { - buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); - nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | - addr) + buf_idx; + if (cmd == QED_PUT_FILE_BEGIN) + nvm_offset = addr; + else + nvm_offset = ((buf_size << + DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) + + buf_idx; rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, &resp, ¶m, buf_size, (u32 *)&p_buf[buf_idx]); @@ -2810,7 +2805,19 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000) usleep_range(1000, 2000); - buf_idx += buf_size; + /* For MBI upgrade, MFW response includes the next buffer offset + * to be delivered to MFW. + */ + if (param && cmd == QED_PUT_FILE_DATA) { + buf_idx = QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); + buf_size = QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); + } else { + buf_idx += buf_size; + buf_size = min_t(u32, (len - buf_idx), + MCP_DRV_NVM_BUF_LEN); + } } cdev->mcp_nvm_resp = resp; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 1adfe52b3905..eddf67798d6f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -543,16 +543,6 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len); /** - * @brief Put file begin - * - * @param cdev - * @param addr - nvm offset - * - * @return int - 0 - operation was successful. 
- */ -int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr); - -/** * @brief Check latest response * * @param cdev diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 2440970882c4..8939ed6e08b7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1243,6 +1243,56 @@ 0x1701534UL #define TSEM_REG_DBG_FORCE_FRAME \ 0x1701538UL +#define DORQ_REG_PF_USAGE_CNT \ + 0x1009c0UL +#define DORQ_REG_PF_OVFL_STICKY \ + 0x1009d0UL +#define DORQ_REG_DPM_FORCE_ABORT \ + 0x1009d8UL +#define DORQ_REG_INT_STS \ + 0x100180UL +#define DORQ_REG_INT_STS_ADDRESS_ERROR \ + (0x1UL << 0) +#define DORQ_REG_INT_STS_WR \ + 0x100188UL +#define DORQ_REG_DB_DROP_DETAILS_REL \ + 0x100a28UL +#define DORQ_REG_INT_STS_ADDRESS_ERROR_SHIFT \ + 0 +#define DORQ_REG_INT_STS_DB_DROP \ + (0x1UL << 1) +#define DORQ_REG_INT_STS_DB_DROP_SHIFT \ + 1 +#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR \ + (0x1UL << 2) +#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR_SHIFT \ + 2 +#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL\ + (0x1UL << 3) +#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL_SHIFT \ + 3 +#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR \ + (0x1UL << 4) +#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR_SHIFT \ + 4 +#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR \ + (0x1UL << 5) +#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR_SHIFT \ + 5 +#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR \ + (0x1UL << 6) +#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR_SHIFT \ + 6 +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR \ + (0x1UL << 7) +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR_SHIFT \ + 7 +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR \ + (0x1UL << 8) +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR_SHIFT \ + 8 +#define DORQ_REG_DB_DROP_DETAILS_REASON \ + 0x100a20UL #define MSEM_REG_DBG_SELECT \ 0x1801528UL #define MSEM_REG_DBG_DWORD_ENABLE \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 3157c0d99441..4179c9013fc6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -227,7 +227,9 @@ struct qed_spq { u32 comp_count; u32 cid; - qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; + u32 db_addr_offset; + struct core_db_data db_data; + qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; }; /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 0a9c5bb0fa48..eb88bbc6b193 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -252,9 +252,9 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq, struct qed_spq_entry *p_ent) { struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + struct core_db_data *p_db_data = &p_spq->db_data; u16 echo = qed_chain_get_prod_idx(p_chain); struct slow_path_element *elem; - struct core_db_data db; p_ent->elem.hdr.echo = cpu_to_le16(echo); elem = qed_chain_produce(p_chain); @@ -266,27 +266,22 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, *elem = p_ent->elem; /* struct assignment */ /* send a doorbell on the slow hwfn session */ - memset(&db, 0, sizeof(db)); - SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM); - SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); - SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, - DQ_XCM_CORE_SPQ_PROD_CMD); - db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; - db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); + 
p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); /* make sure the SPQE is updated before the doorbell */ wmb(); - DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db); + DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data); /* make sure doorbell is rang */ wmb(); DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n", - qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), - p_spq->cid, db.params, db.agg_flags, - qed_chain_get_prod_idx(p_chain)); + p_spq->db_addr_offset, + p_spq->cid, + p_db_data->params, + p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain)); return 0; } @@ -490,8 +485,11 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_virt = NULL; + struct core_db_data *p_db_data; + void __iomem *db_addr; dma_addr_t p_phys = 0; u32 i, capacity; + int rc; INIT_LIST_HEAD(&p_spq->pending); INIT_LIST_HEAD(&p_spq->completion_pending); @@ -528,6 +526,25 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) /* reset the chain itself */ qed_chain_reset(&p_spq->chain); + + /* Initialize the address/data of the SPQ doorbell */ + p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY); + p_db_data = &p_spq->db_data; + memset(p_db_data, 0, sizeof(*p_db_data)); + SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_SPQ_PROD_CMD); + p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + + /* Register the SPQ doorbell with the doorbell recovery mechanism */ + db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + + p_spq->db_addr_offset); + rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data, + DB_REC_WIDTH_32B, DB_REC_KERNEL); + if (rc) + DP_INFO(p_hwfn, + "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n"); } int qed_spq_alloc(struct qed_hwfn *p_hwfn) @@ -575,11 +592,17 @@ spq_allocate_fail: void qed_spq_free(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; + void __iomem *db_addr; u32 capacity; if (!p_spq) return; + /* Delete the SPQ doorbell from the doorbell recovery mechanism */ + db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + + p_spq->db_addr_offset); + qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data); + if (p_spq->p_virt) { capacity = qed_chain_get_capacity(&p_spq->chain); dma_free_coherent(&p_hwfn->cdev->pdev->dev, |