author    | Javed Hasan <jhasan@marvell.com>                | 2020-03-26 22:48:47 -0700
committer | Martin K. Petersen <martin.petersen@oracle.com> | 2020-03-29 18:10:59 -0400
commit    | 77331115e220925af1f52e18ac99e37a0b0c10ad (patch)
tree      | 7bef64ebf77518df1c81a31d00fd78af71c1070f /drivers/scsi/bnx2fc
parent    | b92fcfcb687de7d08278a557faff3a7b4a672cc7 (diff)
scsi: bnx2fc: Process the RQE with CQE in interrupt context
The filesystem goes read-only after continuous error injection because the RQE
was handled in a deferred context, leading to a mismatch between the CQE and its RQE.
Specifically, this patch makes the following changes:
- Process the RQE with CQE in interrupt context, before putting it into
the work queue.
- Producer and consumer indices are also updated in interrupt context
  to guarantee the order of processing (see the sketch below).
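In outline, the interrupt handler now consumes the RQ entries itself and hands the
deferred worker a private copy of the RQE payload. The following is a minimal sketch
of that pattern only; the types, sizes, and helper names (get_next_rqe(),
return_rqe(), queue_to_io_thread()) are illustrative stand-ins, not the driver's
actual structures or API.

  /*
   * Sketch: capture the RQE payload in interrupt context, return the RQ
   * entries (advancing the consumer index) there as well, and only then
   * defer the rest of the completion to the I/O thread.
   */
  #include <string.h>

  #define RQ_BUF_SZ 256                 /* stand-in for BNX2FC_RQ_BUF_SZ */

  struct work_item {
          unsigned char rq_data[RQ_BUF_SZ];   /* private copy of the RQE payload */
          unsigned char num_rq;
          unsigned short wqe;
  };

  /* Stand-ins for the RQ accessors and the per-CPU work queueing. */
  extern unsigned char *get_next_rqe(void);
  extern void return_rqe(void);
  extern void queue_to_io_thread(struct work_item *w);

  /* Runs in interrupt context, while the RQE still matches the CQE. */
  static void handle_cqe(unsigned short wqe, unsigned char num_rq,
                         struct work_item *w)
  {
          unsigned char i;

          w->wqe = wqe;
          w->num_rq = num_rq;
          memset(w->rq_data, 0, RQ_BUF_SZ);

          if (num_rq) {
                  unsigned char *rq = get_next_rqe();  /* first entry holds sense data */

                  for (i = 1; i < num_rq; i++)
                          get_next_rqe();              /* extra entries are not needed */
                  if (rq)
                          memcpy(w->rq_data, rq, RQ_BUF_SZ);
                  for (i = 0; i < num_rq; i++)
                          return_rqe();                /* consumer index advances here */
          }

          queue_to_io_thread(w);   /* the deferred work owns its own copy */
  }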
[mkp: fixed bad indentation]
Link: https://lore.kernel.org/r/20200327054849.15947-2-skashyap@marvell.com
Signed-off-by: Javed Hasan <jhasan@marvell.com>
Signed-off-by: Saurav Kashyap <skashyap@marvell.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/scsi/bnx2fc')
-rw-r--r-- | drivers/scsi/bnx2fc/bnx2fc.h      |  11
-rw-r--r-- | drivers/scsi/bnx2fc/bnx2fc_fcoe.c |   8
-rw-r--r-- | drivers/scsi/bnx2fc/bnx2fc_hwi.c  | 103
-rw-r--r-- | drivers/scsi/bnx2fc/bnx2fc_io.c   |  28
4 files changed, 96 insertions(+), 54 deletions(-)
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 3b84db8d13a9..15fa8e2a9ad8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -482,7 +482,10 @@ struct io_bdt {
 struct bnx2fc_work {
 	struct list_head list;
 	struct bnx2fc_rport *tgt;
+	struct fcoe_task_ctx_entry *task;
+	unsigned char rq_data[BNX2FC_RQ_BUF_SZ];
 	u16 wqe;
+	u8 num_rq;
 };
 struct bnx2fc_unsol_els {
 	struct fc_lport *lport;
@@ -550,7 +553,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
 				enum fc_rport_event event);
 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 				   struct fcoe_task_ctx_entry *task,
-				   u8 num_rq);
+				   u8 num_rq, unsigned char *rq_data);
 void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
 				  struct fcoe_task_ctx_entry *task,
 				  u8 num_rq);
@@ -559,7 +562,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
 			       u8 num_rq);
 void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
 			     struct fcoe_task_ctx_entry *task,
-			     u8 num_rq);
+			     u8 num_rq, unsigned char *rq_data);
 void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
 			      struct fcoe_task_ctx_entry *task,
 			      u8 num_rq);
@@ -577,7 +580,9 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
 				 void *arg, u32 timeout);
 void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+			     unsigned char *rq_data, u8 num_rq,
+			     struct fcoe_task_ctx_entry *task);
 struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
 				       u32 port_id);
 void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index b4bfab5edf8f..1cbb431fa682 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -660,7 +660,10 @@ static int bnx2fc_percpu_io_thread(void *arg)
 
 		list_for_each_entry_safe(work, tmp, &work_list, list) {
 			list_del_init(&work->list);
-			bnx2fc_process_cq_compl(work->tgt, work->wqe);
+			bnx2fc_process_cq_compl(work->tgt, work->wqe,
+						work->rq_data,
+						work->num_rq,
+						work->task);
 			kfree(work);
 		}
 
@@ -2655,7 +2658,8 @@ static int bnx2fc_cpu_offline(unsigned int cpu)
 	/* Free all work in the list */
 	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
 		list_del_init(&work->list);
-		bnx2fc_process_cq_compl(work->tgt, work->wqe);
+		bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data,
+					work->num_rq, work->task);
 		kfree(work);
 	}
 
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 6f8335ddb1f2..eb41b0080f57 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -863,36 +863,22 @@ ret_warn_rqe:
 	}
 }
 
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+			     unsigned char *rq_data, u8 num_rq,
+			     struct fcoe_task_ctx_entry *task)
 {
-	struct fcoe_task_ctx_entry *task;
-	struct fcoe_task_ctx_entry *task_page;
 	struct fcoe_port *port = tgt->port;
 	struct bnx2fc_interface *interface = port->priv;
 	struct bnx2fc_hba *hba = interface->hba;
 	struct bnx2fc_cmd *io_req;
-	int task_idx, index;
+
 	u16 xid;
 	u8 cmd_type;
 	u8 rx_state = 0;
-	u8 num_rq;
 
 	spin_lock_bh(&tgt->tgt_lock);
-	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
-	if (xid >= hba->max_tasks) {
-		printk(KERN_ERR PFX "ERROR:xid out of range\n");
-		spin_unlock_bh(&tgt->tgt_lock);
-		return;
-	}
-	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
-	index = xid % BNX2FC_TASKS_PER_PAGE;
-	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
-	task = &(task_page[index]);
-
-	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
-		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
-		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
 
 	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 
 	if (io_req == NULL) {
@@ -912,7 +898,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 	switch (cmd_type) {
 	case BNX2FC_SCSI_CMD:
 		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
-			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
+						      rq_data);
 			spin_unlock_bh(&tgt->tgt_lock);
 			return;
 		}
@@ -929,7 +916,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
 
 	case BNX2FC_TASK_MGMT_CMD:
 		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
-		bnx2fc_process_tm_compl(io_req, task, num_rq);
+		bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
 		break;
 
 	case BNX2FC_ABTS:
@@ -987,7 +974,9 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
 
 }
 
-static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
+					     unsigned char *rq_data, u8 num_rq,
+					     struct fcoe_task_ctx_entry *task)
 {
 	struct bnx2fc_work *work;
 	work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
@@ -997,29 +986,87 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
 	INIT_LIST_HEAD(&work->list);
 	work->tgt = tgt;
 	work->wqe = wqe;
+	work->num_rq = num_rq;
+	work->task = task;
+	if (rq_data)
+		memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
+
 	return work;
 }
 
 /* Pending work request completion */
-static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
 {
 	unsigned int cpu = wqe % num_possible_cpus();
 	struct bnx2fc_percpu_s *fps;
 	struct bnx2fc_work *work;
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct fcoe_port *port = tgt->port;
+	struct bnx2fc_interface *interface = port->priv;
+	struct bnx2fc_hba *hba = interface->hba;
+	unsigned char *rq_data = NULL;
+	unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
+	int task_idx, index;
+	unsigned char *dummy;
+	u16 xid;
+	u8 num_rq;
+	int i;
+
+	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+	if (xid >= hba->max_tasks) {
+		pr_err(PFX "ERROR:xid out of range\n");
+		return 0;
+	}
+
+	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+	task = &task_page[index];
+
+	num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+		   FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+
+	memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
+
+	if (!num_rq)
+		goto num_rq_zero;
+
+	rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+	if (num_rq > 1) {
+		/* We do not need extra sense data */
+		for (i = 1; i < num_rq; i++)
+			dummy = bnx2fc_get_next_rqe(tgt, 1);
+	}
+
+	if (rq_data)
+		memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
+
+	/* return RQ entries */
+	for (i = 0; i < num_rq; i++)
+		bnx2fc_return_rqe(tgt, 1);
+
+num_rq_zero:
 
 	fps = &per_cpu(bnx2fc_percpu, cpu);
 	spin_lock_bh(&fps->fp_work_lock);
 	if (fps->iothread) {
-		work = bnx2fc_alloc_work(tgt, wqe);
+		work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
+					 num_rq, task);
 		if (work) {
 			list_add_tail(&work->list, &fps->work_list);
 			wake_up_process(fps->iothread);
 			spin_unlock_bh(&fps->fp_work_lock);
-			return;
+			return 1;
 		}
 	}
 	spin_unlock_bh(&fps->fp_work_lock);
-	bnx2fc_process_cq_compl(tgt, wqe);
+	bnx2fc_process_cq_compl(tgt, wqe,
+				rq_data_buff, num_rq, task);
+
+	return 1;
 }
 
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
@@ -1056,8 +1103,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 			/* Unsolicited event notification */
 			bnx2fc_process_unsol_compl(tgt, wqe);
 		} else {
-			bnx2fc_pending_work(tgt, wqe);
-			num_free_sqes++;
+			if (bnx2fc_pending_work(tgt, wqe))
+				num_free_sqes++;
 		}
 		cqe++;
 		tgt->cq_cons_idx++;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4c8122a82322..9ab915240a2a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -24,7 +24,7 @@ static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
 static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
 				 struct fcoe_fcp_rsp_payload *fcp_rsp,
-				 u8 num_rq);
+				 u8 num_rq, unsigned char *rq_data);
 
 void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
 			  unsigned int timer_msec)
@@ -1518,7 +1518,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
 }
 
 void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
-			     struct fcoe_task_ctx_entry *task, u8 num_rq)
+			     struct fcoe_task_ctx_entry *task, u8 num_rq,
+			     unsigned char *rq_data)
 {
 	struct bnx2fc_mp_req *tm_req;
 	struct fc_frame_header *fc_hdr;
@@ -1557,7 +1558,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
 	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
 		bnx2fc_parse_fcp_rsp(io_req,
 				     (struct fcoe_fcp_rsp_payload *)
-				     rsp_buf, num_rq);
+				     rsp_buf, num_rq, rq_data);
 		if (io_req->fcp_rsp_code == 0) {
 			/* TM successful */
 			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
@@ -1755,15 +1756,11 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
 
 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
 				 struct fcoe_fcp_rsp_payload *fcp_rsp,
-				 u8 num_rq)
+				 u8 num_rq, unsigned char *rq_data)
 {
 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
-	struct bnx2fc_rport *tgt = io_req->tgt;
 	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
 	u32 rq_buff_len = 0;
-	int i;
-	unsigned char *rq_data;
-	unsigned char *dummy;
 	int fcp_sns_len = 0;
 	int fcp_rsp_len = 0;
 
@@ -1809,14 +1806,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
 			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
 		}
 
-		rq_data = bnx2fc_get_next_rqe(tgt, 1);
-
-		if (num_rq > 1) {
-			/* We do not need extra sense data */
-			for (i = 1; i < num_rq; i++)
-				dummy = bnx2fc_get_next_rqe(tgt, 1);
-		}
-
 		/* fetch fcp_rsp_code */
 		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
 			/* Only for task management function */
@@ -1837,9 +1826,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
 		if (fcp_sns_len)
 			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
 
-		/* return RQ entries */
-		for (i = 0; i < num_rq; i++)
-			bnx2fc_return_rqe(tgt, 1);
 	}
 }
 
@@ -1918,7 +1904,7 @@ exit_qcmd:
 
 void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 				   struct fcoe_task_ctx_entry *task,
-				   u8 num_rq)
+				   u8 num_rq, unsigned char *rq_data)
 {
 	struct fcoe_fcp_rsp_payload *fcp_rsp;
 	struct bnx2fc_rport *tgt = io_req->tgt;
@@ -1950,7 +1936,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
 
 	/* parse fcp_rsp and obtain sense data from RQ if available */
-	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
 
 	if (!sc_cmd->SCp.ptr) {
 		printk(KERN_ERR PFX "SCp.ptr is NULL\n");