author    Bhanu Prakash Gollapudi <bprakash@broadcom.com>  2011-07-27 11:32:05 -0700
committer James Bottomley <JBottomley@Parallels.com>  2011-07-28 11:42:57 +0400
commit    6c5a7ce4f176b641fd11e59be4df31ee3e6202dd (patch)
tree      676e052ad6c2ac3868e934a9f22e2f5528d6a8f3 /drivers/scsi
parent    d6857595394f1fa5c5752eae9bb6045c067fa41e (diff)
[SCSI] bnx2fc: Support 'sequence cleanup' task
For devices that support sequence-level error recovery, the firmware has to be informed, based on the REC response, of the offset from which retransmission should happen. The driver initiates a sequence cleanup task so that the firmware can program the task accordingly. Upon sequence cleanup completion, an SRR is issued to retransmit the sequence.

Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
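For orientation, here is a minimal sketch (not part of the patch) of how the pieces added below are meant to fit together, assuming the usual bnx2fc REC-handling context; the wrapper name handle_rec_response() is hypothetical:

/*
 * Illustrative outline only: driving the new sequence cleanup path
 * from REC handling. handle_rec_response() is a hypothetical caller.
 */
static void handle_rec_response(struct bnx2fc_cmd *orig_io_req,
				u32 retransmit_offset, enum fc_rctl r_ctl)
{
	/*
	 * 1. Ask the firmware to clean up the sequence, passing the offset
	 *    from which retransmission should resume.
	 */
	if (bnx2fc_initiate_seq_cleanup(orig_io_req, retransmit_offset, r_ctl))
		return;	/* error handling (e.g. aborting the IO) is left to the caller */

	/*
	 * 2. When the cleanup task completes, the CQ handler invokes
	 *    bnx2fc_process_seq_cleanup_compl(), which issues SRR via
	 *    bnx2fc_send_srr() to retransmit the sequence.
	 */
}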
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h      |  14
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_els.c  |  20
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c  |  97
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c   | 106
4 files changed, 237 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index d38dcc7f1047..45d5391229e2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -357,6 +357,8 @@ struct bnx2fc_els_cb_arg {
struct bnx2fc_cmd *aborted_io_req;
struct bnx2fc_cmd *io_req;
u16 l2_oxid;
+ u32 offset;
+ enum fc_rctl r_ctl;
};
/* bnx2fc command structure */
@@ -370,6 +372,7 @@ struct bnx2fc_cmd {
#define BNX2FC_ABTS 3
#define BNX2FC_ELS 4
#define BNX2FC_CLEANUP 5
+#define BNX2FC_SEQ_CLEANUP 6
u8 io_req_flags;
struct kref refcount;
struct fcoe_port *port;
@@ -466,6 +469,10 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u16 orig_xid);
+void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req,
+ struct fcoe_task_ctx_entry *task,
+ struct bnx2fc_cmd *orig_io_req,
+ u32 offset);
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task);
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
@@ -515,5 +522,12 @@ void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
unsigned char *buf,
u32 frame_len, u16 l2_oxid);
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req);
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl);
+void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 rx_state);
+int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
+ enum fc_rctl r_ctl);
#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 5d7baa2371f8..75d0b6ac6c2b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -253,6 +253,26 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
return rc;
}
+int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
+{
+ /*
+ * Dummy function to enable compiling individual patches. Real function
+ * is in the next patch.
+ */
+ return 0;
+}
+
+int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
+{
+ /*
+ * Dummy function to enable compiling individual patches. Real function
+ * is in the next patch.
+ */
+ return 0;
+}
+
+
+
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
void *data, u32 data_len,
void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index fa263b5902e1..03ae003d3b85 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -880,6 +880,13 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
kref_put(&io_req->refcount, bnx2fc_cmd_release);
break;
+ case BNX2FC_SEQ_CLEANUP:
+ BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
+ io_req->xid);
+ bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
default:
printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
break;
@@ -1369,6 +1376,96 @@ void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}
+void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
+ struct fcoe_task_ctx_entry *task,
+ struct bnx2fc_cmd *orig_io_req,
+ u32 offset)
+{
+ struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
+ struct bnx2fc_interface *interface = tgt->port->priv;
+ struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
+ struct fcoe_task_ctx_entry *orig_task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_ext_mul_sges_ctx *sgl;
+ u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
+ u8 orig_task_type;
+ u16 orig_xid = orig_io_req->xid;
+ u32 context_id = tgt->context_id;
+ u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
+ u32 orig_offset = offset;
+ int bd_count;
+ int orig_task_idx, index;
+ int i;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ orig_task_type = FCOE_TASK_TYPE_WRITE;
+ else
+ orig_task_type = FCOE_TASK_TYPE_READ;
+
+ /* Tx flags */
+ task->txwr_rxrd.const_ctx.tx_flags =
+ FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
+ /* init flags */
+ task->txwr_rxrd.const_ctx.init_flags = task_type <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
+ task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
+ task->rxwr_txrd.const_ctx.init_flags = context_id <<
+ FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
+
+ task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;
+
+ task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0;
+ task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset;
+
+ bd_count = orig_io_req->bd_tbl->bd_valid;
+
+ /* obtain the appropriate bd entry from relative offset */
+ for (i = 0; i < bd_count; i++) {
+ if (offset < bd[i].buf_len)
+ break;
+ offset -= bd[i].buf_len;
+ }
+ phys_addr += (i * sizeof(struct fcoe_bd_ctx));
+
+ if (orig_task_type == FCOE_TASK_TYPE_WRITE) {
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
+ (u32)phys_addr;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
+ (u32)((u64)phys_addr >> 32);
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size =
+ bd_count;
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off =
+ offset; /* adjusted offset */
+ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i;
+ } else {
+ orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
+ index = orig_xid % BNX2FC_TASKS_PER_PAGE;
+
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[orig_task_idx];
+ orig_task = &(task_page[index]);
+
+ /* Multiple SGEs were used for this IO */
+ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
+ sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
+ sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32);
+ sgl->mul_sgl.sgl_size = bd_count;
+ sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */
+ sgl->mul_sgl.cur_sge_idx = i;
+
+ memset(&task->rxwr_only.rx_seq_ctx, 0,
+ sizeof(struct fcoe_rx_seq_ctx));
+ task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset;
+ task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset;
+ }
+}
void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u16 orig_xid)
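The heart of bnx2fc_init_seq_cleanup_task() above is mapping the absolute retransmission offset onto a (BD index, intra-BD offset) pair within the original I/O's scatter-gather table. A self-contained sketch of just that walk follows; the bd_entry struct and the 4 KiB fragment sizes are made up for illustration and are not the driver's fcoe_bd_ctx:

#include <stdio.h>

/* Simplified stand-in for the driver's per-BD descriptor. */
struct bd_entry {
	unsigned int buf_len;	/* length of this buffer fragment */
};

int main(void)
{
	/* Hypothetical SG layout: three 4 KiB fragments. */
	struct bd_entry bd[] = { { 4096 }, { 4096 }, { 4096 } };
	int bd_count = sizeof(bd) / sizeof(bd[0]);
	unsigned int offset = 6000;	/* absolute retransmission offset */
	int i;

	/*
	 * Same walk as the driver: find the BD that contains 'offset' and
	 * reduce 'offset' so it is relative to the start of that BD.
	 */
	for (i = 0; i < bd_count; i++) {
		if (offset < bd[i].buf_len)
			break;
		offset -= bd[i].buf_len;
	}

	/* For offset 6000 this prints: cur_sge_idx=1 cur_sge_off=1904 */
	printf("cur_sge_idx=%d cur_sge_off=%u\n", i, offset);
	return 0;
}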
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 72940b8625bd..9820d3060cd8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -930,6 +930,76 @@ abts_err:
return rc;
}
+int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
+ enum fc_rctl r_ctl)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+ struct bnx2fc_interface *interface;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *seq_clnp_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ int task_idx, index;
+ u16 xid;
+ int rc = 0;
+
+ BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
+ orig_io_req->xid);
+ kref_get(&orig_io_req->refcount);
+
+ port = orig_io_req->port;
+ interface = port->priv;
+ lport = port->lport;
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
+ rc = -ENOMEM;
+ goto cleanup_err;
+ }
+
+ seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
+ if (!seq_clnp_req) {
+ printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ rc = -ENOMEM;
+ kfree(cb_arg);
+ goto cleanup_err;
+ }
+ /* Initialize rest of io_req fields */
+ seq_clnp_req->sc_cmd = NULL;
+ seq_clnp_req->port = port;
+ seq_clnp_req->tgt = tgt;
+ seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+ xid = seq_clnp_req->xid;
+
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *)
+ interface->hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ cb_arg->aborted_io_req = orig_io_req;
+ cb_arg->io_req = seq_clnp_req;
+ cb_arg->r_ctl = r_ctl;
+ cb_arg->offset = offset;
+ seq_clnp_req->cb_arg = cb_arg;
+
+ printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
+ bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+cleanup_err:
+ return rc;
+}
+
int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
struct fc_lport *lport;
@@ -1156,6 +1226,42 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
return rc;
}
+void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 rx_state)
+{
+ struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
+ struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
+ u32 offset = cb_arg->offset;
+ enum fc_rctl r_ctl = cb_arg->r_ctl;
+ int rc = 0;
+ struct bnx2fc_rport *tgt = orig_io_req->tgt;
+
+ BNX2FC_IO_DBG(orig_io_req, "Entered process_seq_cleanup_compl xid = 0x%x, "
+ "cmd_type = %d\n",
+ seq_clnp_req->xid, seq_clnp_req->cmd_type);
+
+ if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
+ printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
+ seq_clnp_req->xid);
+ goto free_cb_arg;
+ }
+ kref_get(&orig_io_req->refcount);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (rc)
+ printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
+ " IO will abort\n");
+ seq_clnp_req->cb_arg = NULL;
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+free_cb_arg:
+ kfree(cb_arg);
+ return;
+}
+
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq)
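Both new paths locate their firmware task context entry by splitting the XID into a page number and an offset within that page (task_idx = xid / BNX2FC_TASKS_PER_PAGE, index = xid % BNX2FC_TASKS_PER_PAGE). A tiny standalone illustration of that split; the value used for TASKS_PER_PAGE here is assumed for demonstration and is not taken from the driver:

#include <stdio.h>

/* Assumed tasks-per-page value, for demonstration only. */
#define TASKS_PER_PAGE 256

int main(void)
{
	unsigned short xid = 0x123;		/* example task XID */
	int task_idx = xid / TASKS_PER_PAGE;	/* which task-context page */
	int index = xid % TASKS_PER_PAGE;	/* entry within that page */

	/* For xid 0x123 (291) this prints: task_idx=1 index=35 */
	printf("task_idx=%d index=%d\n", task_idx, index);
	return 0;
}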