author     Martin K. Petersen <martin.petersen@oracle.com>  2023-08-25 17:07:34 -0400
committer  Martin K. Petersen <martin.petersen@oracle.com>  2023-08-25 17:07:34 -0400
commit     530e86c745ae3342b1df5e8f38529b9f8a6cac17
tree       633e96532d484312f69f003936f5c65c49a9c46b /drivers/scsi
parent     749652a1dee649c797a1493991429f0e230f8c64
parent     659d36cc732a7de8c6ca2ae347db1a5401a1b93c
Merge patch series "qla2xxx driver misc features"
Nilesh Javali <njavali@marvell.com> says:

Martin,

Please apply the qla2xxx driver miscellaneous features and bug fixes to
the scsi tree at your earliest convenience.

Link: https://lore.kernel.org/r/20230821130045.34850-1-njavali@marvell.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c       2
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c        7
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h        1
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h       57
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c       10
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h       14
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c      36
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h    57
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c     321
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c      164
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c        4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c     403
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h      17
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h         4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c        75
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h    6
16 files changed, 1085 insertions, 93 deletions
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b00222459607..44449c70a375 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -3093,8 +3093,6 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
scsi_host_set_prot(vha->host,
prot | SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d7e8454304ce..691ef827a5ab 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,13 +12,12 @@
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0199 | |
* | Mailbox commands | 0x1206 | 0x11a5-0x11ff |
- * | Device Discovery | 0x2134 | 0x210e-0x2115 |
- * | | | 0x211c-0x2128 |
- * | | | 0x212c-0x2134 |
+ * | Device Discovery | 0x2134 | 0x2112-0x2115 |
+ * | | | 0x2127-0x2128 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
- * | | | 0x302d,0x3033 |
+ * | | | 0x302e,0x3033 |
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4023 | 0x4002,0x4013 |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 70482b55d240..54f0a412226f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -368,6 +368,7 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...);
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
#define ql_dbg_edif 0x00000400 /* edif and purex debug */
+#define ql_dbg_unsol 0x00000100 /* Unsolicited path debug */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b3b1df34afd3..9ec39bcd41b5 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -346,6 +346,12 @@ struct name_list_extended {
u8 sent;
};
+struct qla_nvme_fc_rjt {
+ struct fcnvme_ls_rjt *c;
+ dma_addr_t cdma;
+ u16 size;
+};
+
struct els_reject {
struct fc_els_ls_rjt *c;
dma_addr_t cdma;
@@ -503,6 +509,20 @@ struct ct_arg {
port_id_t id;
};
+struct qla_nvme_lsrjt_pt_arg {
+ struct fc_port *fcport;
+ u8 opcode;
+ u8 vp_idx;
+ u8 reason;
+ u8 explanation;
+ __le16 nport_handle;
+ u16 control_flags;
+ __le16 ox_id;
+ __le32 xchg_address;
+ u32 tx_byte_count, rx_byte_count;
+ dma_addr_t tx_addr, rx_addr;
+};
+
/*
* SRB extensions.
*/
@@ -611,13 +631,16 @@ struct srb_iocb {
void *desc;
/* These are only used with ls4 requests */
- int cmd_len;
- int rsp_len;
+ __le32 cmd_len;
+ __le32 rsp_len;
dma_addr_t cmd_dma;
dma_addr_t rsp_dma;
enum nvmefc_fcp_datadir dir;
uint32_t dl;
uint32_t timeout_sec;
+ __le32 exchange_address;
+ __le16 nport_handle;
+ __le16 ox_id;
struct list_head entry;
} nvme;
struct {
@@ -707,6 +730,10 @@ typedef struct srb {
struct fc_port *fcport;
struct scsi_qla_host *vha;
unsigned int start_timer:1;
+ unsigned int abort:1;
+ unsigned int aborted:1;
+ unsigned int completed:1;
+ unsigned int unsol_rsp:1;
uint32_t handle;
uint16_t flags;
@@ -2542,6 +2569,7 @@ enum rscn_addr_format {
typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
+ struct list_head unsol_ctx_head;
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
@@ -3742,6 +3770,16 @@ struct qla_fw_resources {
u16 pad;
};
+struct qla_fw_res {
+ u16 iocb_total;
+ u16 iocb_limit;
+ atomic_t iocb_used;
+
+ u16 exch_total;
+ u16 exch_limit;
+ atomic_t exch_used;
+};
+
#define QLA_IOCB_PCT_LIMIT 95
struct qla_buf_pool {
@@ -3787,6 +3825,12 @@ struct qla_qpair {
uint16_t id; /* qp number used with FW */
uint16_t vp_idx; /* vport ID */
+
+ uint16_t dsd_inuse;
+ uint16_t dsd_avail;
+ struct list_head dsd_list;
+#define NUM_DSD_CHAIN 4096
+
mempool_t *srb_mempool;
struct pci_dev *pdev;
@@ -4384,7 +4428,6 @@ struct qla_hw_data {
uint8_t aen_mbx_count;
atomic_t num_pend_mbx_stage1;
atomic_t num_pend_mbx_stage2;
- atomic_t num_pend_mbx_stage3;
uint16_t frame_payload_size;
uint32_t login_retry_count;
@@ -4715,11 +4758,6 @@ struct qla_hw_data {
struct fw_blob *hablob;
struct qla82xx_legacy_intr_set nx_legacy_intr;
- uint16_t gbl_dsd_inuse;
- uint16_t gbl_dsd_avail;
- struct list_head gbl_dsd_list;
-#define NUM_DSD_CHAIN 4096
-
uint8_t fw_type;
uint32_t file_prd_off; /* File firmware product offset */
@@ -4801,6 +4839,8 @@ struct qla_hw_data {
struct els_reject elsrej;
u8 edif_post_stop_cnt_down;
struct qla_vp_map *vp_map;
+ struct qla_nvme_fc_rjt lsrjt;
+ struct qla_fw_res fwres ____cacheline_aligned;
};
#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
@@ -4833,6 +4873,7 @@ struct active_regions {
* is variable) starting at "iocb".
*/
struct purex_item {
+ void *purls_context;
struct list_head list;
struct scsi_qla_host *vha;
void (*process_item)(struct scsi_qla_host *vha,
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 1925cc6897b6..f060e593685d 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -276,6 +276,16 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
exch_used, ha->base_qpair->fwres.exch_limit);
+
+ if (ql2xenforce_iocb_limit == 2) {
+ iocbs_used = atomic_read(&ha->fwres.iocb_used);
+ exch_used = atomic_read(&ha->fwres.exch_used);
+ seq_printf(s, " estimate iocb2 used [%d] high water limit [%d]\n",
+ iocbs_used, ha->fwres.iocb_limit);
+
+ seq_printf(s, " estimate exchange2 used[%d] high water limit [%d] \n",
+ exch_used, ha->fwres.exch_limit);
+ }
}
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 816c0b9ecd0e..09cb9413670a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -603,7 +603,11 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id);
fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
-void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp);
+void qla24xx_queue_purex_item(scsi_qla_host_t *, struct purex_item *,
+ void (*process_item)(struct scsi_qla_host *,
+ struct purex_item *));
+void __qla_consume_iocb(struct scsi_qla_host *, void **, struct rsp_que **);
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -666,9 +670,11 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *);
extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *);
extern int qla2x00_mailbox_passthru(struct bsg_job *bsg_job);
-int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, void **pkt,
- struct rsp_que **rsp, u8 *buf, u32 buf_len);
-
+int qla2x00_sys_ld_info(struct bsg_job *bsg_job);
+int __qla_copy_purex_to_buffer(struct scsi_qla_host *, void **,
+ struct rsp_que **, u8 *, u32);
+struct purex_item *qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha,
+ void **pkt, struct rsp_que **rsp, bool is_purls, bool byte_order);
int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in,
uint16_t *mbx_out);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 059175f2c8f5..82077edfda1f 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2223,6 +2223,8 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
rval = QLA_FUNCTION_FAILED;
}
}
+ if (tm_iocb->u.tmf.data)
+ rval = tm_iocb->u.tmf.data;
done_free_sp:
/* ref: INIT */
@@ -4203,7 +4205,7 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
u8 i;
struct qla_hw_data *ha = vha->hw;
- __qla_adjust_iocb_limit(ha->base_qpair);
+ __qla_adjust_iocb_limit(ha->base_qpair);
ha->base_qpair->fwres.iocbs_used = 0;
ha->base_qpair->fwres.exch_used = 0;
@@ -4214,6 +4216,14 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
ha->queue_pair_map[i]->fwres.exch_used = 0;
}
}
+
+ ha->fwres.iocb_total = ha->orig_fw_iocb_count;
+ ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ ha->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+ atomic_set(&ha->fwres.iocb_used, 0);
+ atomic_set(&ha->fwres.exch_used, 0);
}
void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
@@ -5554,6 +5564,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
+ INIT_LIST_HEAD(&fcport->unsol_ctx_head);
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
@@ -5596,7 +5607,7 @@ static void qla_get_login_template(scsi_qla_host_t *vha)
__be32 *q;
memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size);
+ sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
ha->init_cb, sz);
if (rval != QLA_SUCCESS) {
@@ -7390,14 +7401,15 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
}
/* purge MBox commands */
- if (atomic_read(&ha->num_pend_mbx_stage3)) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
complete(&ha->mbx_intr_comp);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
i = 0;
- while (atomic_read(&ha->num_pend_mbx_stage3) ||
- atomic_read(&ha->num_pend_mbx_stage2) ||
+ while (atomic_read(&ha->num_pend_mbx_stage2) ||
atomic_read(&ha->num_pend_mbx_stage1)) {
msleep(20);
i++;
@@ -9643,6 +9655,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->vp_idx = vp_idx;
qpair->fw_started = ha->flags.fw_started;
INIT_LIST_HEAD(&qpair->hints_list);
+ INIT_LIST_HEAD(&qpair->dsd_list);
qpair->chip_reset = ha->base_qpair->chip_reset;
qpair->enable_class_2 = ha->base_qpair->enable_class_2;
qpair->enable_explicit_conf =
@@ -9771,6 +9784,19 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
if (ret != QLA_SUCCESS)
goto fail;
+ if (!list_empty(&qpair->dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &qpair->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
+ }
+ }
+
mutex_lock(&ha->mq_lock);
ha->queue_pair_map[qpair->id] = NULL;
clear_bit(qpair->id, ha->qpair_qid_map);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 0167e85ba058..0556969f6dc1 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -386,6 +386,7 @@ enum {
RESOURCE_IOCB = BIT_0,
RESOURCE_EXCH = BIT_1, /* exchange */
RESOURCE_FORCE = BIT_2,
+ RESOURCE_HA = BIT_3,
};
static inline int
@@ -393,7 +394,7 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
u16 iocbs_used, i;
u16 exch_used;
- struct qla_hw_data *ha = qp->vha->hw;
+ struct qla_hw_data *ha = qp->hw;
if (!ql2xenforce_iocb_limit) {
iores->res_type = RESOURCE_NONE;
@@ -428,15 +429,69 @@ qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
return -ENOSPC;
}
}
+
+ if (ql2xenforce_iocb_limit == 2) {
+ if ((iores->iocb_cnt + atomic_read(&ha->fwres.iocb_used)) >=
+ ha->fwres.iocb_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ if ((iores->exch_cnt + atomic_read(&ha->fwres.exch_used)) >=
+ ha->fwres.exch_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+ }
+
force:
qp->fwres.iocbs_used += iores->iocb_cnt;
qp->fwres.exch_used += iores->exch_cnt;
+ if (ql2xenforce_iocb_limit == 2) {
+ atomic_add(iores->iocb_cnt, &ha->fwres.iocb_used);
+ atomic_add(iores->exch_cnt, &ha->fwres.exch_used);
+ iores->res_type |= RESOURCE_HA;
+ }
return 0;
}
+/*
+ * decrement to zero. This routine will not decrement below zero
+ * @v: pointer of type atomic_t
+ * @amount: amount to decrement from v
+ */
+static void qla_atomic_dtz(atomic_t *v, int amount)
+{
+ int c, old, dec;
+
+ c = atomic_read(v);
+ for (;;) {
+ dec = c - amount;
+ if (unlikely(dec < 0))
+ dec = 0;
+
+ old = atomic_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+}
+
static inline void
qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
+ struct qla_hw_data *ha = qp->hw;
+
+ if (iores->res_type & RESOURCE_HA) {
+ if (iores->res_type & RESOURCE_IOCB)
+ qla_atomic_dtz(&ha->fwres.iocb_used, iores->iocb_cnt);
+
+ if (iores->res_type & RESOURCE_EXCH)
+ qla_atomic_dtz(&ha->fwres.exch_used, iores->exch_cnt);
+ }
+
if (iores->res_type & RESOURCE_IOCB) {
if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
qp->fwres.iocbs_used -= iores->iocb_cnt;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 1c6e300ed3ab..4450c0d000cc 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,6 +11,7 @@
#include <scsi/scsi_tcq.h>
+static int qla_start_scsi_type6(srb_t *sp);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
* @sp: SCSI command
@@ -590,8 +591,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint16_t tot_dsds)
{
struct dsd64 *cur_dsd = NULL, *next_dsd;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
struct scsi_cmnd *cmd;
struct scatterlist *cur_seg;
uint8_t avail_dsds;
@@ -613,9 +612,6 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
return 0;
}
- vha = sp->vha;
- ha = vha->hw;
-
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
@@ -636,14 +632,13 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
tot_dsds -= avail_dsds;
dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
- dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
- struct dsd_dma, list);
+ dsd_ptr = list_first_entry(&qpair->dsd_list, struct dsd_dma, list);
next_dsd = dsd_ptr->dsd_addr;
list_del(&dsd_ptr->list);
- ha->gbl_dsd_avail--;
+ qpair->dsd_avail--;
list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
ctx->dsd_use_cnt++;
- ha->gbl_dsd_inuse++;
+ qpair->dsd_inuse++;
if (first_iocb) {
first_iocb = 0;
@@ -1722,6 +1717,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
if (cmd->cmd_len <= 16)
return qla24xx_start_scsi(sp);
+ else
+ return qla_start_scsi_type6(sp);
}
/* Setup device pointers. */
@@ -2101,6 +2098,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
if (cmd->cmd_len <= 16)
return qla2xxx_start_scsi_mq(sp);
+ else
+ return qla_start_scsi_type6(sp);
}
spin_lock_irqsave(&qpair->qp_lock, flags);
@@ -3367,6 +3366,7 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ struct qla_qpair *qpair = sp->qpair;
/* Setup device pointers. */
reg = &ha->iobase->isp82;
@@ -3415,18 +3415,18 @@ qla82xx_start_scsi(srb_t *sp)
uint16_t i;
more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
ql_dbg(ql_dbg_io, vha, 0x300d,
"Num of DSD list %d is than %d for cmd=%p.\n",
- more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN,
cmd);
goto queuing_error;
}
- if (more_dsd_lists <= ha->gbl_dsd_avail)
+ if (more_dsd_lists <= qpair->dsd_avail)
goto sufficient_dsds;
else
- more_dsd_lists -= ha->gbl_dsd_avail;
+ more_dsd_lists -= qpair->dsd_avail;
for (i = 0; i < more_dsd_lists; i++) {
dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
@@ -3446,8 +3446,8 @@ qla82xx_start_scsi(srb_t *sp)
"for cmd=%p.\n", cmd);
goto queuing_error;
}
- list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
- ha->gbl_dsd_avail++;
+ list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+ qpair->dsd_avail++;
}
sufficient_dsds:
@@ -3766,21 +3766,28 @@ qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
nvme = &sp->u.iocb_cmd;
cmd_pkt->entry_type = PT_LS4_REQUEST;
cmd_pkt->entry_count = 1;
- cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
-
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ if (sp->unsol_rsp) {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_LS4_RESPONDER << CF_LS4_SHIFT);
+ cmd_pkt->nport_handle = nvme->u.nvme.nport_handle;
+ cmd_pkt->exchange_address = nvme->u.nvme.exchange_address;
+ } else {
+ cmd_pkt->control_flags =
+ cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+ cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
+ cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len;
+ put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
+ }
+
cmd_pkt->tx_dseg_count = cpu_to_le16(1);
- cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
- cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
+ cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
+ cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len;
put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
-
- cmd_pkt->rx_dseg_count = cpu_to_le16(1);
- cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
- cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
- put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
}
static void
@@ -4197,3 +4204,267 @@ queuing_error:
return rval;
}
+
+/**
+ * qla_start_scsi_type6() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+static int
+qla_start_scsi_type6(srb_t *sp)
+{
+ int nseg;
+ unsigned long flags;
+ uint32_t *clr_ptr;
+ uint32_t handle;
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp;
+ struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair = sp->qpair;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+ __be32 *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+
+ /* Acquire qpair specific lock */
+ spin_lock_irqsave(&qpair->qp_lock, flags);
+
+ /* Setup qpair pointers */
+ req = qpair->req;
+ rsp = qpair->rsp;
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else {
+ nseg = 0;
+ }
+
+ tot_dsds = nseg;
+
+ /* even though the driver only needs 1 T6 IOCB, FW still converts DSDs to Continuation IOCBs */
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
+ sp->iores.iocb_cnt = req_cnt;
+
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
+ goto queuing_error;
+
+ more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + qpair->dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x3028,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + qpair->dsd_inuse, NUM_DSD_CHAIN, cmd);
+ goto queuing_error;
+ }
+
+ if (more_dsd_lists <= qpair->dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= qpair->dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x3029,
+ "Failed to allocate memory for dsd_dma for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ INIT_LIST_HEAD(&dsd_ptr->list);
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x302a,
+ "Failed to allocate memory for dsd_addr for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &qpair->dsd_list);
+ qpair->dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ if (req->cnt < (req_cnt + 2)) {
+ if (IS_SHADOW_REG_CAPABLE(ha)) {
+ cnt = *req->out_ptr;
+ } else {
+ cnt = (uint16_t)rd_reg_dword_relaxed(req->req_q_out);
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ goto queuing_error;
+ }
+
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length - (req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+ }
+
+ ctx = &sp->u.scmd.ct6_ctx;
+
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3031,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if (cmd->cmd_len % 4 ||
+ cmd->cmd_len > QLA_CDB_BUF_SIZE) {
+ /*
+ * SCSI commands longer than 16 bytes must be a
+ * multiple of 4 and must not exceed QLA_CDB_BUF_SIZE.
+ */
+ ql_log(ql_log_warn, vha, 0x3033,
+ "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+ cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = make_handle(req->id, handle);
+
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number */
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds);
+
+ int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ put_unaligned_le64(ctx->fcp_cmnd_dma,
+ &cmd_pkt->fcp_cmnd_dseg_address);
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+ wmb();
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else {
+ req->ring_ptr++;
+ }
+
+ sp->qpair->cmd_cnt++;
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ wrt_reg_dword(req->req_q_in, req->ring_index);
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ qla_put_fw_resources(sp->qpair, &sp->iores);
+
+ if (sp->u.scmd.crc_ctx) {
+ mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+ sp->u.scmd.crc_ctx = NULL;
+ }
+
+ spin_unlock_irqrestore(&qpair->qp_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1f42a413b598..e98788191897 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -56,6 +56,22 @@ const char *const port_state_str[] = {
[FCS_ONLINE] = "ONLINE"
};
+#define SFP_DISABLE_LASER_INITIATED 0x15 /* Sub code of 8070 AEN */
+#define SFP_ENABLE_LASER_INITIATED 0x16 /* Sub code of 8070 AEN */
+
+static inline void display_Laser_info(scsi_qla_host_t *vha,
+ u16 mb1, u16 mb2, u16 mb3) {
+
+ if (mb1 == SFP_DISABLE_LASER_INITIATED)
+ ql_log(ql_log_warn, vha, 0xf0a2,
+ "SFP temperature (%d C) reached/exceeded the threshold (%d C). Laser is disabled.\n",
+ mb3, mb2);
+ if (mb1 == SFP_ENABLE_LASER_INITIATED)
+ ql_log(ql_log_warn, vha, 0xf0a3,
+ "SFP temperature (%d C) reached normal operating level. Laser is enabled.\n",
+ mb3);
+}
+
static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
@@ -823,6 +839,135 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
}
}
+/**
+ * qla27xx_copy_multiple_pkt() - Copy over purex/purls packets that can
+ * span over multiple IOCBs.
+ * @vha: SCSI driver HA context
+ * @pkt: ELS packet
+ * @rsp: Response queue
+ * @is_purls: True, for Unsolicited Received FC-NVMe LS rsp IOCB
+ * false, for Unsolicited Received ELS IOCB
+ * @byte_order: True, to change the byte ordering of iocb payload
+ */
+struct purex_item *
+qla27xx_copy_multiple_pkt(struct scsi_qla_host *vha, void **pkt,
+ struct rsp_que **rsp, bool is_purls,
+ bool byte_order)
+{
+ struct purex_entry_24xx *purex = NULL;
+ struct pt_ls4_rx_unsol *purls = NULL;
+ struct rsp_que *rsp_q = *rsp;
+ sts_cont_entry_t *new_pkt;
+ uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
+ uint16_t buffer_copy_offset = 0, payload_size = 0;
+ uint16_t entry_count, entry_count_remaining;
+ struct purex_item *item;
+ void *iocb_pkt = NULL;
+
+ if (is_purls) {
+ purls = *pkt;
+ total_bytes = (le16_to_cpu(purls->frame_size) & 0x0FFF) -
+ PURX_ELS_HEADER_SIZE;
+ entry_count = entry_count_remaining = purls->entry_count;
+ payload_size = sizeof(purls->payload);
+ } else {
+ purex = *pkt;
+ total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) -
+ PURX_ELS_HEADER_SIZE;
+ entry_count = entry_count_remaining = purex->entry_count;
+ payload_size = sizeof(purex->els_frame_payload);
+ }
+
+ pending_bytes = total_bytes;
+ no_bytes = (pending_bytes > payload_size) ? payload_size :
+ pending_bytes;
+ ql_dbg(ql_dbg_async, vha, 0x509a,
+ "%s LS, frame_size 0x%x, entry count %d\n",
+ (is_purls ? "PURLS" : "FPIN"), total_bytes, entry_count);
+
+ item = qla24xx_alloc_purex_item(vha, total_bytes);
+ if (!item)
+ return item;
+
+ iocb_pkt = &item->iocb;
+
+ if (is_purls)
+ memcpy(iocb_pkt, &purls->payload[0], no_bytes);
+ else
+ memcpy(iocb_pkt, &purex->els_frame_payload[0], no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+
+ if (is_purls)
+ ((response_t *)purls)->signature = RESPONSE_PROCESSED;
+ else
+ ((response_t *)purex)->signature = RESPONSE_PROCESSED;
+ wmb();
+
+ do {
+ while ((total_bytes > 0) && (entry_count_remaining > 0)) {
+ if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) {
+ ql_dbg(ql_dbg_async, vha, 0x5084,
+ "Ran out of IOCBs, partial data 0x%x\n",
+ buffer_copy_offset);
+ cpu_relax();
+ continue;
+ }
+
+ new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
+ *pkt = new_pkt;
+
+ if (new_pkt->entry_type != STATUS_CONT_TYPE) {
+ ql_log(ql_log_warn, vha, 0x507a,
+ "Unexpected IOCB type, partial data 0x%x\n",
+ buffer_copy_offset);
+ break;
+ }
+
+ rsp_q->ring_index++;
+ if (rsp_q->ring_index == rsp_q->length) {
+ rsp_q->ring_index = 0;
+ rsp_q->ring_ptr = rsp_q->ring;
+ } else {
+ rsp_q->ring_ptr++;
+ }
+ no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+ sizeof(new_pkt->data) : pending_bytes;
+ if ((buffer_copy_offset + no_bytes) <= total_bytes) {
+ memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+ new_pkt->data, no_bytes);
+ buffer_copy_offset += no_bytes;
+ pending_bytes -= no_bytes;
+ --entry_count_remaining;
+ } else {
+ ql_log(ql_log_warn, vha, 0x5044,
+ "Attempt to copy more that we got, optimizing..%x\n",
+ buffer_copy_offset);
+ memcpy(((uint8_t *)iocb_pkt + buffer_copy_offset),
+ new_pkt->data,
+ total_bytes - buffer_copy_offset);
+ }
+
+ ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
+ wmb();
+ }
+
+ if (pending_bytes != 0 || entry_count_remaining != 0) {
+ ql_log(ql_log_fatal, vha, 0x508b,
+ "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n",
+ total_bytes, entry_count_remaining);
+ qla24xx_free_purex_item(item);
+ return NULL;
+ }
+ } while (entry_count_remaining > 0);
+
+ if (byte_order)
+ host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes);
+
+ return item;
+}
+
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
@@ -958,7 +1103,7 @@ initialize_purex_header:
return item;
}
-static void
+void
qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
void (*process_item)(struct scsi_qla_host *vha,
struct purex_item *pkt))
@@ -1798,6 +1943,8 @@ global_port_update:
break;
case MBA_TEMPERATURE_ALERT:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ display_Laser_info(vha, mb[1], mb[2], mb[3]);
ql_dbg(ql_dbg_async, vha, 0x505e,
"TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
break;
@@ -3811,6 +3958,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct purex_entry_24xx *purex_entry;
struct purex_item *pure_item;
+ struct pt_ls4_rx_unsol *p;
u16 rsp_in = 0, cur_ring_index;
int is_shadow_hba;
@@ -3983,7 +4131,19 @@ process_err:
qla28xx_sa_update_iocb_entry(vha, rsp->req,
(struct sa_update_28xx *)pkt);
break;
-
+ case PT_LS4_UNSOL:
+ p = (void *)pkt;
+ if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) {
+ rsp->ring_ptr = (response_t *)pkt;
+ rsp->ring_index = cur_ring_index;
+
+ ql_dbg(ql_dbg_init, vha, 0x2124,
+ "Defer processing UNSOL LS req opcode %#x...\n",
+ p->payload[0]);
+ return;
+ }
+ qla2xxx_process_purls_iocb((void **)&pkt, &rsp);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b05f93037875..21ec32b4fb28 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -273,7 +273,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
wait_time = jiffies;
- atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
ql_dbg(ql_dbg_mbx, vha, 0x117a,
@@ -290,7 +289,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
@@ -302,11 +300,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
atomic_dec(&ha->num_pend_mbx_stage2);
- atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
goto premature_exit;
}
- atomic_dec(&ha->num_pend_mbx_stage3);
if (time_after(jiffies, wait_time + 5 * HZ))
ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 9941b38eac93..62a67662cbf3 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -12,6 +12,26 @@
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
+static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
+ struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a,
+ bool is_xchg_terminate);
+
+struct qla_nvme_unsol_ctx {
+ struct list_head elem;
+ struct scsi_qla_host *vha;
+ struct fc_port *fcport;
+ struct srb *sp;
+ struct nvmefc_ls_rsp lsrsp;
+ struct nvmefc_ls_rsp *fd_rsp;
+ struct work_struct lsrsp_work;
+ struct work_struct abort_work;
+ __le32 exchange_address;
+ __le16 nport_handle;
+ __le16 ox_id;
+ int comp_status;
+ spinlock_t cmd_lock;
+};
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
@@ -216,6 +236,55 @@ static void qla_nvme_sp_ls_done(srb_t *sp, int res)
schedule_work(&priv->ls_work);
}
+static void qla_nvme_release_lsrsp_cmd_kref(struct kref *kref)
+{
+ struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
+ struct nvmefc_ls_rsp *fd_rsp;
+ unsigned long flags;
+
+ if (!uctx) {
+ qla2x00_rel_sp(sp);
+ return;
+ }
+
+ spin_lock_irqsave(&uctx->cmd_lock, flags);
+ uctx->sp = NULL;
+ sp->priv = NULL;
+ spin_unlock_irqrestore(&uctx->cmd_lock, flags);
+
+ fd_rsp = uctx->fd_rsp;
+
+ list_del(&uctx->elem);
+
+ fd_rsp->done(fd_rsp);
+ kfree(uctx);
+ qla2x00_rel_sp(sp);
+}
+
+static void qla_nvme_lsrsp_complete(struct work_struct *work)
+{
+ struct qla_nvme_unsol_ctx *uctx =
+ container_of(work, struct qla_nvme_unsol_ctx, lsrsp_work);
+
+ kref_put(&uctx->sp->cmd_kref, qla_nvme_release_lsrsp_cmd_kref);
+}
+
+static void qla_nvme_sp_lsrsp_done(srb_t *sp, int res)
+{
+ struct qla_nvme_unsol_ctx *uctx = sp->priv;
+
+ if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
+ return;
+
+ if (res)
+ res = -EINVAL;
+
+ uctx->comp_status = res;
+ INIT_WORK(&uctx->lsrsp_work, qla_nvme_lsrsp_complete);
+ schedule_work(&uctx->lsrsp_work);
+}
+
/* it assumed that QPair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
@@ -288,6 +357,92 @@ out:
kref_put(&sp->cmd_kref, sp->put_fn);
}
+static int qla_nvme_xmt_ls_rsp(struct nvme_fc_local_port *lport,
+ struct nvme_fc_remote_port *rport,
+ struct nvmefc_ls_rsp *fd_resp)
+{
+ struct qla_nvme_unsol_ctx *uctx = container_of(fd_resp,
+ struct qla_nvme_unsol_ctx, lsrsp);
+ struct qla_nvme_rport *qla_rport = rport->private;
+ fc_port_t *fcport = qla_rport->fcport;
+ struct scsi_qla_host *vha = uctx->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct srb_iocb *nvme;
+ srb_t *sp;
+ int rval = QLA_FUNCTION_FAILED;
+ uint8_t cnt = 0;
+
+ if (!fcport || fcport->deleted)
+ goto out;
+
+ if (!ha->flags.fw_started)
+ goto out;
+
+ /* Alloc SRB structure */
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto out;
+
+ sp->type = SRB_NVME_LS;
+ sp->name = "nvme_ls";
+ sp->done = qla_nvme_sp_lsrsp_done;
+ sp->put_fn = qla_nvme_release_lsrsp_cmd_kref;
+ sp->priv = (void *)uctx;
+ sp->unsol_rsp = 1;
+ uctx->sp = sp;
+ spin_lock_init(&uctx->cmd_lock);
+ nvme = &sp->u.iocb_cmd;
+ uctx->fd_rsp = fd_resp;
+ nvme->u.nvme.desc = fd_resp;
+ nvme->u.nvme.dir = 0;
+ nvme->u.nvme.dl = 0;
+ nvme->u.nvme.timeout_sec = 0;
+ nvme->u.nvme.cmd_dma = fd_resp->rspdma;
+ nvme->u.nvme.cmd_len = fd_resp->rsplen;
+ nvme->u.nvme.rsp_len = 0;
+ nvme->u.nvme.rsp_dma = 0;
+ nvme->u.nvme.exchange_address = uctx->exchange_address;
+ nvme->u.nvme.nport_handle = uctx->nport_handle;
+ nvme->u.nvme.ox_id = uctx->ox_id;
+ dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+ le32_to_cpu(fd_resp->rsplen), DMA_TO_DEVICE);
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2122,
+ "Unsol lsreq portid=%06x %8phC exchange_address 0x%x ox_id 0x%x hdl 0x%x\n",
+ fcport->d_id.b24, fcport->port_name, uctx->exchange_address,
+ uctx->ox_id, uctx->nport_handle);
+retry:
+ rval = qla2x00_start_sp(sp);
+ switch (rval) {
+ case QLA_SUCCESS:
+ break;
+ case EAGAIN:
+ msleep(PURLS_MSLEEP_INTERVAL);
+ cnt++;
+ if (cnt < PURLS_RETRY_COUNT)
+ goto retry;
+
+ fallthrough;
+ default:
+ ql_dbg(ql_log_warn, vha, 0x2123,
+ "Failed to xmit Unsol ls response = %d\n", rval);
+ rval = -EIO;
+ qla2x00_rel_sp(sp);
+ goto out;
+ }
+
+ return 0;
+out:
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
+ kfree(uctx);
+ return rval;
+}
+
static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
@@ -355,7 +510,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
nvme->u.nvme.timeout_sec = fd->timeout;
nvme->u.nvme.cmd_dma = fd->rqstdma;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
- fd->rqstlen, DMA_TO_DEVICE);
+ le32_to_cpu(fd->rqstlen), DMA_TO_DEVICE);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -720,6 +875,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
+ .xmt_ls_rsp = qla_nvme_xmt_ls_rsp,
.map_queues = qla_nvme_map_queues,
.max_hw_queues = DEF_NVME_HW_QUEUES,
.max_sgl_segments = 1024,
@@ -924,3 +1080,248 @@ inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
return;
kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}
+
+static void qla_nvme_fc_format_rjt(void *buf, u8 ls_cmd, u8 reason,
+ u8 explanation, u8 vendor)
+{
+ struct fcnvme_ls_rjt *rjt = buf;
+
+ rjt->w0.ls_cmd = FCNVME_LSDESC_RQST;
+ rjt->desc_list_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt));
+ rjt->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
+ rjt->rqst.desc_len =
+ fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
+ rjt->rqst.w0.ls_cmd = ls_cmd;
+ rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
+ rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
+ rjt->rjt.reason_code = reason;
+ rjt->rjt.reason_explanation = explanation;
+ rjt->rjt.vendor = vendor;
+}
+
+static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
+ struct pt_ls4_request *lsrjt_iocb,
+ struct qla_nvme_lsrjt_pt_arg *a)
+{
+ lsrjt_iocb->entry_type = PT_LS4_REQUEST;
+ lsrjt_iocb->entry_count = 1;
+ lsrjt_iocb->sys_define = 0;
+ lsrjt_iocb->entry_status = 0;
+ lsrjt_iocb->handle = QLA_SKIP_HANDLE;
+ lsrjt_iocb->nport_handle = a->nport_handle;
+ lsrjt_iocb->exchange_address = a->xchg_address;
+ lsrjt_iocb->vp_index = a->vp_idx;
+
+ lsrjt_iocb->control_flags = cpu_to_le16(a->control_flags);
+
+ put_unaligned_le64(a->tx_addr, &lsrjt_iocb->dsd[0].address);
+ lsrjt_iocb->dsd[0].length = cpu_to_le32(a->tx_byte_count);
+ lsrjt_iocb->tx_dseg_count = cpu_to_le16(1);
+ lsrjt_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
+
+ put_unaligned_le64(a->rx_addr, &lsrjt_iocb->dsd[1].address);
+ lsrjt_iocb->dsd[1].length = 0;
+ lsrjt_iocb->rx_dseg_count = 0;
+ lsrjt_iocb->rx_byte_count = 0;
+}
+
+static int
+qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
+ struct qla_nvme_lsrjt_pt_arg *a, bool is_xchg_terminate)
+{
+ struct pt_ls4_request *lsrjt_iocb;
+
+ lsrjt_iocb = __qla2x00_alloc_iocbs(qp, NULL);
+ if (!lsrjt_iocb) {
+ ql_log(ql_log_warn, vha, 0x210e,
+ "qla2x00_alloc_iocbs failed.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (!is_xchg_terminate) {
+ qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
+ a->reason, a->explanation, 0);
+
+ a->tx_byte_count = sizeof(struct fcnvme_ls_rjt);
+ a->tx_addr = vha->hw->lsrjt.cdma;
+ a->control_flags = CF_LS4_RESPONDER << CF_LS4_SHIFT;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x211f,
+ "Sending nvme fc ls reject ox_id %04x op %04x\n",
+ a->ox_id, a->opcode);
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
+ vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
+ } else {
+ a->tx_byte_count = 0;
+ a->control_flags = CF_LS4_RESPONDER_TERM << CF_LS4_SHIFT;
+ ql_dbg(ql_dbg_unsol, vha, 0x2110,
+ "Terminate nvme ls xchg 0x%x\n", a->xchg_address);
+ }
+
+ qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
+ /* flush iocb to mem before notifying hw doorbell */
+ wmb();
+ qla2x00_start_iocbs(vha, qp->req);
+ return 0;
+}
+
+/*
+ * qla2xxx_process_purls_pkt() - Pass-up Unsolicited
+ * Received FC-NVMe Link Service pkt to nvme_fc_rcv_ls_req().
+ * LLDD need to provide memory for response buffer, which
+ * will be used to reference the exchange corresponding
+ * to the LS when issuing an ls response. LLDD will have to free
+ * response buffer in lport->ops->xmt_ls_rsp().
+ *
+ * @vha: SCSI qla host
+ * @item: ptr to purex_item
+ */
+static void
+qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
+{
+ struct qla_nvme_unsol_ctx *uctx = item->purls_context;
+ fc_port_t *fcport = uctx->fcport;
+ struct qla_nvme_lsrjt_pt_arg a;
+ int ret = 1;
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ ret = nvme_fc_rcv_ls_req(fcport->nvme_remote_port, &uctx->lsrsp,
+ &item->iocb, item->size);
+#endif
+ if (ret) {
+ ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe tranport ls_req failed\n");
+ memset((void *)&a, 0, sizeof(a));
+ a.vp_idx = vha->vp_idx;
+ a.nport_handle = uctx->nport_handle;
+ a.xchg_address = uctx->exchange_address;
+ qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
+ list_del(&uctx->elem);
+ kfree(uctx);
+ }
+}
+
+static scsi_qla_host_t *
+qla2xxx_get_vha_from_vp_idx(struct qla_hw_data *ha, uint16_t vp_index)
+{
+ scsi_qla_host_t *base_vha, *vha, *tvp;
+ unsigned long flags;
+
+ base_vha = pci_get_drvdata(ha->pdev);
+
+ if (!vp_index && !ha->num_vhosts)
+ return base_vha;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
+ if (vha->vp_idx == vp_index) {
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ return vha;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return NULL;
+}
+
+void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
+{
+ struct nvme_fc_remote_port *rport;
+ struct qla_nvme_rport *qla_rport;
+ struct qla_nvme_lsrjt_pt_arg a;
+ struct pt_ls4_rx_unsol *p = *pkt;
+ struct qla_nvme_unsol_ctx *uctx;
+ struct rsp_que *rsp_q = *rsp;
+ struct qla_hw_data *ha;
+ scsi_qla_host_t *vha;
+ fc_port_t *fcport = NULL;
+ struct purex_item *item;
+ port_id_t d_id = {0};
+ port_id_t id = {0};
+ u8 *opcode;
+ bool xmt_reject = false;
+
+ ha = rsp_q->hw;
+
+ vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
+ if (!vha) {
+ ql_log(ql_log_warn, NULL, 0x2110, "Invalid vp index %d\n", p->vp_index);
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ memset((void *)&a, 0, sizeof(a));
+ opcode = (u8 *)&p->payload[0];
+ a.opcode = opcode[3];
+ a.vp_idx = p->vp_index;
+ a.nport_handle = p->nport_handle;
+ a.ox_id = p->ox_id;
+ a.xchg_address = p->exchange_address;
+
+ id.b.domain = p->s_id.domain;
+ id.b.area = p->s_id.area;
+ id.b.al_pa = p->s_id.al_pa;
+ d_id.b.domain = p->d_id[2];
+ d_id.b.area = p->d_id[1];
+ d_id.b.al_pa = p->d_id[0];
+
+ fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
+ if (!fcport) {
+ ql_dbg(ql_dbg_unsol, vha, 0x211e,
+ "Failed to find sid=%06x did=%06x\n",
+ id.b24, d_id.b24);
+ a.reason = FCNVME_RJT_RC_INV_ASSOC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+ rport = fcport->nvme_remote_port;
+ qla_rport = rport->private;
+
+ item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
+ if (!item) {
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ goto out;
+ }
+
+ uctx = kzalloc(sizeof(*uctx), GFP_ATOMIC);
+ if (!uctx) {
+ ql_log(ql_log_info, vha, 0x2126, "Failed allocate memory\n");
+ a.reason = FCNVME_RJT_RC_LOGIC;
+ a.explanation = FCNVME_RJT_EXP_NONE;
+ xmt_reject = true;
+ kfree(item);
+ goto out;
+ }
+
+ uctx->vha = vha;
+ uctx->fcport = fcport;
+ uctx->exchange_address = p->exchange_address;
+ uctx->nport_handle = p->nport_handle;
+ uctx->ox_id = p->ox_id;
+ qla_rport->uctx = uctx;
+ INIT_LIST_HEAD(&uctx->elem);
+ list_add_tail(&uctx->elem, &fcport->unsol_ctx_head);
+ item->purls_context = (void *)uctx;
+
+ ql_dbg(ql_dbg_unsol, vha, 0x2121,
+ "PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
+ item->iocb.iocb[3], item->size, uctx->exchange_address,
+ fcport->d_id.b24);
+ /* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * ----- -----------------------------------------------
+ * 0000: 00 00 00 05 28 00 00 00 07 00 00 00 08 00 00 00
+ * 0010: ab ec 0f cc 00 00 8d 7d 05 00 00 00 10 00 00 00
+ * 0020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ */
+ ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
+ &item->iocb, item->size);
+
+ qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
+out:
+ if (xmt_reject) {
+ qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
+ __qla_consume_iocb(vha, pkt, rsp);
+ }
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index d299478371b2..a253ac55171b 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -21,6 +21,7 @@
#define Q2T_NVME_NUM_TAGS 2048
#define QLA_MAX_FC_SEGMENTS 64
+struct qla_nvme_unsol_ctx;
struct scsi_qla_host;
struct qla_hw_data;
struct req_que;
@@ -37,6 +38,7 @@ struct nvme_private {
struct qla_nvme_rport {
struct fc_port *fcport;
+ struct qla_nvme_unsol_ctx *uctx;
};
#define COMMAND_NVME 0x88 /* Command Type FC-NVMe IOCB */
@@ -75,6 +77,9 @@ struct cmd_nvme {
struct dsd64 nvme_dsd;
};
+#define PURLS_MSLEEP_INTERVAL 1
+#define PURLS_RETRY_COUNT 5
+
#define PT_LS4_REQUEST 0x89 /* Link Service pass-through IOCB (request) */
struct pt_ls4_request {
uint8_t entry_type;
@@ -118,21 +123,19 @@ struct pt_ls4_rx_unsol {
__le32 exchange_address;
uint8_t d_id[3];
uint8_t r_ctl;
- be_id_t s_id;
+ le_id_t s_id;
uint8_t cs_ctl;
uint8_t f_ctl[3];
uint8_t type;
__le16 seq_cnt;
uint8_t df_ctl;
uint8_t seq_id;
- __le16 rx_id;
- __le16 ox_id;
- __le32 param;
- __le32 desc0;
+ __le16 rx_id;
+ __le16 ox_id;
+ __le32 desc0;
#define PT_LS4_PAYLOAD_OFFSET 0x2c
#define PT_LS4_FIRST_PACKET_LEN 20
- __le32 desc_len;
- __le32 payload[3];
+ __le32 payload[5];
};
/*
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6dc80c8ddf79..5d1bdc15b75c 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -857,7 +857,9 @@ struct fcp_cmnd {
uint8_t task_attribute;
uint8_t task_management;
uint8_t additional_cdb_len;
- uint8_t cdb[260]; /* 256 for CDB len and 4 for FCP_DL */
+#define QLA_CDB_BUF_SIZE 256
+#define QLA_FCP_DL_SIZE 4
+ uint8_t cdb[QLA_CDB_BUF_SIZE + QLA_FCP_DL_SIZE]; /* 256 for CDB len and 4 for FCP_DL */
};
struct dsd_dma {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 03bc3a0b45b6..50db08265c51 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -44,10 +44,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");
-int ql2xenforce_iocb_limit = 1;
+int ql2xenforce_iocb_limit = 2;
module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xenforce_iocb_limit,
- "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
+ "Enforce IOCB throttling, to avoid FW congestion. (default: 2) "
+ "1: track usage per queue, 2: track usage per adapter");
/*
* CT6 CTX allocation cache
@@ -432,6 +433,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
ha->base_qpair->srb_mempool = ha->srb_mempool;
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
+ INIT_LIST_HEAD(&ha->base_qpair->dsd_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
qla_cpu_update(rsp->qpair, raw_smp_processor_id());
@@ -750,9 +752,9 @@ void qla2x00_sp_free_dma(srb_t *sp)
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
- list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
+ sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
+ sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
}
if (sp->flags & SRB_GOT_BUF)
@@ -836,9 +838,9 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
- list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
- ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
- ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
+ list_splice(&ctx1->dsd_list, &sp->qpair->dsd_list);
+ sp->qpair->dsd_inuse -= ctx1->dsd_use_cnt;
+ sp->qpair->dsd_avail += ctx1->dsd_use_cnt;
sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
}
@@ -3007,7 +3009,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->max_exchg = FW_MAX_EXCHANGES_CNT;
atomic_set(&ha->num_pend_mbx_stage1, 0);
atomic_set(&ha->num_pend_mbx_stage2, 0);
- atomic_set(&ha->num_pend_mbx_stage3, 0);
atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
INIT_LIST_HEAD(&ha->tmf_pending);
@@ -3288,6 +3289,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_id = ha->max_fibre_devices;
host->cmd_per_lun = 3;
host->unique_id = host->host_no;
+
+ if (ql2xenabledif && ql2xenabledif != 2) {
+ ql_log(ql_log_warn, base_vha, 0x302d,
+ "Invalid value for ql2xenabledif, resetting it to default (2)\n");
+ ql2xenabledif = 2;
+ }
+
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
host->max_cmd_len = 32;
else
@@ -3524,8 +3532,6 @@ skip_dpc:
base_vha->flags.difdix_supported = 1;
ql_dbg(ql_dbg_init, base_vha, 0x00f1,
"Registering for DIF/DIX type 1 and 3 protection.\n");
- if (ql2xenabledif == 1)
- prot = SHOST_DIX_TYPE0_PROTECTION;
if (ql2xprotmask)
scsi_host_set_prot(host, ql2xprotmask);
else
@@ -4402,7 +4408,6 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"sf_init_cb=%p.\n", ha->sf_init_cb);
}
- INIT_LIST_HEAD(&ha->gbl_dsd_list);
/* Get consistent memory allocated for Async Port-Database. */
if (!IS_FWI2_CAPABLE(ha)) {
@@ -4457,8 +4462,9 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16;
ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev,
- ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL);
-
+ ha->elsrej.size,
+ &ha->elsrej.cdma,
+ GFP_KERNEL);
if (!ha->elsrej.c) {
ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
"Alloc failed for els reject cmd.\n");
@@ -4467,8 +4473,21 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->elsrej.c->er_cmd = ELS_LS_RJT;
ha->elsrej.c->er_reason = ELS_RJT_LOGIC;
ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA;
+
+ ha->lsrjt.size = sizeof(struct fcnvme_ls_rjt);
+ ha->lsrjt.c = dma_alloc_coherent(&ha->pdev->dev, ha->lsrjt.size,
+ &ha->lsrjt.cdma, GFP_KERNEL);
+ if (!ha->lsrjt.c) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff,
+ "Alloc failed for nvme fc reject cmd.\n");
+ goto fail_lsrjt;
+ }
+
return 0;
+fail_lsrjt:
+ dma_free_coherent(&ha->pdev->dev, ha->elsrej.size,
+ ha->elsrej.c, ha->elsrej.cdma);
fail_elsrej:
dma_pool_destroy(ha->purex_dma_pool);
fail_flt:
@@ -4934,18 +4953,16 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
- if (IS_QLA82XX(ha)) {
- if (!list_empty(&ha->gbl_dsd_list)) {
- struct dsd_dma *dsd_ptr, *tdsd_ptr;
-
- /* clean up allocated prev pool */
- list_for_each_entry_safe(dsd_ptr,
- tdsd_ptr, &ha->gbl_dsd_list, list) {
- dma_pool_free(ha->dl_dma_pool,
- dsd_ptr->dsd_addr, dsd_ptr->dsd_list_dma);
- list_del(&dsd_ptr->list);
- kfree(dsd_ptr);
- }
+ if (!list_empty(&ha->base_qpair->dsd_list)) {
+ struct dsd_dma *dsd_ptr, *tdsd_ptr;
+
+ /* clean up allocated prev pool */
+ list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
+ &ha->base_qpair->dsd_list, list) {
+ dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
+ dsd_ptr->dsd_list_dma);
+ list_del(&dsd_ptr->list);
+ kfree(dsd_ptr);
}
}
@@ -5000,6 +5017,12 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->elsrej.c = NULL;
}
+ if (ha->lsrjt.c) {
+ dma_free_coherent(&ha->pdev->dev, ha->lsrjt.size, ha->lsrjt.c,
+ ha->lsrjt.cdma);
+ ha->lsrjt.c = NULL;
+ }
+
ha->init_cb = NULL;
ha->init_cb_dma = 0;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 81bdf6b03241..d903563e969e 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.08.500-k"
+#define QLA2XXX_VERSION "10.02.09.100-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 8
-#define QLA_DRIVER_BETA_VER 500
+#define QLA_DRIVER_PATCH_VER 9
+#define QLA_DRIVER_BETA_VER 100