author     James Smart <jsmart2021@gmail.com>  2019-01-28 11:14:28 -0800
committer  Martin K. Petersen <martin.petersen@oracle.com>  2019-02-05 22:29:09 -0500
commit     c490850a094794e7515737a6939146966c826577 (patch)
tree       7501cd7a6d7150695d0dd1914a8c72ce843de194 /drivers/scsi/lpfc/lpfc_nvme.c
parent     ace44e48b1fc36055a8ff05a778110e726387032 (diff)
scsi: lpfc: Adapt partitioned XRI lists to efficient sharing
The XRI get/put lists were partitioned per hardware queue. However, the adapter rarely had sufficient resources to give a large number of resources per queue. As such, it became common for a cpu to encounter a lack of XRI resources and request the upper io stack to retry after returning a BUSY condition. This occurred even though other cpus were idle and not using their resources.

Create as efficient a scheme as possible to move resources to the cpus that need them. Each cpu maintains a small private pool which it allocates from for io. There is a watermark that the cpu attempts to keep in the private pool. The private pool, when empty, pulls from the cpu's global pool. When the cpu's global pool is empty it will pull from other cpus' global pools. As there are many cpu global pools (one per cpu or hardware queue count), and as each cpu selects which cpu to pull from at different rates and at different times, this creates a randomizing effect that minimizes the number of cpus that will contend with each other when they steal XRIs from another cpu's global pool.

On io completion, a cpu will push the XRI back onto its private pool. A watermark level is maintained for the private pool such that, when it is exceeded, XRIs are moved to the cpu's global pool so that other cpus may allocate them.

On NVME, as heartbeat commands are critical to get placed on the wire, a single expedite pool is maintained. When a heartbeat is to be sent, it will allocate an XRI from the expedite pool rather than the normal cpu private/global pools. On any io completion, if a reduction in the expedite pool is seen, it will be replenished before the XRI is placed on the cpu private pool.

Statistics are added to aid understanding of the XRI levels on each cpu and their behaviors.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
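For orientation, here is a minimal user-space C sketch of the pooling scheme the message above describes. It is not the lpfc implementation: the names (xri_pool, xri_get, xri_put, PVT_LOW_WM, PVT_HIGH_WM) and the counting-based pools are illustrative stand-ins, and the locking is deliberately simple. In the driver itself this logic sits behind the lpfc_get_io_buf() and lpfc_release_io_buf() helpers that the diff below switches the NVME path over to.

/*
 * Illustrative user-space sketch of the per-cpu private/global XRI pools
 * described above; not the lpfc code.  Each "XRI" is just a count here.
 */
#include <pthread.h>

#define NR_CPUS      4
#define PVT_LOW_WM   2    /* top up the private pool when it drops below this */
#define PVT_HIGH_WM  8    /* spill back to the global pool above this         */

struct xri_pool {
	pthread_mutex_t lock;
	int pvt;          /* this cpu's private XRI count */
	int glob;         /* this cpu's global XRI count  */
};

static struct xri_pool pools[NR_CPUS];

static void xri_pools_init(int pvt, int glob)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		pthread_mutex_init(&pools[i].lock, NULL);
		pools[i].pvt  = pvt;
		pools[i].glob = glob;
	}
}

/* Allocate one XRI for @cpu: private pool first, then this cpu's global
 * pool, then steal from another cpu's global pool. */
static int xri_get(int cpu)
{
	struct xri_pool *p = &pools[cpu];
	int i, got = 0;

	pthread_mutex_lock(&p->lock);
	if (p->pvt > 0) {
		p->pvt--;
		got = 1;
	} else if (p->glob > 0) {
		p->glob--;
		got = 1;
	}
	/* Keep the private pool above its low watermark by topping it up
	 * from this cpu's global pool. */
	while (p->pvt < PVT_LOW_WM && p->glob > 0) {
		p->glob--;
		p->pvt++;
	}
	pthread_mutex_unlock(&p->lock);
	if (got)
		return 0;

	/* Out of local XRIs: steal from another cpu's global pool.  The
	 * cpu-dependent starting offset spreads victims out, which is the
	 * randomizing effect the commit message mentions. */
	for (i = 1; i < NR_CPUS; i++) {
		struct xri_pool *v = &pools[(cpu + i) % NR_CPUS];

		pthread_mutex_lock(&v->lock);
		if (v->glob > 0) {
			v->glob--;
			got = 1;
		}
		pthread_mutex_unlock(&v->lock);
		if (got)
			return 0;
	}
	return -1;        /* nothing anywhere: caller reports BUSY upward */
}

/* Completion path: push the XRI back to the private pool and, above the
 * high watermark, hand XRIs back to the global pool for other cpus. */
static void xri_put(int cpu)
{
	struct xri_pool *p = &pools[cpu];

	pthread_mutex_lock(&p->lock);
	p->pvt++;
	while (p->pvt > PVT_HIGH_WM) {
		p->pvt--;
		p->glob++;
	}
	pthread_mutex_unlock(&p->lock);
}

In this sketch a submit path would call xri_get() for the submitting cpu and return BUSY on failure, and the completion handler would call xri_put(), loosely mirroring the lpfc_get_nvme_buf()/lpfc_release_nvme_buf() changes in the diff.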
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_nvme.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c  |  91
1 file changed, 26 insertions, 65 deletions
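One more piece of the scheme before the diff itself: the commit message calls out a dedicated expedite reserve so NVME heartbeat commands can always obtain an XRI. The fragment below is a hedged, single-threaded sketch of that replenish-first rule; EXPEDITE_TARGET, xri_get_expedite() and xri_put_completion() are made-up names rather than lpfc identifiers, and locking plus the per-cpu pools from the sketch above are omitted.

#include <stdbool.h>

#define EXPEDITE_TARGET 2       /* illustrative reserve depth, not the lpfc value */

static int expedite_cnt = EXPEDITE_TARGET;
static int pvt_cnt;             /* stand-in for one cpu's private pool */

/* Heartbeat (expedite) allocation bypasses the normal per-cpu pools. */
static bool xri_get_expedite(void)
{
	if (expedite_cnt > 0) {
		expedite_cnt--;
		return true;
	}
	return false;
}

/* On any io completion, refill the expedite reserve before returning the
 * XRI to the cpu's private pool. */
static void xri_put_completion(void)
{
	if (expedite_cnt < EXPEDITE_TARGET)
		expedite_cnt++;
	else
		pvt_cnt++;
}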
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index f1f697cd7e97..0c6c91d39e2f 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -56,12 +56,12 @@
/* NVME initiator-based functions */
-static struct lpfc_nvme_buf *
+static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
int idx, int expedite);
static void
-lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
static struct nvme_fc_port_template lpfc_nvme_template;
@@ -760,7 +760,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
- struct lpfc_nvme_buf *lpfc_ncmd,
+ struct lpfc_io_buf *lpfc_ncmd,
struct nvmefc_fcp_req *nCmd)
{
struct lpfc_hba *phba = vport->phba;
@@ -857,7 +857,7 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
- struct lpfc_nvme_buf *lpfc_ncmd)
+ struct lpfc_io_buf *lpfc_ncmd)
{
uint64_t seg1, seg2, seg3, seg4;
uint64_t segsum;
@@ -955,8 +955,8 @@ static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
struct lpfc_wcqe_complete *wcqe)
{
- struct lpfc_nvme_buf *lpfc_ncmd =
- (struct lpfc_nvme_buf *)pwqeIn->context1;
+ struct lpfc_io_buf *lpfc_ncmd =
+ (struct lpfc_io_buf *)pwqeIn->context1;
struct lpfc_vport *vport = pwqeIn->vport;
struct nvmefc_fcp_req *nCmd;
struct nvme_fc_ersp_iu *ep;
@@ -1181,7 +1181,7 @@ out_err:
**/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
- struct lpfc_nvme_buf *lpfc_ncmd,
+ struct lpfc_io_buf *lpfc_ncmd,
struct lpfc_nodelist *pnode,
struct lpfc_fc4_ctrl_stat *cstat)
{
@@ -1287,7 +1287,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
**/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
- struct lpfc_nvme_buf *lpfc_ncmd)
+ struct lpfc_io_buf *lpfc_ncmd)
{
struct lpfc_hba *phba = vport->phba;
struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
@@ -1428,7 +1428,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_vport *vport;
struct lpfc_hba *phba;
struct lpfc_nodelist *ndlp;
- struct lpfc_nvme_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd;
struct lpfc_nvme_rport *rport;
struct lpfc_nvme_qhandle *lpfc_queue_info;
struct lpfc_nvme_fcpreq_priv *freqpriv;
@@ -1616,6 +1616,9 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
goto out_free_nvme_buf;
}
+ if (phba->cfg_xri_rebalancing)
+ lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
+
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_ncmd->ts_cmd_start)
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
@@ -1704,7 +1707,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_lport *lport;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- struct lpfc_nvme_buf *lpfc_nbuf;
+ struct lpfc_io_buf *lpfc_nbuf;
struct lpfc_iocbq *abts_buf;
struct lpfc_iocbq *nvmereq_wqe;
struct lpfc_nvme_fcpreq_priv *freqpriv;
@@ -1911,22 +1914,6 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
};
-static inline struct lpfc_nvme_buf *
-lpfc_nvme_buf(struct lpfc_hba *phba, int idx)
-{
- struct lpfc_sli4_hdw_queue *qp;
- struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
-
- qp = &phba->sli4_hba.hdwq[idx];
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
- &qp->lpfc_io_buf_list_get, list) {
- list_del_init(&lpfc_ncmd->list);
- qp->get_io_bufs--;
- return lpfc_ncmd;
- }
- return NULL;
-}
-
/**
* lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
@@ -1938,34 +1925,17 @@ lpfc_nvme_buf(struct lpfc_hba *phba, int idx)
* NULL - Error
* Pointer to lpfc_nvme_buf - Success
**/
-static struct lpfc_nvme_buf *
+static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
int idx, int expedite)
{
- struct lpfc_nvme_buf *lpfc_ncmd = NULL;
+ struct lpfc_io_buf *lpfc_ncmd;
struct lpfc_sli4_hdw_queue *qp;
struct sli4_sge *sgl;
struct lpfc_iocbq *pwqeq;
union lpfc_wqe128 *wqe;
- unsigned long iflag = 0;
- qp = &phba->sli4_hba.hdwq[idx];
- spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag);
- if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
- lpfc_ncmd = lpfc_nvme_buf(phba, idx);
- if (!lpfc_ncmd) {
- spin_lock(&qp->io_buf_list_put_lock);
- list_splice(&qp->lpfc_io_buf_list_put,
- &qp->lpfc_io_buf_list_get);
- qp->get_io_bufs += qp->put_io_bufs;
- INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
- qp->put_io_bufs = 0;
- spin_unlock(&qp->io_buf_list_put_lock);
- if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
- expedite)
- lpfc_ncmd = lpfc_nvme_buf(phba, idx);
- }
- spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
+ lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
if (lpfc_ncmd) {
pwqeq = &(lpfc_ncmd->cur_iocbq);
@@ -1978,8 +1948,6 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
lpfc_ncmd->start_time = jiffies;
lpfc_ncmd->flags = 0;
- lpfc_ncmd->hdwq = qp;
- lpfc_ncmd->hdwq_no = idx;
/* Rsp SGE will be filled in when we rcv an IO
* from the NVME Layer to be sent.
@@ -1996,11 +1964,13 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
- lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
+ lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
}
- } else
+ } else {
+ qp = &phba->sli4_hba.hdwq[idx];
qp->empty_io_bufs++;
+ }
return lpfc_ncmd;
}
@@ -2016,16 +1986,16 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* aborted.
**/
static void
-lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
+lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{
struct lpfc_sli4_hdw_queue *qp;
unsigned long iflag = 0;
- if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
+ if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
lpfc_ncmd->ndlp = NULL;
- lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
+ lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
qp = lpfc_ncmd->hdwq;
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
@@ -2040,17 +2010,8 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
&qp->lpfc_abts_nvme_buf_list);
qp->abts_nvme_io_bufs++;
spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
- } else {
- /* MUST zero fields if buffer is reused by another protocol */
- lpfc_ncmd->nvmeCmd = NULL;
- lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
-
- spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
- list_add_tail(&lpfc_ncmd->list,
- &qp->lpfc_io_buf_list_put);
- qp->put_io_bufs++;
- spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
- }
+ } else
+ lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
}
/**
@@ -2510,7 +2471,7 @@ lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
struct sli4_wcqe_xri_aborted *axri, int idx)
{
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
- struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
struct nvmefc_fcp_req *nvme_cmd = NULL;
struct lpfc_nodelist *ndlp;
struct lpfc_sli4_hdw_queue *qp;