author    Mike Marciniszyn <mike.marciniszyn@intel.com>    2016-02-14 12:10:04 -0800
committer Doug Ledford <dledford@redhat.com>    2016-03-10 20:38:07 -0500
commit    46a80d62e6e0ccfc9d8a05c5b773405b84a4afd7 (patch)
tree      077ea38ac2f7fd75c1334deadcc141ad6753a009 /drivers/infiniband/hw/qib/qib_uc.c
parent    20f333b61300fa658952713ca9b8b4b72bbaed9f (diff)
IB/qib, staging/rdma/hfi1: add s_hlock for use in post send
This patch adds an additional lock to reduce contention on the s_lock.
This lock is used in post_send() so that post_send() is not serialized
with the send engine and other send-related processing.

To do this, s_next_psn is now maintained at post_send() time, while the
post_send()-related fields are moved to a new cache line. An s_avail
count is maintained for post_send() to mitigate trading cache lines
with the send engine. The lock is released/acquired around handing the
just-built packet to the egress mechanism.

Reviewed-by: Jubin John <jubin.john@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
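
For context, a minimal sketch of the split-lock idea described in the
message above. All struct, field, and function names here (sketch_qp,
sketch_swqe, sketch_post_one_send, npkts) are hypothetical and
simplified; this is not the actual rdmavt implementation. The poster
takes only s_hlock, assigns the PSN at post time, and publishes the new
queue head with a write barrier, so it never contends with the send
engine on s_lock:

/*
 * Hypothetical sketch only: illustrates the producer side of the
 * two-lock scheme under assumed names; not the rdmavt API.
 */
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/errno.h>

struct sketch_swqe {
	u32 psn;	/* first PSN of the request */
	u32 npkts;	/* packets this WQE will generate (assumed field) */
};

struct sketch_qp {
	/* send-engine fields, guarded by s_lock */
	spinlock_t s_lock;
	u32 s_last;
	u32 s_cur;
	/* poster fields on their own cache line, guarded by s_hlock */
	spinlock_t s_hlock ____cacheline_aligned;
	u32 s_head;
	u32 s_avail;		/* queue credits cached for the poster */
	u32 s_next_psn;
	u32 s_size;
	struct sketch_swqe *s_wq;
};

/* Poster path: contends only with other posters on s_hlock. */
static int sketch_post_one_send(struct sketch_qp *qp, u32 npkts)
{
	struct sketch_swqe *wqe;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_hlock, flags);
	if (!qp->s_avail) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -ENOMEM;		/* queue full */
	}
	qp->s_avail--;
	wqe = &qp->s_wq[qp->s_head];
	wqe->npkts = npkts;
	wqe->psn = qp->s_next_psn;	/* PSN now assigned at post time */
	qp->s_next_psn += npkts;
	/* Publish the WQE contents before advancing s_head. */
	smp_wmb();
	qp->s_head = (qp->s_head + 1) % qp->s_size;
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	return 0;
}
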
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_uc.c')
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c | 22
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 1b2fc69855b2..7bdbc79ceaa3 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -41,6 +41,8 @@
* qib_make_uc_req - construct a request packet (SEND, RDMA write)
* @qp: a pointer to the QP
*
+ * Assumes the s_lock is held.
+ *
* Return 1 if constructed; otherwise, return 0.
*/
int qib_make_uc_req(struct rvt_qp *qp)
@@ -48,20 +50,18 @@ int qib_make_uc_req(struct rvt_qp *qp)
struct qib_qp_priv *priv = qp->priv;
struct qib_other_headers *ohdr;
struct rvt_swqe *wqe;
- unsigned long flags;
u32 hwords;
u32 bth0;
u32 len;
u32 pmtu = qp->pmtu;
int ret = 0;
- spin_lock_irqsave(&qp->s_lock, flags);
-
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
goto bail;
/* We are in the error state, flush the work request. */
- if (qp->s_last == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_last == ACCESS_ONCE(qp->s_head))
goto bail;
/* If DMAs are in progress, we can't flush immediately. */
if (atomic_read(&priv->s_dma_busy)) {
@@ -90,13 +90,13 @@ int qib_make_uc_req(struct rvt_qp *qp)
RVT_PROCESS_NEXT_SEND_OK))
goto bail;
/* Check if send work queue is empty. */
- if (qp->s_cur == qp->s_head)
+ smp_read_barrier_depends(); /* see post_one_send() */
+ if (qp->s_cur == ACCESS_ONCE(qp->s_head))
goto bail;
/*
* Start a new request.
*/
- wqe->psn = qp->s_next_psn;
- qp->s_psn = qp->s_next_psn;
+ qp->s_psn = wqe->psn;
qp->s_sge.sge = wqe->sg_list[0];
qp->s_sge.sg_list = wqe->sg_list + 1;
qp->s_sge.num_sge = wqe->wr.num_sge;
@@ -215,15 +215,11 @@ int qib_make_uc_req(struct rvt_qp *qp)
qp->s_cur_sge = &qp->s_sge;
qp->s_cur_size = len;
qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
- qp->s_next_psn++ & QIB_PSN_MASK);
+ qp->s_psn++ & QIB_PSN_MASK);
done:
- ret = 1;
- goto unlock;
-
+ return 1;
bail:
qp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
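
The hunks above show the reader side of that pairing: with the caller
now holding s_lock (per the new function comment), the send engine
samples s_head with ACCESS_ONCE() (the era-appropriate form of what is
now READ_ONCE()) and issues smp_read_barrier_depends(), placed as in
the patch, so the dependent reads of the just-published WQE are ordered
against the poster's smp_wmb(). A matching consumer sketch, reusing the
same hypothetical names as the sketch above:

/* Consumer path sketch: caller holds s_lock; s_hlock is never taken. */
static struct sketch_swqe *sketch_next_wqe(struct sketch_qp *qp)
{
	smp_read_barrier_depends();	/* see sketch_post_one_send() */
	if (qp->s_cur == ACCESS_ONCE(qp->s_head))
		return NULL;		/* nothing posted yet */
	return &qp->s_wq[qp->s_cur];
}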