author	Kamenee Arumugam <kamenee.arumugam@intel.com>	2019-06-28 14:21:52 -0400
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-06-28 22:34:26 -0300
commit	5136bfea7e79b333af77594fac5bc70282a95313 (patch)
tree	529f6d18c809c51b2d228a18bab4b5e83707d7e7 /include/rdma
parent	f592ae3c999fbe4faeeb90dfde8ff7da49ee4ae6 (diff)
IB/{hfi1, qib, rdmavt}: Put qp in error state when cq is full
When a completion queue is full, the associated queue pairs are not put into the error state. According to the IBTA specification, this is a violation. Quoting the IBTA spec:

  C9-218: A Requester Class F error occurs when the CQ is inaccessible or
  full and an attempt is made to complete a WQE. The Affected QP shall be
  moved to the error state and affiliated asynchronous errors generated as
  described in 11.6.3.1 Affiliated Asynchronous Events on page 678. The
  current WQE and any subsequent WQEs are left in an unknown state.

  C11-37: The CI shall generate a CQ Error when a CQ overrun is detected.
  This condition will result in an Affiliated Asynchronous Error for any
  associated Work Queues when they attempt to use that CQ. Completions can
  no longer be added to the CQ. It is not guaranteed that completions
  present in the CQ at the time the error occurred can be retrieved.
  Possible causes include a CQ overrun or a CQ protection error.

Put the qp in the error state when the cq is full, and add a "full" state to the cq so that other QPs associated with the same cq are also moved to the error state.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Kamenee Arumugam <kamenee.arumugam@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
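For orientation, here is a minimal sketch of the overflow branch that motivates the new bool return type. It paraphrases drivers/infiniband/sw/rdmavt/cq.c rather than quoting it; the ring-index bookkeeping and the kernel/user queue split are elided into comments:

	/*
	 * Sketch of rvt_cq_enter()'s overflow handling (simplified;
	 * see drivers/infiniband/sw/rdmavt/cq.c for the real code).
	 */
	bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
	{
		unsigned long flags;
		u32 next, tail;

		spin_lock_irqsave(&cq->lock, flags);
		/* ... compute next (head + 1, wrapped) and read tail ... */

		if (unlikely(next == tail || cq->cq_full)) {
			cq->cq_full = true;	/* sticky: later entries fail too */
			spin_unlock_irqrestore(&cq->lock, flags);
			if (cq->ibcq.event_handler) {
				struct ib_event ev = {
					.device     = cq->ibcq.device,
					.element.cq = &cq->ibcq,
					.event      = IB_EVENT_CQ_ERR,	/* C11-37 */
				};

				cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
			}
			return false;	/* callers move the QP to error (C9-218) */
		}

		/* ... store *entry at head, advance head, signal completion ... */
		spin_unlock_irqrestore(&cq->lock, flags);
		return true;
	}

Because cq_full stays set once the overrun is detected, every QP that subsequently tries to complete a WQE on this cq also gets moved to the error state, which is the "full" state the message above describes.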
Diffstat (limited to 'include/rdma')
-rw-r--r--	include/rdma/rdmavt_cq.h	3
-rw-r--r--	include/rdma/rdmavt_qp.h	47
2 files changed, 45 insertions(+), 5 deletions(-)
diff --git a/include/rdma/rdmavt_cq.h b/include/rdma/rdmavt_cq.h
index ab22860a63e2..04c519ef6d71 100644
--- a/include/rdma/rdmavt_cq.h
+++ b/include/rdma/rdmavt_cq.h
@@ -93,6 +93,7 @@ struct rvt_cq {
spinlock_t lock; /* protect changes in this struct */
u8 notify;
u8 triggered;
+ u8 cq_full;
int comp_vector_cpu;
struct rvt_dev_info *rdi;
struct rvt_cq_wc *queue;
@@ -105,6 +106,6 @@ static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq)
return container_of(ibcq, struct rvt_cq, ibcq);
}
-void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);
+bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited);
#endif /* DEF_RDMAVT_INCCQH */
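On the consumer side, the overrun surfaces through the standard verbs affiliated asynchronous event before any completions are dropped, matching C11-37. A hypothetical ULP-side handler (the function name and message are illustrative, not part of this patch; the handler is the one registered via ib_create_cq()) might look like:

	#include <rdma/ib_verbs.h>

	/* Hypothetical CQ event handler: rdmavt raises IB_EVENT_CQ_ERR
	 * when it detects the CQ overrun. */
	static void my_cq_event_handler(struct ib_event *event, void *context)
	{
		if (event->event == IB_EVENT_CQ_ERR)
			pr_err("%s: CQ overrun; associated QPs move to error\n",
			       dev_name(&event->device->dev));
	}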
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index de5915b244be..e4be869c4f21 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -718,6 +718,48 @@ rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
return val;
}
+int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
+
+/**
+ * rvt_recv_cq - add a new entry to the completion queue
+ * used by the receive queue
+ * @qp: the qp owning the receive queue
+ * @wc: work completion entry to add
+ * @solicited: true if @wc is solicited
+ *
+ * This is a wrapper around rvt_cq_enter() for completions posted
+ * by the receive queue. If rvt_cq_enter() returns false, the cq
+ * is full and the qp is put into the error state.
+ */
+static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
+ bool solicited)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
+
+ if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
+ rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
+}
+
+/**
+ * rvt_send_cq - add a new entry to the completion queue
+ * used by the send queue
+ * @qp: the qp owning the send queue
+ * @wc: work completion entry to add
+ * @solicited: true if @wc is solicited
+ *
+ * This is a wrapper around rvt_cq_enter() for completions posted
+ * by the send queue. If rvt_cq_enter() returns false, the cq is
+ * full and the qp is put into the error state.
+ */
+static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
+ bool solicited)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
+
+ if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
+ rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
+}
+
/**
* rvt_qp_complete_swqe - insert send completion
* @qp - the qp
@@ -768,9 +810,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
.qp = &qp->ibqp,
.byte_len = byte_len,
};
-
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &w,
- status != IB_WC_SUCCESS);
+ rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
}
return last;
}
@@ -780,7 +820,6 @@ extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
-int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
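At hfi1/qib call sites the conversion is then mechanical: completions that used to go through rvt_cq_enter() directly are posted via the new wrappers, which perform the error-state transition on overflow. A hypothetical receive-path helper (the function name and wc fields are illustrative, not taken from this patch):

	#include <rdma/rdmavt_qp.h>

	/* Hypothetical receive-completion helper in an rdmavt-based driver. */
	static void post_recv_completion(struct rvt_qp *qp, u64 wr_id, u32 len,
					 bool solicited)
	{
		struct ib_wc wc = {
			.wr_id    = wr_id,
			.status   = IB_WC_SUCCESS,
			.opcode   = IB_WC_RECV,
			.qp       = &qp->ibqp,
			.byte_len = len,
		};

		/* rvt_recv_cq() calls rvt_error_qp() if the cq is full, so
		 * the caller no longer checks rvt_cq_enter()'s return value. */
		rvt_recv_cq(qp, &wc, solicited);
	}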