Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_resp.c')
 drivers/infiniband/sw/rxe/rxe_resp.c | 43 +++++++++++++++++++++++++------------------
 1 file changed, 25 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 64c64f5f36a8..6596a85723c9 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -49,18 +49,8 @@ static char *resp_state_name[] = {
/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
- int must_sched;
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
-
skb_queue_tail(&qp->req_pkts, skb);
-
- must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
- (skb_queue_len(&qp->req_pkts) > 1);
-
- if (must_sched)
- rxe_sched_task(&qp->resp.task);
- else
- rxe_run_task(&qp->resp.task);
+ rxe_sched_task(&qp->recv_task);
}
static inline enum resp_states get_req(struct rxe_qp *qp,
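This first hunk drops the heuristic that ran the responder inline in the caller's
context for most packets, deferring to the task only for RDMA READ requests or
when other packets were already queued; every request packet is now scheduled
unconditionally onto the renamed recv_task. A minimal userspace sketch of the
before/after policy, with hypothetical stand-ins for the real
rxe_run_task()/rxe_sched_task():

#include <stdbool.h>
#include <stdio.h>

static void sched_task(void) { puts("deferred to task"); }	/* ~rxe_sched_task */
static void run_task(void)   { puts("ran inline in caller"); }	/* ~rxe_run_task */

/* The policy this hunk removes: defer only for RDMA READ requests
 * or when the input queue already holds other packets. */
static void queue_pkt_old(bool is_rdma_read, int queue_len)
{
	if (is_rdma_read || queue_len > 1)
		sched_task();
	else
		run_task();
}

/* New policy: always defer; the receive task does all the work. */
static void queue_pkt_new(void)
{
	sched_task();
}

int main(void)
{
	queue_pkt_old(false, 1);	/* old: could run in caller context */
	queue_pkt_new();		/* new: always scheduled */
	return 0;
}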
@@ -354,6 +344,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
 * receive buffer later. For rdma operations additional
* length checks are performed in check_rkey.
*/
+ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
+ unsigned int payload = payload_size(pkt);
+ unsigned int recv_buffer_len = 0;
+ int i;
+
+ for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
+ recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
+ if (payload + 40 > recv_buffer_len) {
+ rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
+ return RESPST_ERR_LENGTH;
+ }
+ }
+
if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
(qp_type(qp) == IB_QPT_UC))) {
unsigned int mtu = qp->mtu;
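The new branch rejects a UD or GSI request whose payload cannot fit in the
posted receive buffer. The hard-coded 40 is the size of the InfiniBand Global
Route Header: UD receives always reserve the first 40 bytes of the buffer for
the GRH (sizeof(struct ib_grh) == 40), so the usable space is the summed SGE
lengths minus that slot. A self-contained sketch of the rule, with illustrative
names rather than the kernel's:

#include <stdbool.h>
#include <stddef.h>

#define IB_GRH_BYTES 40	/* fixed GRH size reserved in UD receive buffers */

struct demo_sge {
	size_t length;
};

static bool ud_payload_fits(const struct demo_sge *sge, int num_sge,
			    size_t payload)
{
	size_t recv_buffer_len = 0;
	int i;

	/* total space posted by the consumer across all scatter entries */
	for (i = 0; i < num_sge; i++)
		recv_buffer_len += sge[i].length;

	/* mirrors the hunk: payload plus the GRH slot must fit */
	return payload + IB_GRH_BYTES <= recv_buffer_len;
}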
@@ -362,18 +365,18 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
if ((pkt->mask & RXE_START_MASK) &&
(pkt->mask & RXE_END_MASK)) {
if (unlikely(payload > mtu)) {
- rxe_dbg_qp(qp, "only packet too long");
+ rxe_dbg_qp(qp, "only packet too long\n");
return RESPST_ERR_LENGTH;
}
} else if ((pkt->mask & RXE_START_MASK) ||
(pkt->mask & RXE_MIDDLE_MASK)) {
if (unlikely(payload != mtu)) {
- rxe_dbg_qp(qp, "first or middle packet not mtu");
+ rxe_dbg_qp(qp, "first or middle packet not mtu\n");
return RESPST_ERR_LENGTH;
}
} else if (pkt->mask & RXE_END_MASK) {
if (unlikely((payload == 0) || (payload > mtu))) {
- rxe_dbg_qp(qp, "last packet zero or too long");
+ rxe_dbg_qp(qp, "last packet zero or too long\n");
return RESPST_ERR_LENGTH;
}
}
@@ -382,7 +385,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
/* See IBA C9-94 */
if (pkt->mask & RXE_RETH_MASK) {
if (reth_len(pkt) > (1U << 31)) {
- rxe_dbg_qp(qp, "dma length too long");
+ rxe_dbg_qp(qp, "dma length too long\n");
return RESPST_ERR_LENGTH;
}
}
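Per the IBA C9-94 rule cited in the context above, the DMA length carried in an
RDMA Extended Transport Header may not exceed 2^31 bytes; the hunk itself only
adds the missing newline to the log message. A one-liner capturing the bound,
with an illustrative name:

#include <stdbool.h>
#include <stdint.h>

/* IBA C9-94: the RETH DMA length must lie in [0, 2^31]. */
static bool reth_len_valid(uint32_t dma_len)
{
	return dma_len <= (UINT32_C(1) << 31);
}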
@@ -1133,7 +1136,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- rxe_err_qp(qp, "non-flush error status = %d",
+ rxe_err_qp(qp, "non-flush error status = %d\n",
wc->status);
}
@@ -1442,7 +1445,7 @@ static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
err = rxe_cq_post(qp->rcq, &cqe, 0);
if (err)
- rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);
+ rxe_dbg_cq(qp->rcq, "post cq failed err = %d\n", err);
return err;
}
@@ -1469,6 +1472,10 @@ static void flush_recv_queue(struct rxe_qp *qp, bool notify)
return;
}
+ /* recv queue not created. nothing to do. */
+ if (!qp->rq.queue)
+ return;
+
while ((wqe = queue_head(q, q->type))) {
if (notify) {
err = flush_recv_wqe(qp, wqe);
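The added guard makes the flush a no-op when the QP never got its own receive
queue, presumably covering teardown after a partially failed QP create and QPs
fed by a shared receive queue, which do not allocate qp->rq.queue. The same
defensive shape as a small sketch (demo types; the real code walks
rxe_recv_wqe entries):

#include <stdio.h>

struct demo_queue { int depth; };

/* Mirrors the added guard: nothing to flush if the queue
 * was never allocated. */
static void flush_queue(struct demo_queue *q)
{
	if (!q) {
		puts("recv queue not created: nothing to do");
		return;
	}
	printf("flushing %d entries\n", q->depth);
}

int main(void)
{
	struct demo_queue q = { .depth = 3 };

	flush_queue(NULL);	/* early return, no dereference */
	flush_queue(&q);
	return 0;
}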
@@ -1481,7 +1488,7 @@ static void flush_recv_queue(struct rxe_qp *qp, bool notify)
qp->resp.wqe = NULL;
}
-int rxe_responder(struct rxe_qp *qp)
+int rxe_receiver(struct rxe_qp *qp)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
enum resp_states state;