author     Bob Pearson <rpearsonhpe@gmail.com>        2024-03-29 09:55:09 -0500
committer  Jason Gunthorpe <jgg@nvidia.com>           2024-04-22 16:54:33 -0300
commit     4891f4fed04718a642ff4a4563128699c47d8918
tree       16c0f14e4692aa1f97d3a45b4b6a991736100776 /drivers/infiniband
parent     cd8aaddf0d6dbd4798d2de2f4e1cd62a91ac62f0
RDMA/rxe: Don't schedule rxe_completer from rxe_requester
Now that rxe_completer() is always called serially after rxe_requester(),
there is no reason to schedule rxe_completer() from rxe_requester().

Link: https://lore.kernel.org/r/20240329145513.35381-9-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
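The serial ordering referenced above comes from earlier patches in this series
that fold request and completion processing into a single send task. As a
rough sketch of that pattern, assuming a combined task handler along these
lines (the name rxe_sender() and the task-loop return convention are
assumptions for illustration, not taken from this diff):

/* Sketch of the serialized send path this patch relies on: the single
 * send task runs the requester and then the completer back to back for
 * the same QP, so rxe_requester() never needs to schedule rxe_completer().
 * rxe_sender() is an assumed name for the combined task handler.
 */
int rxe_sender(struct rxe_qp *qp)
{
	int req_ret;
	int comp_ret;

	/* generate and transmit request packets for this QP */
	req_ret = rxe_requester(qp);

	/* then process completions for the same QP, in the same context */
	comp_ret = rxe_completer(qp);

	/* assumed task-loop convention: return 0 to be called again,
	 * nonzero to stop once both steps report no more work
	 */
	return (req_ret && comp_ret) ? -EAGAIN : 0;
}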
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c  6
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_req.c  9
2 files changed, 2 insertions, 13 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 928508558df4..a2fc118e7ec1 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -440,12 +440,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 		return err;
 	}
-	if ((qp_type(qp) != IB_QPT_RC) &&
-	    (pkt->mask & RXE_END_MASK)) {
-		pkt->wqe->state = wqe_state_done;
-		rxe_sched_task(&qp->send_task);
-	}
-
 	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
 	goto done;
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index e20462c3040d..34c55dee0774 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -545,6 +545,8 @@ static void update_wqe_state(struct rxe_qp *qp,
 	if (pkt->mask & RXE_END_MASK) {
 		if (qp_type(qp) == IB_QPT_RC)
 			wqe->state = wqe_state_pending;
+		else
+			wqe->state = wqe_state_done;
 	} else {
 		wqe->state = wqe_state_processing;
 	}
@@ -631,12 +633,6 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	wqe->status = IB_WC_SUCCESS;
 	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
-	/* There is no ack coming for local work requests
-	 * which can lead to a deadlock. So go ahead and complete
-	 * it now.
-	 */
-	rxe_sched_task(&qp->send_task);
-
 	return 0;
 }
@@ -760,7 +756,6 @@ int rxe_requester(struct rxe_qp *qp)
 						     qp->req.wqe_index);
 			wqe->state = wqe_state_done;
 			wqe->status = IB_WC_SUCCESS;
-			rxe_sched_task(&qp->send_task);
 			goto done;
 		}
 		payload = mtu;