author     Bob Pearson <rpearsonhpe@gmail.com>      2024-03-29 09:55:11 -0500
committer  Jason Gunthorpe <jgg@nvidia.com>         2024-04-22 16:54:33 -0300
commit     23bc06af547f2ca3b7d345e09fd8d04575406274 (patch)
tree       83be7d44282435b82d30a51f6b109f0e288c6d69 /drivers/infiniband
parent     3d807a3ebc48a2e1685ebfa9d26ea2c9ceb9c53e (diff)
RDMA/rxe: Don't call direct between tasks
Replace calls to rxe_run_task() with rxe_sched_task(). This prevents the tasks from all running on the same CPU.

This change slightly reduces performance for single-QP send and write benchmarks in loopback mode, but greatly improves performance with multiple QPs, because when rxe_run_task() is used all the work tends to be performed on one CPU. For actual on-the-wire benchmarks there is no noticeable performance change.

Link: https://lore.kernel.org/r/20240329145513.35381-11-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
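Background for readers unfamiliar with the two helpers (this note and the sketch below are not part of the patch): rxe_run_task() executes the task's work inline, in the caller's context and on the caller's CPU, while rxe_sched_task() defers the work so that a worker, possibly on another CPU, performs it later. A minimal sketch of that distinction, assuming a workqueue-based deferral; struct example_task, do_work() and the wiring shown are hypothetical stand-ins, not the rxe_task.c implementation:

	#include <linux/workqueue.h>

	/* Hypothetical stand-in for struct rxe_task; illustration only. */
	struct example_task {
		struct work_struct work;	/* wired to a handler via INIT_WORK() */
		void (*do_work)(struct example_task *task);
	};

	/* "run": the protocol work executes immediately, on the caller's CPU. */
	static void example_run_task(struct example_task *task)
	{
		task->do_work(task);
	}

	/*
	 * "sched": the work is queued and a kworker, possibly on another CPU,
	 * picks it up later, so work for many QPs can spread across CPUs.
	 */
	static void example_sched_task(struct example_task *task)
	{
		queue_work(system_wq, &task->work);
	}

With every enqueue path below using the scheduled variant, packets belonging to different QPs can be processed on different CPUs, which is where the multi-QP improvement described above comes from; the deferral overhead accounts for the small single-QP loopback regression.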
Diffstat (limited to 'drivers/infiniband')
 -rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c  | 13
 -rw-r--r--  drivers/infiniband/sw/rxe/rxe_resp.c  | 12
 -rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c |  2
 3 files changed, 4 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 357c1d516efb..d48af2180745 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -129,18 +129,9 @@ void retransmit_timer(struct timer_list *t)
 
 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 {
-	int must_sched;
-
-	must_sched = skb_queue_len(&qp->resp_pkts) > 0;
-	if (must_sched != 0)
-		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
-
+	rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
 	skb_queue_tail(&qp->resp_pkts, skb);
-
-	if (must_sched)
-		rxe_sched_task(&qp->send_task);
-	else
-		rxe_run_task(&qp->send_task);
+	rxe_sched_task(&qp->send_task);
 }
 
 static inline enum comp_state get_wqe(struct rxe_qp *qp,
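For reference, after this hunk the completion-side enqueue helper is just the three remaining statements (read directly from the context and '+' lines above):

	void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
	{
		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_SENDER_SCHED);
		skb_queue_tail(&qp->resp_pkts, skb);
		rxe_sched_task(&qp->send_task);
	}

Note that RXE_CNT_SENDER_SCHED is now incremented for every completion packet, not only when the queue was already non-empty, since every packet now takes the scheduled path.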
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 3ce7a32b5dcf..c6a7fa3054fa 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -49,18 +49,8 @@ static char *resp_state_name[] = {
 /* rxe_recv calls here to add a request packet to the input queue */
 void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
 {
-	int must_sched;
-	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
-
 	skb_queue_tail(&qp->req_pkts, skb);
-
-	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
-		     (skb_queue_len(&qp->req_pkts) > 1);
-
-	if (must_sched)
-		rxe_sched_task(&qp->recv_task);
-	else
-		rxe_run_task(&qp->recv_task);
+	rxe_sched_task(&qp->recv_task);
 }
 
 static inline enum resp_states get_req(struct rxe_qp *qp,
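Likewise, after this hunk the request-side enqueue reduces to (again read from the context and '+' lines above):

	void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
	{
		skb_queue_tail(&qp->req_pkts, skb);
		rxe_sched_task(&qp->recv_task);
	}

The old heuristic, which only scheduled the receive task for RDMA READ requests or when the queue already held packets and otherwise ran it inline, is gone; every inbound request is handled by the scheduled task.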
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index d07f7bd3b2ae..c7d4d8ab5a09 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -935,7 +935,7 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 
 	if (qp->is_user) {
 		/* Utilize process context to do protocol processing */
-		rxe_run_task(&qp->send_task);
+		rxe_sched_task(&qp->send_task);
 	} else {
 		err = rxe_post_send_kernel(qp, wr, bad_wr);
 		if (err)