author     Bob Pearson <rpearsonhpe@gmail.com>          2023-03-04 11:45:32 -0600
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2023-05-11 23:17:33 +0900
commit     9d2e108a5c1a9020ad4cf0f336eb726280cdfd96 (patch)
tree       05b89d150a14d81454e44ba16694e977041584df /drivers
parent     1661cc42bb9ceac591d318a024c5328fdc3c934d (diff)
RDMA/rxe: Remove __rxe_do_task()
[ Upstream commit 960ebe97e5238565d15063c8f4d1b2108efe2e65 ]

The subroutine __rxe_do_task is not thread safe and it has no way to guarantee that the tasks, which are designed with the assumption that they are non-reentrant, are not reentered. All of its uses are non-performance critical.

This patch replaces calls to __rxe_do_task with calls to rxe_sched_task. It also removes irrelevant or unneeded if tests. Instead of calling the task machinery, a single call to the tasklet function (rxe_requester, etc.) is sufficient to drain the queues if task execution has been disabled or stopped. Together these changes allow the removal of __rxe_do_task.

Link: https://lore.kernel.org/r/20230304174533.11296-7-rpearsonhpe@gmail.com
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Stable-dep-of: b2b1ddc45745 ("RDMA/rxe: Fix the error "trying to register non-static key in rxe_cleanup_task"")
Signed-off-by: Sasha Levin <sashal@kernel.org>
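To make the hunks below easier to follow, here is a minimal user-space sketch of the idea behind the change. The toy names (toy_requester, toy_do_task, toy_sched_task) and the pthread mutex are stand-ins of my own, not kernel API: the removed helper looped a raw work function with no exclusion, while the surviving paths guarantee the work function is never reentered by funneling every invocation through a serialized point.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_depth = 4;	/* pretend work queue */

/* models a work function such as rxe_requester(): consumes one
 * entry per call, returns nonzero once the queue is empty */
static int toy_requester(void)
{
	if (queue_depth == 0)
		return -1;
	queue_depth--;
	return 0;
}

/* the removed pattern (__rxe_do_task): loop the raw function until
 * it is done; nothing stops a second thread entering the same loop */
static void toy_do_task(void)
{
	while (toy_requester() == 0)
		;
}

/* the replacement idea: every invocation goes through one
 * serializing point, so the work function is never reentered */
static void toy_sched_task(void)
{
	pthread_mutex_lock(&task_lock);
	toy_do_task();
	pthread_mutex_unlock(&task_lock);
}

int main(void)
{
	toy_sched_task();
	printf("drained, queue_depth=%d\n", queue_depth);
	return 0;
}

In the driver the exclusion comes from the task machinery in rxe_task.c rather than a mutex, and, per the commit message, a non-performance-critical path can instead make one direct call to the work function (rxe_requester, etc.) once the task has been disabled or stopped.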
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_qp.c	56
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_task.c	13
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_task.h	6
3 files changed, 17 insertions, 58 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index ab72db68b58f..c243e1cfcd4b 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -473,29 +473,23 @@ static void rxe_qp_reset(struct rxe_qp *qp)
{
/* stop tasks from running */
rxe_disable_task(&qp->resp.task);
-
- /* stop request/comp */
- if (qp->sq.queue) {
- if (qp_type(qp) == IB_QPT_RC)
- rxe_disable_task(&qp->comp.task);
- rxe_disable_task(&qp->req.task);
- }
+ rxe_disable_task(&qp->comp.task);
+ rxe_disable_task(&qp->req.task);
/* move qp to the reset state */
qp->req.state = QP_STATE_RESET;
qp->comp.state = QP_STATE_RESET;
qp->resp.state = QP_STATE_RESET;
- /* let state machines reset themselves drain work and packet queues
- * etc.
- */
- __rxe_do_task(&qp->resp.task);
+ /* drain work and packet queues */
+ rxe_requester(qp);
+ rxe_completer(qp);
+ rxe_responder(qp);
- if (qp->sq.queue) {
- __rxe_do_task(&qp->comp.task);
- __rxe_do_task(&qp->req.task);
+ if (qp->rq.queue)
+ rxe_queue_reset(qp->rq.queue);
+ if (qp->sq.queue)
rxe_queue_reset(qp->sq.queue);
- }
/* cleanup attributes */
atomic_set(&qp->ssn, 0);
@@ -518,13 +512,8 @@ static void rxe_qp_reset(struct rxe_qp *qp)
/* reenable tasks */
rxe_enable_task(&qp->resp.task);
-
- if (qp->sq.queue) {
- if (qp_type(qp) == IB_QPT_RC)
- rxe_enable_task(&qp->comp.task);
-
- rxe_enable_task(&qp->req.task);
- }
+ rxe_enable_task(&qp->comp.task);
+ rxe_enable_task(&qp->req.task);
}
/* drain the send queue */
@@ -533,10 +522,7 @@ static void rxe_qp_drain(struct rxe_qp *qp)
if (qp->sq.queue) {
if (qp->req.state != QP_STATE_DRAINED) {
qp->req.state = QP_STATE_DRAIN;
- if (qp_type(qp) == IB_QPT_RC)
- rxe_sched_task(&qp->comp.task);
- else
- __rxe_do_task(&qp->comp.task);
+ rxe_sched_task(&qp->comp.task);
rxe_sched_task(&qp->req.task);
}
}
@@ -552,11 +538,7 @@ void rxe_qp_error(struct rxe_qp *qp)
/* drain work and packet queues */
rxe_sched_task(&qp->resp.task);
-
- if (qp_type(qp) == IB_QPT_RC)
- rxe_sched_task(&qp->comp.task);
- else
- __rxe_do_task(&qp->comp.task);
+ rxe_sched_task(&qp->comp.task);
rxe_sched_task(&qp->req.task);
}
@@ -773,24 +755,20 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
qp->valid = 0;
qp->qp_timeout_jiffies = 0;
- rxe_cleanup_task(&qp->resp.task);
if (qp_type(qp) == IB_QPT_RC) {
del_timer_sync(&qp->retrans_timer);
del_timer_sync(&qp->rnr_nak_timer);
}
+ rxe_cleanup_task(&qp->resp.task);
rxe_cleanup_task(&qp->req.task);
rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */
- if (qp->req.task.func)
- __rxe_do_task(&qp->req.task);
-
- if (qp->sq.queue) {
- __rxe_do_task(&qp->comp.task);
- __rxe_do_task(&qp->req.task);
- }
+ rxe_requester(qp);
+ rxe_completer(qp);
+ rxe_responder(qp);
if (qp->sq.queue)
rxe_queue_cleanup(qp->sq.queue);
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 959cc6229a34..a67f48545443 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -6,19 +6,6 @@
#include "rxe.h"
-int __rxe_do_task(struct rxe_task *task)
-
-{
- int ret;
-
- while ((ret = task->func(task->qp)) == 0)
- ;
-
- task->ret = ret;
-
- return ret;
-}
-
/*
* this locking is due to a potential race where
* a second caller finds the task already running
diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h
index 41efd5fd49b0..99585e40cef9 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.h
+++ b/drivers/infiniband/sw/rxe/rxe_task.h
@@ -39,12 +39,6 @@ int rxe_init_task(struct rxe_task *task, struct rxe_qp *qp,
/* cleanup task */
void rxe_cleanup_task(struct rxe_task *task);
-/*
- * raw call to func in loop without any checking
- * can call when tasklets are disabled
- */
-int __rxe_do_task(struct rxe_task *task);
-
void rxe_run_task(struct rxe_task *task);
void rxe_sched_task(struct rxe_task *task);
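
After this patch the header above is the whole external task API. A hedged sketch of how the rxe_qp.c hunks combine these calls (illustrative only, not a verbatim kernel excerpt; qp is a struct rxe_qp *):

/* non-performance-critical drain, as in rxe_qp_reset(): stop
 * deferred execution, then a single direct call to each work
 * function is sufficient to drain the queues */
rxe_disable_task(&qp->req.task);
rxe_requester(qp);
rxe_enable_task(&qp->req.task);

/* hot paths defer instead of calling directly, as in rxe_qp_error() */
rxe_sched_task(&qp->req.task);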