author	Bob Pearson <rpearsonhpe@gmail.com>	2022-07-20 04:56:07 -0400
committer	Jason Gunthorpe <jgg@nvidia.com>	2022-08-02 13:53:36 -0300
commit	62494ec7fbca4d58900eb62e075f2fedc85b5fb9 (patch)
tree	b788f5759d837710cd2a8b25c5fca4de5ac0131c /drivers/infiniband/sw/rxe/rxe_qp.c
parent	ae720bdb703b295fed4ded28e14dd06a534a3012 (diff)
RDMA/rxe: Split qp state for requester and completer
Currently the requester can continue to process send wqes after a local qp operation error is detected, because the setting of the qp state to the error state is deferred until later. This patch splits the qp state for the completer and requester into two separate states and sets qp->req.state = QP_STATE_ERROR as soon as the error is detected, before another wqe can be executed.

Link: https://lore.kernel.org/r/1658307368-1851-4-git-send-email-lizhijian@fujitsu.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
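As a minimal sketch of the window this closes (hedged illustration only: the pared-down structs, the process_one_wqe() helper, and the requester() loop below are hypothetical stand-ins, not the actual rxe_requester() logic in rxe_req.c):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the per-side state in struct rxe_qp; the real
 * definitions carry far more than a state field. */
enum qp_state { QP_STATE_RESET, QP_STATE_INIT, QP_STATE_READY, QP_STATE_ERROR };

struct qp {
	struct { enum qp_state state; } req;	/* requester's view */
	struct { enum qp_state state; } comp;	/* completer's view */
};

/* Hypothetical helper: process one send wqe; here we pretend a local
 * qp operation error is detected on the first wqe. */
static bool process_one_wqe(struct qp *qp)
{
	(void)qp;
	return false;
}

/* With a single shared state, the error transition was deferred and
 * further wqes could slip through; with the split state the requester
 * stops before the next iteration's check. */
static void requester(struct qp *qp)
{
	while (qp->req.state == QP_STATE_READY) {
		if (!process_one_wqe(qp)) {
			qp->req.state = QP_STATE_ERROR;	/* stop new wqes now */
			/* qp->comp.state is left untouched: the completer
			 * can still reap completions for in-flight wqes
			 * and transition on its own. */
		}
	}
}

int main(void)
{
	struct qp qp = { .req = { QP_STATE_READY }, .comp = { QP_STATE_READY } };

	requester(&qp);
	printf("req.state=%d comp.state=%d\n", qp.req.state, qp.comp.state);
	return 0;
}

The design point is that each state machine (requester, completer, responder) owns its transition: the requester can fail fast without racing the completer, which still needs a non-error state to drain outstanding work.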
Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_qp.c')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_qp.c	5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index eef91b8cb4ed..c6519b9b94fb 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -228,6 +228,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 					       QUEUE_TYPE_FROM_CLIENT);
 
 	qp->req.state		= QP_STATE_RESET;
+	qp->comp.state		= QP_STATE_RESET;
 	qp->req.opcode		= -1;
 	qp->comp.opcode		= -1;
 
@@ -488,6 +489,7 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 
 	/* move qp to the reset state */
 	qp->req.state = QP_STATE_RESET;
+	qp->comp.state = QP_STATE_RESET;
 	qp->resp.state = QP_STATE_RESET;
 
 	/* let state machines reset themselves drain work and packet queues
@@ -551,6 +553,7 @@ void rxe_qp_error(struct rxe_qp *qp)
 {
 	qp->req.state = QP_STATE_ERROR;
 	qp->resp.state = QP_STATE_ERROR;
+	qp->comp.state = QP_STATE_ERROR;
 	qp->attr.qp_state = IB_QPS_ERR;
 
 	/* drain work and packet queues */
@@ -688,6 +691,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 		pr_debug("qp#%d state -> INIT\n", qp_num(qp));
 		qp->req.state = QP_STATE_INIT;
 		qp->resp.state = QP_STATE_INIT;
+		qp->comp.state = QP_STATE_INIT;
 		break;
 
 	case IB_QPS_RTR:
@@ -698,6 +702,7 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 	case IB_QPS_RTS:
 		pr_debug("qp#%d state -> RTS\n", qp_num(qp));
 		qp->req.state = QP_STATE_READY;
+		qp->comp.state = QP_STATE_READY;
 		break;
 
 	case IB_QPS_SQD: