author     Yonatan Cohen <yonatanc@mellanox.com>    2018-10-09 12:05:15 +0300
committer  Doug Ledford <dledford@redhat.com>       2018-10-17 11:25:41 -0400
commit     6f4bc0ea682b59d7013cbc5ced2d4dd73067a33f (patch)
tree       33698021af90822b34bc9345b8d270aee82c8e88 /drivers/infiniband/hw
parent     2e43bb31b8df662f591a7e80270ca3acda44bb48 (diff)
IB/mlx5: Allow scatter to CQE without global signaled WRs
Requester scatter to CQE is currently restricted to QPs configured to
signal all WRs.
This patch adds the ability to force-enable scatter to CQE in the
requester without sig_all, for users who do not want all WRs signaled
but only those whose data is scattered to the CQE.
Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
Reviewed-by: Guy Levi <guyle@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
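As context for how this kernel flag is consumed, the sketch below shows one way a userspace application might request requester scatter to CQE on a selectively-signaled RC QP through rdma-core's mlx5dv provider API, assuming MLX5DV_QP_CREATE_ALLOW_SCATTER_TO_CQE is the rdma-core counterpart of MLX5_QP_FLAG_ALLOW_SCATTER_CQE. This is not part of the patch; the helper name, CQ/PD handles and capability values are illustrative only.

/*
 * Hedged userspace sketch (rdma-core mlx5dv API assumed, not part of this
 * kernel patch): create an RC QP with selective signaling (sq_sig_all = 0)
 * while still asking the mlx5 provider to allow requester scatter to CQE.
 */
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static struct ibv_qp *create_scat_cqe_qp(struct ibv_context *ctx,
                                         struct ibv_pd *pd,
                                         struct ibv_cq *cq)
{
        struct ibv_qp_init_attr_ex attr_ex = {
                .qp_type    = IBV_QPT_RC,
                .send_cq    = cq,
                .recv_cq    = cq,
                .cap        = { .max_send_wr = 64, .max_recv_wr = 64,
                                .max_send_sge = 1, .max_recv_sge = 1 },
                .sq_sig_all = 0,                 /* only selected WRs are signaled */
                .comp_mask  = IBV_QP_INIT_ATTR_PD,
                .pd         = pd,
        };
        struct mlx5dv_qp_init_attr dv_attr = {
                .comp_mask    = MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS,
                /* assumed to map to MLX5_QP_FLAG_ALLOW_SCATTER_CQE in the ucmd */
                .create_flags = MLX5DV_QP_CREATE_ALLOW_SCATTER_TO_CQE,
        };

        return mlx5dv_create_qp(ctx, &attr_ex, &dv_attr);
}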
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c  14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5b1811be6677..368728e6f980 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1706,15 +1706,20 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
 
 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
                                          struct ib_qp_init_attr *init_attr,
+                                         struct mlx5_ib_create_qp *ucmd,
                                          void *qpc)
 {
         enum ib_qp_type qpt = init_attr->qp_type;
         int scqe_sz;
+        bool allow_scat_cqe = 0;
 
         if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
                 return;
 
-        if (init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+        if (ucmd)
+                allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+
+        if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
                 return;
 
         scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
@@ -1836,7 +1841,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                                       MLX5_QP_FLAG_TUNNEL_OFFLOADS |
                                       MLX5_QP_FLAG_BFREG_INDEX |
                                       MLX5_QP_FLAG_TYPE_DCT |
-                                      MLX5_QP_FLAG_TYPE_DCI))
+                                      MLX5_QP_FLAG_TYPE_DCI |
+                                      MLX5_QP_FLAG_ALLOW_SCATTER_CQE))
                         return -EINVAL;
 
                 err = get_qp_user_index(to_mucontext(pd->uobject->context),
@@ -1971,7 +1977,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
         if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
                 configure_responder_scat_cqe(init_attr, qpc);
-                configure_requester_scat_cqe(dev, init_attr, qpc);
+                configure_requester_scat_cqe(dev, init_attr,
+                                             (pd && pd->uobject) ? &ucmd : NULL,
+                                             qpc);
         }
 
         if (qp->rq.wqe_cnt) {