Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_qp.c')
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c  124
1 file changed, 76 insertions, 48 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 004aca9086ab..230a909ba9bc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -98,7 +98,9 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
(event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
+ event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+ event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
qp->state = IB_QPS_ERR;
if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
init_flush_work(hr_dev, qp);
@@ -142,6 +144,8 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
event.event = IB_EVENT_QP_REQ_ERR;
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
+ case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH:
event.event = IB_EVENT_QP_ACCESS_ERR;
break;
default:
@@ -366,8 +370,13 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
unsigned long flags;
list_del(&hr_qp->node);
- list_del(&hr_qp->sq_node);
- list_del(&hr_qp->rq_node);
+
+ if (hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+ list_del(&hr_qp->sq_node);
+
+ if (hr_qp->ibqp.qp_type != IB_QPT_XRC_INI &&
+ hr_qp->ibqp.qp_type != IB_QPT_XRC_TGT)
+ list_del(&hr_qp->rq_node);
xa_lock_irqsave(xa, flags);
__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
@@ -478,7 +487,9 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq.max_gs);
hr_qp->rq.wqe_cnt = cnt;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
+ hr_qp->ibqp.qp_type != IB_QPT_UD &&
+ hr_qp->ibqp.qp_type != IB_QPT_GSI)
hr_qp->rq_inl_buf.wqe_cnt = cnt;
else
hr_qp->rq_inl_buf.wqe_cnt = 0;
@@ -776,7 +787,7 @@ static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
struct hns_roce_ib_create_qp_resp *resp,
struct hns_roce_ib_create_qp *ucmd)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
hns_roce_qp_has_sq(init_attr) &&
udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
@@ -787,7 +798,7 @@ static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
struct ib_udata *udata,
struct hns_roce_ib_create_qp_resp *resp)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
hns_roce_qp_has_rq(init_attr));
}
@@ -795,7 +806,7 @@ static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
struct ib_qp_init_attr *init_attr)
{
- return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
+ return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) &&
hns_roce_qp_has_rq(init_attr));
}
@@ -840,11 +851,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
}
} else {
- /* QP doorbell register address */
- hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
- hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ hr_qp->sq.db_reg = hr_dev->mem_base +
+ HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
+ else
+ hr_qp->sq.db_reg =
+ hr_dev->reg_base + hr_dev->sdb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+ hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+ DB_REG_OFFSET * hr_dev->priv_uar.index;
if (kernel_qp_has_rdb(hr_dev, init_attr)) {
ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
@@ -1011,36 +1027,36 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
}
}
- ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
- ret);
- goto err_wrid;
- }
-
ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
if (ret) {
ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
- goto err_db;
+ goto err_buf;
}
ret = alloc_qpn(hr_dev, hr_qp);
if (ret) {
ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
- goto err_buf;
+ goto err_qpn;
+ }
+
+ ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
+ if (ret) {
+ ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
+ ret);
+ goto err_db;
}
ret = alloc_qpc(hr_dev, hr_qp);
if (ret) {
ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
ret);
- goto err_qpn;
+ goto err_qpc;
}
ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
if (ret) {
ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
- goto err_qpc;
+ goto err_store;
}
if (udata) {
@@ -1055,7 +1071,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
if (ret)
- goto err_store;
+ goto err_flow_ctrl;
}
hr_qp->ibqp.qp_num = hr_qp->qpn;
@@ -1065,17 +1081,17 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
return 0;
-err_store:
+err_flow_ctrl:
hns_roce_qp_remove(hr_dev, hr_qp);
-err_qpc:
+err_store:
free_qpc(hr_dev, hr_qp);
-err_qpn:
+err_qpc:
+ free_qp_db(hr_dev, hr_qp, udata);
+err_db:
free_qpn(hr_dev, hr_qp);
-err_buf:
+err_qpn:
free_qp_buf(hr_dev, hr_qp);
-err_db:
- free_qp_db(hr_dev, hr_qp, udata);
-err_wrid:
+err_buf:
free_kernel_wrid(hr_qp);
return ret;
}
@@ -1100,11 +1116,16 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
bool is_user)
{
switch (type) {
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC))
+ goto out;
+ break;
case IB_QPT_UD:
if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
is_user)
goto out;
- fallthrough;
+ break;
case IB_QPT_RC:
case IB_QPT_GSI:
break;
@@ -1124,8 +1145,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
- struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct ib_device *ibdev = pd ? pd->device : init_attr->xrcd->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_qp *hr_qp;
int ret;
@@ -1137,6 +1158,15 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
if (!hr_qp)
return ERR_PTR(-ENOMEM);
+ if (init_attr->qp_type == IB_QPT_XRC_INI)
+ init_attr->recv_cq = NULL;
+
+ if (init_attr->qp_type == IB_QPT_XRC_TGT) {
+ hr_qp->xrcdn = to_hr_xrcd(init_attr->xrcd)->xrcdn;
+ init_attr->recv_cq = NULL;
+ init_attr->send_cq = NULL;
+ }
+
if (init_attr->qp_type == IB_QPT_GSI) {
hr_qp->port = init_attr->port_num - 1;
hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
@@ -1156,20 +1186,18 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
int to_hr_qp_type(int qp_type)
{
- int transport_type;
-
- if (qp_type == IB_QPT_RC)
- transport_type = SERV_TYPE_RC;
- else if (qp_type == IB_QPT_UC)
- transport_type = SERV_TYPE_UC;
- else if (qp_type == IB_QPT_UD)
- transport_type = SERV_TYPE_UD;
- else if (qp_type == IB_QPT_GSI)
- transport_type = SERV_TYPE_UD;
- else
- transport_type = -1;
-
- return transport_type;
+ switch (qp_type) {
+ case IB_QPT_RC:
+ return SERV_TYPE_RC;
+ case IB_QPT_UD:
+ case IB_QPT_GSI:
+ return SERV_TYPE_UD;
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT:
+ return SERV_TYPE_XRC;
+ default:
+ return -1;
+ }
}
static int check_mtu_validate(struct hns_roce_dev *hr_dev,