-rw-r--r--  drivers/infiniband/core/restrack.c          |  51
-rw-r--r--  drivers/infiniband/hw/erdma/erdma.h         |  13
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_cmdq.c    |  99
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_cq.c      |   2
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_eq.c      |  54
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_hw.h      |   6
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_main.c    |  15
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_qp.c      |   4
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.c   | 105
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.h   |  16
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_ah.c     |  33
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h |   6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  |  85
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c     |  13
-rw-r--r--  drivers/infiniband/hw/mana/cq.c             |  52
-rw-r--r--  drivers/infiniband/hw/mana/main.c           |  43
-rw-r--r--  drivers/infiniband/hw/mana/mana_ib.h        |  26
-rw-r--r--  drivers/infiniband/hw/mana/qp.c             |  88
-rw-r--r--  drivers/infiniband/hw/mana/wq.c             |  31
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h        |   3
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c             |  35
-rw-r--r--  include/uapi/rdma/hns-abi.h                 |   9
22 files changed, 427 insertions, 362 deletions
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 01a499a8b88d..438ed3588175 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -37,22 +37,6 @@ int rdma_restrack_init(struct ib_device *dev)
return 0;
}
-static const char *type2str(enum rdma_restrack_type type)
-{
- static const char * const names[RDMA_RESTRACK_MAX] = {
- [RDMA_RESTRACK_PD] = "PD",
- [RDMA_RESTRACK_CQ] = "CQ",
- [RDMA_RESTRACK_QP] = "QP",
- [RDMA_RESTRACK_CM_ID] = "CM_ID",
- [RDMA_RESTRACK_MR] = "MR",
- [RDMA_RESTRACK_CTX] = "CTX",
- [RDMA_RESTRACK_COUNTER] = "COUNTER",
- [RDMA_RESTRACK_SRQ] = "SRQ",
- };
-
- return names[type];
-};
-
/**
* rdma_restrack_clean() - clean resource tracking
* @dev: IB device
@@ -60,47 +44,14 @@ static const char *type2str(enum rdma_restrack_type type)
void rdma_restrack_clean(struct ib_device *dev)
{
struct rdma_restrack_root *rt = dev->res;
- struct rdma_restrack_entry *e;
- char buf[TASK_COMM_LEN];
- bool found = false;
- const char *owner;
int i;
for (i = 0 ; i < RDMA_RESTRACK_MAX; i++) {
struct xarray *xa = &dev->res[i].xa;
- if (!xa_empty(xa)) {
- unsigned long index;
-
- if (!found) {
- pr_err("restrack: %s", CUT_HERE);
- dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
- }
- xa_for_each(xa, index, e) {
- if (rdma_is_kernel_res(e)) {
- owner = e->kern_name;
- } else {
- /*
- * There is no need to call get_task_struct here,
- * because we can be here only if there are more
- * get_task_struct() call than put_task_struct().
- */
- get_task_comm(buf, e->task);
- owner = buf;
- }
-
- pr_err("restrack: %s %s object allocated by %s is not freed\n",
- rdma_is_kernel_res(e) ? "Kernel" :
- "User",
- type2str(e->type), owner);
- }
- found = true;
- }
+ WARN_ON(!xa_empty(xa));
xa_destroy(xa);
}
- if (found)
- pr_err("restrack: %s", CUT_HERE);
-
kfree(rt);
}
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index 5df401a30cb9..c8bd698e21b0 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -33,7 +33,8 @@ struct erdma_eq {
atomic64_t notify_num;
void __iomem *db;
- u64 *db_record;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
};
struct erdma_cmdq_sq {
@@ -48,7 +49,8 @@ struct erdma_cmdq_sq {
u16 wqebb_cnt;
- u64 *db_record;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
};
struct erdma_cmdq_cq {
@@ -61,7 +63,8 @@ struct erdma_cmdq_cq {
u32 ci;
u32 cmdsn;
- u64 *db_record;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
atomic64_t armed_num;
};
@@ -177,9 +180,6 @@ enum {
ERDMA_RES_CNT = 2,
};
-#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
-#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)
-
struct erdma_dev {
struct ib_device ibdev;
struct net_device *netdev;
@@ -213,6 +213,7 @@ struct erdma_dev {
atomic_t num_ctx;
struct list_head cep_list;
+ struct dma_pool *db_pool;
struct dma_pool *resp_pool;
};
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index a151a7bdd504..43ff40b5a09d 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -14,7 +14,7 @@ static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
- *cmdq->cq.db_record = db_data;
+ *cmdq->cq.dbrec = db_data;
writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);
atomic64_inc(&cmdq->cq.armed_num);
@@ -25,7 +25,7 @@ static void kick_cmdq_db(struct erdma_cmdq *cmdq)
struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
- *cmdq->sq.db_record = db_data;
+ *cmdq->sq.dbrec = db_data;
writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
}
@@ -89,20 +89,18 @@ static int erdma_cmdq_sq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_cmdq_sq *sq = &cmdq->sq;
- u32 buf_size;
sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;
- buf_size = sq->depth << SQEBB_SHIFT;
-
- sq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &sq->qbuf_dma_addr, GFP_KERNEL);
+ sq->qbuf = dma_alloc_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
+ &sq->qbuf_dma_addr, GFP_KERNEL);
if (!sq->qbuf)
return -ENOMEM;
- sq->db_record = (u64 *)(sq->qbuf + buf_size);
+ sq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &sq->dbrec_dma);
+ if (!sq->dbrec)
+ goto err_out;
spin_lock_init(&sq->lock);
@@ -111,30 +109,33 @@ static int erdma_cmdq_sq_init(struct erdma_dev *dev)
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
lower_32_bits(sq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
- erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG,
- sq->qbuf_dma_addr + buf_size);
+ erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG, sq->dbrec_dma);
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, sq->depth << SQEBB_SHIFT,
+ sq->qbuf, sq->qbuf_dma_addr);
+
+ return -ENOMEM;
}
static int erdma_cmdq_cq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_cmdq_cq *cq = &cmdq->cq;
- u32 buf_size;
cq->depth = cmdq->sq.depth;
- buf_size = cq->depth << CQE_SHIFT;
-
- cq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &cq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ cq->qbuf = dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
+ &cq->qbuf_dma_addr, GFP_KERNEL);
if (!cq->qbuf)
return -ENOMEM;
spin_lock_init(&cq->lock);
- cq->db_record = (u64 *)(cq->qbuf + buf_size);
+ cq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &cq->dbrec_dma);
+ if (!cq->dbrec)
+ goto err_out;
atomic64_set(&cq->armed_num, 0);
@@ -142,24 +143,25 @@ static int erdma_cmdq_cq_init(struct erdma_dev *dev)
upper_32_bits(cq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
lower_32_bits(cq->qbuf_dma_addr));
- erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG,
- cq->qbuf_dma_addr + buf_size);
+ erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG, cq->dbrec_dma);
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT, cq->qbuf,
+ cq->qbuf_dma_addr);
+
+ return -ENOMEM;
}
static int erdma_cmdq_eq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_eq *eq = &cmdq->eq;
- u32 buf_size;
eq->depth = cmdq->max_outstandings;
- buf_size = eq->depth << EQE_SHIFT;
-
- eq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ &eq->qbuf_dma_addr, GFP_KERNEL);
if (!eq->qbuf)
return -ENOMEM;
@@ -167,17 +169,24 @@ static int erdma_cmdq_eq_init(struct erdma_dev *dev)
atomic64_set(&eq->event_num, 0);
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG;
- eq->db_record = (u64 *)(eq->qbuf + buf_size);
+ eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+ if (!eq->dbrec)
+ goto err_out;
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
upper_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
lower_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
- erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG,
- eq->qbuf_dma_addr + buf_size);
+ erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
+ eq->qbuf_dma_addr);
+
+ return -ENOMEM;
}
int erdma_cmdq_init(struct erdma_dev *dev)
@@ -211,17 +220,17 @@ int erdma_cmdq_init(struct erdma_dev *dev)
return 0;
err_destroy_cq:
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->cq.depth << CQE_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+ dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
+
err_destroy_sq:
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->sq.depth << SQEBB_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+ dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
+
return err;
}
@@ -238,18 +247,20 @@ void erdma_cmdq_destroy(struct erdma_dev *dev)
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->eq.depth << EQE_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+ dma_free_coherent(&dev->pdev->dev, cmdq->eq.depth << EQE_SHIFT,
cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->sq.depth << SQEBB_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+
+ dma_pool_free(dev->db_pool, cmdq->eq.dbrec, cmdq->eq.dbrec_dma);
+
+ dma_free_coherent(&dev->pdev->dev, cmdq->sq.depth << SQEBB_SHIFT,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
- dma_free_coherent(&dev->pdev->dev,
- (cmdq->cq.depth << CQE_SHIFT) +
- ERDMA_EXTRA_BUFFER_SIZE,
+
+ dma_pool_free(dev->db_pool, cmdq->sq.dbrec, cmdq->sq.dbrec_dma);
+
+ dma_free_coherent(&dev->pdev->dev, cmdq->cq.depth << CQE_SHIFT,
cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
+
+ dma_pool_free(dev->db_pool, cmdq->cq.dbrec, cmdq->cq.dbrec_dma);
}
static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
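The erdma hunks above all make the same conversion: instead of carving the doorbell record out of the tail of each coherent queue buffer (the old ERDMA_EXTRA_BUFFER_SIZE/WARPPED_BUFSIZE layout), every queue now takes one ERDMA_DB_SIZE-aligned record from the per-device dma_pool created in erdma_device_init() and programs that record's own DMA handle into the *_DB_HOST_ADDR register. A minimal sketch of the pattern, with a hypothetical demo_queue standing in for the driver's cmdq/eq/cq/qp structures:

#include <linux/types.h>
#include <linux/dmapool.h>

struct demo_queue {
	u64 *dbrec;		/* doorbell record, CPU address */
	dma_addr_t dbrec_dma;	/* DMA address reported to the device */
};

/* Assumes db_pool was created with dma_pool_create(name, dev, ERDMA_DB_SIZE,
 * ERDMA_DB_SIZE, 0), matching the pool added in erdma_device_init().
 */
static int demo_alloc_dbrec(struct dma_pool *db_pool, struct demo_queue *q)
{
	q->dbrec = dma_pool_zalloc(db_pool, GFP_KERNEL, &q->dbrec_dma);
	return q->dbrec ? 0 : -ENOMEM;
}

static void demo_free_dbrec(struct dma_pool *db_pool, struct demo_queue *q)
{
	dma_pool_free(db_pool, q->dbrec, q->dbrec_dma);
}

With the record kept in the pool, the queue buffer itself is allocated at its natural size (depth << shift) with no extra padding, which is why the WARPPED_BUFSIZE() users disappear throughout the series.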
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index c1cb5568eab2..70f89f0162aa 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -26,7 +26,7 @@ static void notify_cq(struct erdma_cq *cq, u8 solcitied)
FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);
- *cq->kern_cq.db_record = db_data;
+ *cq->kern_cq.dbrec = db_data;
writeq(db_data, cq->kern_cq.db);
}
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index ea47cb21fdb8..84ccdd8144c9 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -13,7 +13,7 @@ void notify_eq(struct erdma_eq *eq)
u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
- *eq->db_record = db_data;
+ *eq->dbrec = db_data;
writeq(db_data, eq->db);
atomic64_inc(&eq->notify_num);
@@ -83,14 +83,11 @@ void erdma_aeq_event_handler(struct erdma_dev *dev)
int erdma_aeq_init(struct erdma_dev *dev)
{
struct erdma_eq *eq = &dev->aeq;
- u32 buf_size;
eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
- buf_size = eq->depth << EQE_SHIFT;
- eq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ &eq->qbuf_dma_addr, GFP_KERNEL);
if (!eq->qbuf)
return -ENOMEM;
@@ -99,26 +96,34 @@ int erdma_aeq_init(struct erdma_dev *dev)
atomic64_set(&eq->notify_num, 0);
eq->db = dev->func_bar + ERDMA_REGS_AEQ_DB_REG;
- eq->db_record = (u64 *)(eq->qbuf + buf_size);
+ eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+ if (!eq->dbrec)
+ goto err_out;
erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
upper_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
lower_32_bits(eq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
- erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG,
- eq->qbuf_dma_addr + buf_size);
+ erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG, eq->dbrec_dma);
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
+ eq->qbuf_dma_addr);
+
+ return -ENOMEM;
}
void erdma_aeq_destroy(struct erdma_dev *dev)
{
struct erdma_eq *eq = &dev->aeq;
- dma_free_coherent(&dev->pdev->dev,
- WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr);
+
+ dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
}
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
@@ -209,7 +214,6 @@ static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
struct erdma_cmdq_create_eq_req req;
- dma_addr_t db_info_dma_addr;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
CMDQ_OPCODE_CREATE_EQ);
@@ -219,9 +223,8 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.qtype = ERDMA_EQ_TYPE_CEQ;
/* Vector index is the same as EQN. */
req.vector_idx = eqn;
- db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
- req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
- req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
+ req.db_dma_addr_l = lower_32_bits(eq->dbrec_dma);
+ req.db_dma_addr_h = upper_32_bits(eq->dbrec_dma);
return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
@@ -229,12 +232,11 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
- u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
int ret;
- eq->qbuf =
- dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
- &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
+ eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
+ eq->qbuf = dma_alloc_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ &eq->qbuf_dma_addr, GFP_KERNEL);
if (!eq->qbuf)
return -ENOMEM;
@@ -242,10 +244,16 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
atomic64_set(&eq->event_num, 0);
atomic64_set(&eq->notify_num, 0);
- eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
eq->db = dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
(ceqn + 1) * ERDMA_DB_SIZE;
- eq->db_record = (u64 *)(eq->qbuf + buf_size);
+
+ eq->dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &eq->dbrec_dma);
+ if (!eq->dbrec) {
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT,
+ eq->qbuf, eq->qbuf_dma_addr);
+ return -ENOMEM;
+ }
+
eq->ci = 0;
dev->ceqs[ceqn].dev = dev;
@@ -259,7 +267,6 @@ static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
- u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
struct erdma_cmdq_destroy_eq_req req;
int err;
@@ -276,8 +283,9 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
if (err)
return;
- dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
+ dma_free_coherent(&dev->pdev->dev, eq->depth << EQE_SHIFT, eq->qbuf,
eq->qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, eq->dbrec, eq->dbrec_dma);
}
int erdma_ceqs_init(struct erdma_dev *dev)
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index 3212a1222760..05978f3b1475 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -240,7 +240,7 @@ struct erdma_cmdq_create_cq_req {
u32 qbuf_addr_l;
u32 qbuf_addr_h;
u32 cfg1;
- u64 cq_db_info_addr;
+ u64 cq_dbrec_dma;
u32 first_page_offset;
u32 cfg2;
};
@@ -335,8 +335,8 @@ struct erdma_cmdq_create_qp_req {
u64 rq_buf_addr;
u32 sq_mtt_cfg;
u32 rq_mtt_cfg;
- u64 sq_db_info_dma_addr;
- u64 rq_db_info_dma_addr;
+ u64 sq_dbrec_dma;
+ u64 rq_dbrec_dma;
u64 sq_mtt_entry[3];
u64 rq_mtt_entry[3];
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 472939172f0c..7080f8a71ec4 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -178,16 +178,26 @@ static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
if (!dev->resp_pool)
return -ENOMEM;
+ dev->db_pool = dma_pool_create("erdma_db_pool", &pdev->dev,
+ ERDMA_DB_SIZE, ERDMA_DB_SIZE, 0);
+ if (!dev->db_pool) {
+ ret = -ENOMEM;
+ goto destroy_resp_pool;
+ }
+
ret = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(ERDMA_PCI_WIDTH));
if (ret)
- goto destroy_pool;
+ goto destroy_db_pool;
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
return 0;
-destroy_pool:
+destroy_db_pool:
+ dma_pool_destroy(dev->db_pool);
+
+destroy_resp_pool:
dma_pool_destroy(dev->resp_pool);
return ret;
@@ -195,6 +205,7 @@ destroy_pool:
static void erdma_device_uninit(struct erdma_dev *dev)
{
+ dma_pool_destroy(dev->db_pool);
dma_pool_destroy(dev->resp_pool);
}
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 6d0330badd68..4d1f9114cd97 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -492,7 +492,7 @@ static void kick_sq_db(struct erdma_qp *qp, u16 pi)
u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);
- *(u64 *)qp->kern_qp.sq_db_info = db_data;
+ *(u64 *)qp->kern_qp.sq_dbrec = db_data;
writeq(db_data, qp->kern_qp.hw_sq_db);
}
@@ -557,7 +557,7 @@ static int erdma_post_recv_one(struct erdma_qp *qp,
return -EINVAL;
}
- *(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
+ *(u64 *)qp->kern_qp.rq_dbrec = *(u64 *)rqe;
writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 23dfc01603f8..40c9b6e46b82 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -76,10 +76,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
req.sq_buf_addr = qp->kern_qp.sq_buf_dma_addr;
- req.sq_db_info_dma_addr = qp->kern_qp.sq_buf_dma_addr +
- (qp->attrs.sq_size << SQEBB_SHIFT);
- req.rq_db_info_dma_addr = qp->kern_qp.rq_buf_dma_addr +
- (qp->attrs.rq_size << RQE_SHIFT);
+ req.sq_dbrec_dma = qp->kern_qp.sq_dbrec_dma;
+ req.rq_dbrec_dma = qp->kern_qp.rq_dbrec_dma;
} else {
user_qp = &qp->user_qp;
req.sq_cqn_mtt_cfg = FIELD_PREP(
@@ -107,8 +105,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
&req.rq_buf_addr, req.rq_mtt_entry);
- req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
- req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
+ req.sq_dbrec_dma = user_qp->sq_dbrec_dma;
+ req.rq_dbrec_dma = user_qp->rq_dbrec_dma;
if (uctx->ext_db.enable) {
req.sq_cqn_mtt_cfg |=
@@ -209,8 +207,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
ERDMA_MR_MTT_0LEVEL);
req.first_page_offset = 0;
- req.cq_db_info_addr =
- cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
+ req.cq_dbrec_dma = cq->kern_cq.dbrec_dma;
} else {
mem = &cq->user_cq.qbuf_mem;
req.cfg0 |=
@@ -233,7 +230,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
mem->mtt_nents);
req.first_page_offset = mem->page_offset;
- req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
+ req.cq_dbrec_dma = cq->user_cq.dbrec_dma;
if (uctx->ext_db.enable) {
req.cfg1 |= FIELD_PREP(
@@ -482,16 +479,24 @@ static void free_kernel_qp(struct erdma_qp *qp)
vfree(qp->kern_qp.rwr_tbl);
if (qp->kern_qp.sq_buf)
- dma_free_coherent(
- &dev->pdev->dev,
- WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
- qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ qp->attrs.sq_size << SQEBB_SHIFT,
+ qp->kern_qp.sq_buf,
+ qp->kern_qp.sq_buf_dma_addr);
+
+ if (qp->kern_qp.sq_dbrec)
+ dma_pool_free(dev->db_pool, qp->kern_qp.sq_dbrec,
+ qp->kern_qp.sq_dbrec_dma);
if (qp->kern_qp.rq_buf)
- dma_free_coherent(
- &dev->pdev->dev,
- WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
- qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
+ dma_free_coherent(&dev->pdev->dev,
+ qp->attrs.rq_size << RQE_SHIFT,
+ qp->kern_qp.rq_buf,
+ qp->kern_qp.rq_buf_dma_addr);
+
+ if (qp->kern_qp.rq_dbrec)
+ dma_pool_free(dev->db_pool, qp->kern_qp.rq_dbrec,
+ qp->kern_qp.rq_dbrec_dma);
}
static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
@@ -516,20 +521,27 @@ static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
if (!kqp->swr_tbl || !kqp->rwr_tbl)
goto err_out;
- size = (qp->attrs.sq_size << SQEBB_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ size = qp->attrs.sq_size << SQEBB_SHIFT;
kqp->sq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
&kqp->sq_buf_dma_addr, GFP_KERNEL);
if (!kqp->sq_buf)
goto err_out;
- size = (qp->attrs.rq_size << RQE_SHIFT) + ERDMA_EXTRA_BUFFER_SIZE;
+ kqp->sq_dbrec =
+ dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->sq_dbrec_dma);
+ if (!kqp->sq_dbrec)
+ goto err_out;
+
+ size = qp->attrs.rq_size << RQE_SHIFT;
kqp->rq_buf = dma_alloc_coherent(&dev->pdev->dev, size,
&kqp->rq_buf_dma_addr, GFP_KERNEL);
if (!kqp->rq_buf)
goto err_out;
- kqp->sq_db_info = kqp->sq_buf + (qp->attrs.sq_size << SQEBB_SHIFT);
- kqp->rq_db_info = kqp->rq_buf + (qp->attrs.rq_size << RQE_SHIFT);
+ kqp->rq_dbrec =
+ dma_pool_zalloc(dev->db_pool, GFP_KERNEL, &kqp->rq_dbrec_dma);
+ if (!kqp->rq_dbrec)
+ goto err_out;
return 0;
@@ -864,9 +876,9 @@ erdma_unmap_user_dbrecords(struct erdma_ucontext *ctx,
}
static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
- u64 va, u32 len, u64 db_info_va)
+ u64 va, u32 len, u64 dbrec_va)
{
- dma_addr_t db_info_dma_addr;
+ dma_addr_t dbrec_dma;
u32 rq_offset;
int ret;
@@ -889,14 +901,14 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
if (ret)
goto put_sq_mtt;
- ret = erdma_map_user_dbrecords(uctx, db_info_va,
+ ret = erdma_map_user_dbrecords(uctx, dbrec_va,
&qp->user_qp.user_dbr_page,
- &db_info_dma_addr);
+ &dbrec_dma);
if (ret)
goto put_rq_mtt;
- qp->user_qp.sq_db_info_dma_addr = db_info_dma_addr;
- qp->user_qp.rq_db_info_dma_addr = db_info_dma_addr + ERDMA_DB_SIZE;
+ qp->user_qp.sq_dbrec_dma = dbrec_dma;
+ qp->user_qp.rq_dbrec_dma = dbrec_dma + ERDMA_DB_SIZE;
return 0;
@@ -1237,9 +1249,10 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
return err;
if (rdma_is_kernel_res(&cq->ibcq.res)) {
- dma_free_coherent(&dev->pdev->dev,
- WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
+ cq->kern_cq.dbrec_dma);
} else {
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
@@ -1279,16 +1292,7 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
wait_for_completion(&qp->safe_free);
if (rdma_is_kernel_res(&qp->ibqp.res)) {
- vfree(qp->kern_qp.swr_tbl);
- vfree(qp->kern_qp.rwr_tbl);
- dma_free_coherent(
- &dev->pdev->dev,
- WARPPED_BUFSIZE(qp->attrs.rq_size << RQE_SHIFT),
- qp->kern_qp.rq_buf, qp->kern_qp.rq_buf_dma_addr);
- dma_free_coherent(
- &dev->pdev->dev,
- WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
- qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
+ free_kernel_qp(qp);
} else {
put_mtt_entries(dev, &qp->user_qp.sq_mem);
put_mtt_entries(dev, &qp->user_qp.rq_mem);
@@ -1588,7 +1592,7 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
ret = erdma_map_user_dbrecords(ctx, ureq->db_record_va,
&cq->user_cq.user_dbr_page,
- &cq->user_cq.db_info_dma_addr);
+ &cq->user_cq.dbrec_dma);
if (ret)
put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
@@ -1600,19 +1604,27 @@ static int erdma_init_kernel_cq(struct erdma_cq *cq)
struct erdma_dev *dev = to_edev(cq->ibcq.device);
cq->kern_cq.qbuf =
- dma_alloc_coherent(&dev->pdev->dev,
- WARPPED_BUFSIZE(cq->depth << CQE_SHIFT),
+ dma_alloc_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
&cq->kern_cq.qbuf_dma_addr, GFP_KERNEL);
if (!cq->kern_cq.qbuf)
return -ENOMEM;
- cq->kern_cq.db_record =
- (u64 *)(cq->kern_cq.qbuf + (cq->depth << CQE_SHIFT));
+ cq->kern_cq.dbrec = dma_pool_zalloc(dev->db_pool, GFP_KERNEL,
+ &cq->kern_cq.dbrec_dma);
+ if (!cq->kern_cq.dbrec)
+ goto err_out;
+
spin_lock_init(&cq->kern_cq.lock);
/* use default cqdb addr */
cq->kern_cq.db = dev->func_bar + ERDMA_BAR_CQDB_SPACE_OFFSET;
return 0;
+
+err_out:
+ dma_free_coherent(&dev->pdev->dev, cq->depth << CQE_SHIFT,
+ cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+
+ return -ENOMEM;
}
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
@@ -1676,9 +1688,10 @@ err_free_res:
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
} else {
- dma_free_coherent(&dev->pdev->dev,
- WARPPED_BUFSIZE(depth << CQE_SHIFT),
+ dma_free_coherent(&dev->pdev->dev, depth << CQE_SHIFT,
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
+ dma_pool_free(dev->db_pool, cq->kern_cq.dbrec,
+ cq->kern_cq.dbrec_dma);
}
err_out_xa:
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index db6018529ccc..4f02ba06b210 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -140,8 +140,8 @@ struct erdma_uqp {
struct erdma_mem sq_mem;
struct erdma_mem rq_mem;
- dma_addr_t sq_db_info_dma_addr;
- dma_addr_t rq_db_info_dma_addr;
+ dma_addr_t sq_dbrec_dma;
+ dma_addr_t rq_dbrec_dma;
struct erdma_user_dbrecords_page *user_dbr_page;
@@ -167,8 +167,11 @@ struct erdma_kqp {
void *rq_buf;
dma_addr_t rq_buf_dma_addr;
- void *sq_db_info;
- void *rq_db_info;
+ void *sq_dbrec;
+ void *rq_dbrec;
+
+ dma_addr_t sq_dbrec_dma;
+ dma_addr_t rq_dbrec_dma;
u8 sig_all;
};
@@ -246,13 +249,14 @@ struct erdma_kcq_info {
spinlock_t lock;
u8 __iomem *db;
- u64 *db_record;
+ u64 *dbrec;
+ dma_addr_t dbrec_dma;
};
struct erdma_ucq_info {
struct erdma_mem qbuf_mem;
struct erdma_user_dbrecords_page *user_dbr_page;
- dma_addr_t db_info_dma_addr;
+ dma_addr_t dbrec_dma;
};
struct erdma_cq {
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index b4209b6aed8d..3e02c474f59f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -59,8 +59,10 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibah->device);
struct hns_roce_ib_create_ah_resp resp = {};
struct hns_roce_ah *ah = to_hr_ah(ibah);
- int ret = 0;
- u32 max_sl;
+ u8 tclass = get_tclass(grh);
+ u8 priority = 0;
+ u8 tc_mode = 0;
+ int ret;
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
return -EOPNOTSUPP;
@@ -74,16 +76,23 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
ah->av.hop_limit = grh->hop_limit;
ah->av.flowlabel = grh->flow_label;
ah->av.udp_sport = get_ah_udp_sport(ah_attr);
- ah->av.tclass = get_tclass(grh);
-
- ah->av.sl = rdma_ah_get_sl(ah_attr);
- max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
- if (unlikely(ah->av.sl > max_sl)) {
- ibdev_err_ratelimited(&hr_dev->ib_dev,
- "failed to set sl, sl (%u) shouldn't be larger than %u.\n",
- ah->av.sl, max_sl);
+ ah->av.tclass = tclass;
+
+ ret = hr_dev->hw->get_dscp(hr_dev, tclass, &tc_mode, &priority);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+
+ if (ret && grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ return ret;
+
+ if (tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ ah->av.sl = priority;
+ else
+ ah->av.sl = rdma_ah_get_sl(ah_attr);
+
+ if (!check_sl_valid(hr_dev, ah->av.sl))
return -EINVAL;
- }
memcpy(ah->av.dgid, grh->dgid.raw, HNS_ROCE_GID_SIZE);
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
@@ -99,6 +108,8 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
}
if (udata) {
+ resp.priority = ah->av.sl;
+ resp.tc_mode = tc_mode;
memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
ret = ib_copy_to_udata(udata, &resp,
min(udata->outlen, sizeof(resp)));
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index c3cbd0a494bf..78b4d19ff848 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -645,6 +645,8 @@ struct hns_roce_qp {
struct hns_user_mmap_entry *dwqe_mmap_entry;
u32 config;
enum hns_roce_cong_type cong_type;
+ u8 tc_mode;
+ u8 priority;
};
struct hns_roce_ib_iboe {
@@ -950,6 +952,8 @@ struct hns_roce_hw {
int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
u64 *stats, u32 port, int *hw_counters);
+ int (*get_dscp)(struct hns_roce_dev *hr_dev, u8 dscp,
+ u8 *tc_mode, u8 *priority);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
@@ -1292,4 +1296,6 @@ struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,
enum hns_roce_mmap_type mmap_type);
+bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
+
#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index ba7ae792d279..423ab66c5856 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -443,10 +443,6 @@ static int fill_ud_av(struct hns_roce_v2_ud_send_wqe *ud_sq_wqe,
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass);
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel);
-
- if (WARN_ON(ah->av.sl > MAX_SERVICE_LEVEL))
- return -EINVAL;
-
hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl);
ud_sq_wqe->sgid_index = ah->av.gid_index;
@@ -4828,6 +4824,69 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return 0;
}
+static int hns_roce_hw_v2_get_dscp(struct hns_roce_dev *hr_dev, u8 dscp,
+ u8 *tc_mode, u8 *priority)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!ops->get_dscp_prio)
+ return -EOPNOTSUPP;
+
+ return ops->get_dscp_prio(handle, dscp, tc_mode, priority);
+}
+
+bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl)
+{
+ u32 max_sl;
+
+ max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
+ if (unlikely(sl > max_sl)) {
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "failed to set SL(%u). Shouldn't be larger than %u.\n",
+ sl, max_sl);
+ return false;
+ }
+
+ return true;
+}
+
+static int hns_roce_set_sl(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ int ret;
+
+ ret = hns_roce_hw_v2_get_dscp(hr_dev, get_tclass(&attr->ah_attr.grh),
+ &hr_qp->tc_mode, &hr_qp->priority);
+ if (ret && ret != -EOPNOTSUPP &&
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ ibdev_err_ratelimited(ibdev,
+ "failed to get dscp, ret = %d.\n", ret);
+ return ret;
+ }
+
+ if (hr_qp->tc_mode == HNAE3_TC_MAP_MODE_DSCP &&
+ grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ hr_qp->sl = hr_qp->priority;
+ else
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+
+ if (!check_sl_valid(hr_dev, hr_qp->sl))
+ return -EINVAL;
+
+ hr_reg_write(context, QPC_SL, hr_qp->sl);
+ hr_reg_clear(qpc_mask, QPC_SL);
+
+ return 0;
+}
+
static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask,
@@ -4843,25 +4902,18 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
int is_roce_protocol;
u16 vlan_id = 0xffff;
bool is_udp = false;
- u32 max_sl;
u8 ib_port;
u8 hr_port;
int ret;
- max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1);
- if (unlikely(sl > max_sl)) {
- ibdev_err_ratelimited(ibdev,
- "failed to fill QPC, sl (%u) shouldn't be larger than %u.\n",
- sl, max_sl);
- return -EINVAL;
- }
-
/*
* If free_mr_en of qp is set, it means that this qp comes from
* free mr. This qp will perform the loopback operation.
* In the loopback scenario, only sl needs to be set.
*/
if (hr_qp->free_mr_en) {
+ if (!check_sl_valid(hr_dev, sl))
+ return -EINVAL;
hr_reg_write(context, QPC_SL, sl);
hr_reg_clear(qpc_mask, QPC_SL);
hr_qp->sl = sl;
@@ -4931,11 +4983,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
- hr_qp->sl = sl;
- hr_reg_write(context, QPC_SL, hr_qp->sl);
- hr_reg_clear(qpc_mask, QPC_SL);
-
- return 0;
+ return hns_roce_set_sl(ibqp, attr, context, qpc_mask);
}
static bool check_qp_state(enum ib_qp_state cur_state,
@@ -6735,6 +6783,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.query_srqc = hns_roce_v2_query_srqc,
.query_sccc = hns_roce_v2_query_sccc,
.query_hw_counter = hns_roce_hw_v2_query_counter,
+ .get_dscp = hns_roce_hw_v2_get_dscp,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index f35a66325d9a..697230f964b1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1386,6 +1386,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_ib_modify_qp_resp resp = {};
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
enum ib_qp_state cur_state, new_state;
int ret = -EINVAL;
@@ -1427,6 +1428,18 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
new_state, udata);
+ if (ret)
+ goto out;
+
+ if (udata && udata->outlen) {
+ resp.tc_mode = hr_qp->tc_mode;
+ resp.priority = hr_qp->sl;
+ ret = ib_copy_to_udata(udata, &resp,
+ min(udata->outlen, sizeof(resp)));
+ if (ret)
+ ibdev_err_ratelimited(&hr_dev->ib_dev,
+ "failed to copy modify qp resp.\n");
+ }
out:
mutex_unlock(&hr_qp->mutex);
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 4a71e678d09c..c9129218f1be 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -39,37 +39,13 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->cqe = attr->cqe;
- cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(cq->umem)) {
- err = PTR_ERR(cq->umem);
- ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
- err);
- return err;
- }
-
- err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
+ err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
if (err) {
- ibdev_dbg(ibdev,
- "Failed to create dma region for create cq, %d\n",
- err);
- goto err_release_umem;
+ ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
+ return err;
}
- ibdev_dbg(ibdev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, cq->gdma_region);
-
- /*
- * The CQ ID is not known at this time. The ID is generated at create_qp
- */
- cq->id = INVALID_QUEUE_ID;
-
return 0;
-
-err_release_umem:
- ib_umem_release(cq->umem);
- return err;
}
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -78,24 +54,16 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct ib_device *ibdev = ibcq->device;
struct mana_ib_dev *mdev;
struct gdma_context *gc;
- int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
gc = mdev_to_gc(mdev);
- err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
- if (err) {
- ibdev_dbg(ibdev,
- "Failed to destroy dma region, %d\n", err);
- return err;
- }
-
- if (cq->id != INVALID_QUEUE_ID) {
- kfree(gc->cq_table[cq->id]);
- gc->cq_table[cq->id] = NULL;
+ if (cq->queue.id != INVALID_QUEUE_ID) {
+ kfree(gc->cq_table[cq->queue.id]);
+ gc->cq_table[cq->queue.id] = NULL;
}
- ib_umem_release(cq->umem);
+ mana_ib_destroy_queue(mdev, &cq->queue);
return 0;
}
@@ -114,7 +82,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
struct gdma_queue *gdma_cq;
/* Create CQ table entry */
- WARN_ON(gc->cq_table[cq->id]);
+ WARN_ON(gc->cq_table[cq->queue.id]);
gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
if (!gdma_cq)
return -ENOMEM;
@@ -122,7 +90,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
gdma_cq->cq.context = cq;
gdma_cq->type = GDMA_CQ;
gdma_cq->cq.callback = mana_ib_cq_handler;
- gdma_cq->id = cq->id;
- gc->cq_table[cq->id] = gdma_cq;
+ gdma_cq->id = cq->queue.id;
+ gc->cq_table[cq->queue.id] = gdma_cq;
return 0;
}
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 71e33feee61b..4524c6b80748 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -237,6 +237,49 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue)
+{
+ struct ib_umem *umem;
+ int err;
+
+ queue->umem = NULL;
+ queue->id = INVALID_QUEUE_ID;
+ queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+ umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem)) {
+ err = PTR_ERR(umem);
+ ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
+ return err;
+ }
+
+ err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
+ goto free_umem;
+ }
+ queue->umem = umem;
+
+ ibdev_dbg(&mdev->ib_dev,
+ "create_dma_region ret %d gdma_region 0x%llx\n",
+ err, queue->gdma_region);
+
+ return 0;
+free_umem:
+ ib_umem_release(umem);
+ return err;
+}
+
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
+{
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
+ ib_umem_release(queue->umem);
+}
+
static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
struct gdma_context *gc,
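The two helpers added above consolidate what the cq.c hunk earlier and the qp.c/wq.c hunks below used to open-code: ib_umem_get() plus mana_ib_create_zero_offset_dma_region() on setup, and the DMA-region destroy plus ib_umem_release() on teardown. A hedged sketch of the intended caller pattern (the demo_* wrappers and buf_addr/buf_size are hypothetical; real callers pass the address and size from the user's create command):

#include "mana_ib.h"

static int demo_setup(struct mana_ib_dev *mdev, u64 buf_addr, u32 buf_size,
		      struct mana_ib_queue *queue)
{
	int err;

	/* Pins the user buffer and creates a zero-offset DMA region; on
	 * success queue->umem and queue->gdma_region are valid, while
	 * queue->id stays INVALID_QUEUE_ID until the hardware object exists.
	 */
	err = mana_ib_create_queue(mdev, buf_addr, buf_size, queue);
	if (err)
		return err;

	/* The caller hands queue->gdma_region to the WQ-object creation and,
	 * once the hardware owns it, resets it to GDMA_INVALID_DMA_REGION,
	 * as the qp.c and wq.c hunks below do.
	 */
	return 0;
}

static void demo_teardown(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
{
	/* Destroys the DMA region (errors are only logged) and releases the umem. */
	mana_ib_destroy_queue(mdev, queue);
}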
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index f83390eebb7d..ceca21cef72a 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
u32 max_inline_data_size;
};
+struct mana_ib_queue {
+ struct ib_umem *umem;
+ u64 gdma_region;
+ u64 id;
+};
+
struct mana_ib_dev {
struct ib_device ib_dev;
struct gdma_dev *gdma_dev;
@@ -53,11 +59,9 @@ struct mana_ib_dev {
struct mana_ib_wq {
struct ib_wq ibwq;
- struct ib_umem *umem;
+ struct mana_ib_queue queue;
int wqe;
u32 wq_buf_size;
- u64 gdma_region;
- u64 id;
mana_handle_t rx_object;
};
@@ -82,22 +86,16 @@ struct mana_ib_mr {
struct mana_ib_cq {
struct ib_cq ibcq;
- struct ib_umem *umem;
+ struct mana_ib_queue queue;
int cqe;
- u64 gdma_region;
- u64 id;
u32 comp_vector;
};
struct mana_ib_qp {
struct ib_qp ibqp;
- /* Work queue info */
- struct ib_umem *sq_umem;
- int sqe;
- u64 sq_gdma_region;
- u64 sq_id;
- mana_handle_t tx_object;
+ mana_handle_t qp_handle;
+ struct mana_ib_queue raw_sq;
/* The port on the IB device, starting with 1 */
u32 port;
@@ -169,6 +167,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue);
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
+
struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 258f89464c10..4cd8f8afe80d 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -192,10 +192,10 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
ibcq = ibwq->cq;
cq = container_of(ibcq, struct mana_ib_cq, ibcq);
- wq_spec.gdma_region = wq->gdma_region;
+ wq_spec.gdma_region = wq->queue.gdma_region;
wq_spec.queue_size = wq->wq_buf_size;
- cq_spec.gdma_region = cq->gdma_region;
+ cq_spec.gdma_region = cq->queue.gdma_region;
cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
@@ -210,18 +210,18 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
}
/* The GDMA regions are now owned by the WQ object */
- wq->gdma_region = GDMA_INVALID_DMA_REGION;
- cq->gdma_region = GDMA_INVALID_DMA_REGION;
+ wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+ cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
- wq->id = wq_spec.queue_index;
- cq->id = cq_spec.queue_index;
+ wq->queue.id = wq_spec.queue_index;
+ cq->queue.id = cq_spec.queue_index;
ibdev_dbg(&mdev->ib_dev,
"ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
- ret, wq->rx_object, wq->id, cq->id);
+ ret, wq->rx_object, wq->queue.id, cq->queue.id);
- resp.entries[i].cqid = cq->id;
- resp.entries[i].wqid = wq->id;
+ resp.entries[i].cqid = cq->queue.id;
+ resp.entries[i].wqid = wq->queue.id;
mana_ind_table[i] = wq->rx_object;
@@ -230,7 +230,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
if (ret)
goto fail;
- gdma_cq_allocated[i] = gc->cq_table[cq->id];
+ gdma_cq_allocated[i] = gc->cq_table[cq->queue.id];
}
resp.num_entries = i;
@@ -262,7 +262,7 @@ fail:
wq = container_of(ibwq, struct mana_ib_wq, ibwq);
cq = container_of(ibcq, struct mana_ib_cq, ibcq);
- gc->cq_table[cq->id] = NULL;
+ gc->cq_table[cq->queue.id] = NULL;
kfree(gdma_cq_allocated[i]);
mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
@@ -295,7 +295,6 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
struct mana_obj_spec cq_spec = {};
struct mana_port_context *mpc;
struct net_device *ndev;
- struct ib_umem *umem;
struct mana_eq *eq;
int eq_vec;
u32 port;
@@ -344,35 +343,18 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
ucmd.sq_buf_addr, ucmd.port);
- umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
- ibdev_dbg(&mdev->ib_dev,
- "Failed to get umem for create qp-raw, err %d\n",
- err);
- goto err_free_vport;
- }
- qp->sq_umem = umem;
-
- err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
- &qp->sq_gdma_region);
+ err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
if (err) {
ibdev_dbg(&mdev->ib_dev,
- "Failed to create dma region for create qp-raw, %d\n",
- err);
- goto err_release_umem;
+ "Failed to create queue for create qp-raw, err %d\n", err);
+ goto err_free_vport;
}
- ibdev_dbg(&mdev->ib_dev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, qp->sq_gdma_region);
-
/* Create a WQ on the same port handle used by the Ethernet */
- wq_spec.gdma_region = qp->sq_gdma_region;
+ wq_spec.gdma_region = qp->raw_sq.gdma_region;
wq_spec.queue_size = ucmd.sq_buf_size;
- cq_spec.gdma_region = send_cq->gdma_region;
+ cq_spec.gdma_region = send_cq->queue.gdma_region;
cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
eq_vec = send_cq->comp_vector % gc->max_num_queues;
@@ -380,20 +362,20 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
cq_spec.attached_eq = eq->eq->id;
err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
- &cq_spec, &qp->tx_object);
+ &cq_spec, &qp->qp_handle);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create wq for create raw-qp, err %d\n",
err);
- goto err_destroy_dma_region;
+ goto err_destroy_queue;
}
/* The GDMA regions are now owned by the WQ object */
- qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
- send_cq->gdma_region = GDMA_INVALID_DMA_REGION;
+ qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
+ send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
- qp->sq_id = wq_spec.queue_index;
- send_cq->id = cq_spec.queue_index;
+ qp->raw_sq.id = wq_spec.queue_index;
+ send_cq->queue.id = cq_spec.queue_index;
/* Create CQ table entry */
err = mana_ib_install_cq_cb(mdev, send_cq);
@@ -401,11 +383,11 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto err_destroy_wq_obj;
ibdev_dbg(&mdev->ib_dev,
- "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
- qp->tx_object, qp->sq_id, send_cq->id);
+ "ret %d qp->qp_handle 0x%llx sq id %llu cq id %llu\n", err,
+ qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);
- resp.sqid = qp->sq_id;
- resp.cqid = send_cq->id;
+ resp.sqid = qp->raw_sq.id;
+ resp.cqid = send_cq->queue.id;
resp.tx_vp_offset = pd->tx_vp_offset;
err = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -420,16 +402,13 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
err_release_gdma_cq:
kfree(gdma_cq);
- gc->cq_table[send_cq->id] = NULL;
+ gc->cq_table[send_cq->queue.id] = NULL;
err_destroy_wq_obj:
- mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+ mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
-err_destroy_dma_region:
- mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
-
-err_release_umem:
- ib_umem_release(umem);
+err_destroy_queue:
+ mana_ib_destroy_queue(mdev, &qp->raw_sq);
err_free_vport:
mana_ib_uncfg_vport(mdev, pd, port);
@@ -503,12 +482,9 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
mpc = netdev_priv(ndev);
pd = container_of(ibpd, struct mana_ib_pd, ibpd);
- mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+ mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
- if (qp->sq_umem) {
- mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
- ib_umem_release(qp->sq_umem);
- }
+ mana_ib_destroy_queue(mdev, &qp->raw_sq);
mana_ib_uncfg_vport(mdev, pd, qp->port);
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 7c9c69962573..f959f4b9244f 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -13,7 +13,6 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
container_of(pd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_create_wq ucmd = {};
struct mana_ib_wq *wq;
- struct ib_umem *umem;
int err;
if (udata->inlen < sizeof(ucmd))
@@ -32,39 +31,18 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
ibdev_dbg(&mdev->ib_dev, "ucmd wq_buf_addr 0x%llx\n", ucmd.wq_buf_addr);
- umem = ib_umem_get(pd->device, ucmd.wq_buf_addr, ucmd.wq_buf_size,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
+ err = mana_ib_create_queue(mdev, ucmd.wq_buf_addr, ucmd.wq_buf_size, &wq->queue);
+ if (err) {
ibdev_dbg(&mdev->ib_dev,
- "Failed to get umem for create wq, err %d\n", err);
+ "Failed to create queue for create wq, %d\n", err);
goto err_free_wq;
}
- wq->umem = umem;
wq->wqe = init_attr->max_wr;
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
-
- err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
- if (err) {
- ibdev_dbg(&mdev->ib_dev,
- "Failed to create dma region for create wq, %d\n",
- err);
- goto err_release_umem;
- }
-
- ibdev_dbg(&mdev->ib_dev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, wq->gdma_region);
-
- /* WQ ID is returned at wq_create time, doesn't know the value yet */
-
return &wq->ibwq;
-err_release_umem:
- ib_umem_release(umem);
-
err_free_wq:
kfree(wq);
@@ -86,8 +64,7 @@ int mana_ib_destroy_wq(struct ib_wq *ibwq, struct ib_udata *udata)
mdev = container_of(ib_dev, struct mana_ib_dev, ib_dev);
- mana_ib_gd_destroy_dma_region(mdev, wq->gdma_region);
- ib_umem_release(wq->umem);
+ mana_ib_destroy_queue(mdev, &wq->queue);
kfree(wq);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a8de35c07c9e..f255a12e26a0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -643,9 +643,10 @@ struct mlx5_ib_mkey {
unsigned int ndescs;
struct wait_queue_head wait;
refcount_t usecount;
- /* User Mkey must hold either a rb_key or a cache_ent. */
+ /* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
struct mlx5r_cache_rb_key rb_key;
struct mlx5_cache_ent *cache_ent;
+ u8 cacheable : 1;
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a8ee2ca1f4a1..ecc111ed5d86 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1158,6 +1158,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
if (IS_ERR(mr))
return mr;
mr->mmkey.rb_key = rb_key;
+ mr->mmkey.cacheable = true;
return mr;
}
@@ -1168,6 +1169,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
mr->page_shift = order_base_2(page_size);
+ mr->mmkey.cacheable = true;
set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr;
@@ -1570,7 +1572,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
unsigned int diffs = current_access_flags ^ target_access_flags;
if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
+ IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
+ IB_ACCESS_REMOTE_ATOMIC))
return false;
return mlx5r_umr_can_reconfig(dev, current_access_flags,
target_access_flags);
@@ -1835,6 +1838,23 @@ end:
return ret;
}
+static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+
+ if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+ return 0;
+
+ if (ent) {
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ent->in_use--;
+ mr->mmkey.cache_ent = NULL;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ }
+ return destroy_mkey(dev, mr);
+}
+
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1880,16 +1900,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
}
/* Stop DMA */
- if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
- if (mlx5r_umr_revoke_mr(mr) ||
- cache_ent_find_and_store(dev, mr))
- mr->mmkey.cache_ent = NULL;
-
- if (!mr->mmkey.cache_ent) {
- rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
- if (rc)
- return rc;
- }
+ rc = mlx5_revoke_mr(mr);
+ if (rc)
+ return rc;
if (mr->umem) {
bool is_odp = is_odp_mr(mr);
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 158670da2b2a..94e861870e27 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -109,6 +109,12 @@ struct hns_roce_ib_create_qp_resp {
__aligned_u64 dwqe_mmap_key;
};
+struct hns_roce_ib_modify_qp_resp {
+ __u8 tc_mode;
+ __u8 priority;
+ __u8 reserved[6];
+};
+
enum {
HNS_ROCE_EXSGE_FLAGS = 1 << 0,
HNS_ROCE_RQ_INLINE_FLAGS = 1 << 1,
@@ -143,7 +149,8 @@ struct hns_roce_ib_alloc_pd_resp {
struct hns_roce_ib_create_ah_resp {
__u8 dmac[6];
- __u8 reserved[2];
+ __u8 priority;
+ __u8 tc_mode;
};
#endif /* HNS_ABI_USER_H */