Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 2
-rw-r--r--  drivers/infiniband/hw/efa/efa_admin_cmds_defs.h | 6
-rw-r--r--  drivers/infiniband/hw/efa/efa_com_cmd.c | 5
-rw-r--r--  drivers/infiniband/hw/efa/efa_com_cmd.h | 3
-rw-r--r--  drivers/infiniband/hw/efa/efa_io_defs.h | 289
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 11
-rw-r--r--  drivers/infiniband/hw/erdma/erdma.h | 4
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_cm.c | 8
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_cmdq.c | 8
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_cq.c | 4
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_eq.c | 13
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_hw.h | 14
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_main.c | 17
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_qp.c | 15
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.c | 35
-rw-r--r--  drivers/infiniband/hw/erdma/erdma_verbs.h | 9
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/firmware.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib_rx.c | 5
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.h | 3
-rw-r--r--  drivers/infiniband/hw/hns/Makefile | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c | 6
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 25
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c | 33
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.h | 5
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 164
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 14
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c | 34
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c | 64
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 7
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_qp.c | 19
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_restrack.c | 220
-rw-r--r--  drivers/infiniband/hw/irdma/defs.h | 1
-rw-r--r--  drivers/infiniband/hw/irdma/hw.c | 51
-rw-r--r--  drivers/infiniband/hw/irdma/type.h | 1
-rw-r--r--  drivers/infiniband/hw/irdma/user.h | 1
-rw-r--r--  drivers/infiniband/hw/irdma/utils.c | 3
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c | 69
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 57
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 36
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c | 2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_uiom.c | 3
52 files changed, 912 insertions(+), 395 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 3d6834d3d4fb..8c0c80a8d338 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -725,7 +725,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
/* ib device init */
ibdev->node_type = RDMA_NODE_IB_CA;
- strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
+ strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
strlen(BNXT_RE_DESC) + 5);
ibdev->phys_port_cnt = 1;
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 0b0b93b529f3..d4b9226088bd 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -444,7 +444,10 @@ struct efa_admin_create_cq_cmd {
/*
* 4:0 : cq_entry_size_words - size of CQ entry in
* 32-bit words, valid values: 4, 8.
- * 7:5 : reserved7 - MBZ
+ * 5 : set_src_addr - If set, source address will be
+ * filled on RX completions from unknown senders.
+ * Requires 8 words CQ entry size.
+ * 7:6 : reserved7 - MBZ
*/
u8 cq_caps_2;
@@ -980,6 +983,7 @@ struct efa_admin_host_info {
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+#define EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR_MASK BIT(5)
/* create_cq_resp */
#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK BIT(0)
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index fb405da4e1db..8f8885e002ba 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -168,7 +168,10 @@ int efa_com_create_cq(struct efa_com_dev *edev,
EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED, 1);
create_cmd.eqn = params->eqn;
}
-
+ if (params->set_src_addr) {
+ EFA_SET(&create_cmd.cq_caps_2,
+ EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR, 1);
+ }
efa_com_set_dma_addr(params->dma_addr,
&create_cmd.cq_ba.mem_addr_high,
&create_cmd.cq_ba.mem_addr_low);
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index c33010bbf9e8..0898ad5bc340 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -75,7 +75,8 @@ struct efa_com_create_cq_params {
u16 uarn;
u16 eqn;
u8 entry_size_in_bytes;
- bool interrupt_mode_enabled;
+ u8 interrupt_mode_enabled : 1;
+ u8 set_src_addr : 1;
};
struct efa_com_create_cq_result {
diff --git a/drivers/infiniband/hw/efa/efa_io_defs.h b/drivers/infiniband/hw/efa/efa_io_defs.h
new file mode 100644
index 000000000000..17ba8984b11e
--- /dev/null
+++ b/drivers/infiniband/hw/efa/efa_io_defs.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef _EFA_IO_H_
+#define _EFA_IO_H_
+
+#define EFA_IO_TX_DESC_NUM_BUFS 2
+#define EFA_IO_TX_DESC_NUM_RDMA_BUFS 1
+#define EFA_IO_TX_DESC_INLINE_MAX_SIZE 32
+#define EFA_IO_TX_DESC_IMM_DATA_SIZE 4
+
+enum efa_io_queue_type {
+ /* send queue (of a QP) */
+ EFA_IO_SEND_QUEUE = 1,
+ /* recv queue (of a QP) */
+ EFA_IO_RECV_QUEUE = 2,
+};
+
+enum efa_io_send_op_type {
+ /* send message */
+ EFA_IO_SEND = 0,
+ /* RDMA read */
+ EFA_IO_RDMA_READ = 1,
+};
+
+enum efa_io_comp_status {
+ /* Successful completion */
+ EFA_IO_COMP_STATUS_OK = 0,
+ /* Flushed during QP destroy */
+ EFA_IO_COMP_STATUS_FLUSHED = 1,
+ /* Internal QP error */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_QP_INTERNAL_ERROR = 2,
+ /* Bad operation type */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_OP_TYPE = 3,
+ /* Bad AH */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_AH = 4,
+ /* LKEY not registered or does not match IOVA */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_INVALID_LKEY = 5,
+ /* Message too long */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_BAD_LENGTH = 6,
+ /* Destination ENI is down or does not run EFA */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_ADDRESS = 7,
+ /* Connection was reset by remote side */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_ABORT = 8,
+ /* Bad dest QP number (QP does not exist or is in error state) */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_DEST_QPN = 9,
+ /* Destination resource not ready (no WQEs posted on RQ) */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_RNR = 10,
+ /* Receiver SGL too short */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_LENGTH = 11,
+ /* Unexpected status returned by responder */
+ EFA_IO_COMP_STATUS_REMOTE_ERROR_BAD_STATUS = 12,
+ /* Unresponsive remote - detected locally */
+ EFA_IO_COMP_STATUS_LOCAL_ERROR_UNRESP_REMOTE = 13,
+};
+
+struct efa_io_tx_meta_desc {
+ /* Verbs-generated Request ID */
+ u16 req_id;
+
+ /*
+ * control flags
+ * 3:0 : op_type - operation type: send/rdma/fast mem
+ * ops/etc
+ * 4 : has_imm - immediate_data field carries valid
+ * data.
+ * 5 : inline_msg - inline mode - inline message data
+ * follows this descriptor (no buffer descriptors).
+ * Note that it is different from immediate data
+ * 6 : meta_extension - Extended metadata. MBZ
+ * 7 : meta_desc - Indicates metadata descriptor.
+ * Must be set.
+ */
+ u8 ctrl1;
+
+ /*
+ * control flags
+ * 0 : phase
+ * 1 : reserved25 - MBZ
+ * 2 : first - Indicates first descriptor in
+ * transaction. Must be set.
+ * 3 : last - Indicates last descriptor in
+ * transaction. Must be set.
+ * 4 : comp_req - Indicates whether completion should
+ * be posted, after packet is transmitted. Valid only
+ * for the first descriptor
+ * 7:5 : reserved29 - MBZ
+ */
+ u8 ctrl2;
+
+ u16 dest_qp_num;
+
+ /*
+ * If inline_msg bit is set, length of inline message in bytes,
+ * otherwise length of SGL (number of buffers).
+ */
+ u16 length;
+
+ /*
+ * immediate data: if has_imm is set, then this field is included
+ * within Tx message and reported in remote Rx completion.
+ */
+ u32 immediate_data;
+
+ u16 ah;
+
+ u16 reserved;
+
+ /* Queue key */
+ u32 qkey;
+
+ u8 reserved2[12];
+};
+
+/*
+ * Tx queue buffer descriptor, for any transport type. Preceded by metadata
+ * descriptor.
+ */
+struct efa_io_tx_buf_desc {
+ /* length in bytes */
+ u32 length;
+
+ /*
+ * 23:0 : lkey - local memory translation key
+ * 31:24 : reserved - MBZ
+ */
+ u32 lkey;
+
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer address bits[63:32] */
+ u32 buf_addr_hi;
+};
+
+struct efa_io_remote_mem_addr {
+ /* length in bytes */
+ u32 length;
+
+ /* remote memory translation key */
+ u32 rkey;
+
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer address bits[63:32] */
+ u32 buf_addr_hi;
+};
+
+struct efa_io_rdma_req {
+ /* Remote memory address */
+ struct efa_io_remote_mem_addr remote_mem;
+
+ /* Local memory address */
+ struct efa_io_tx_buf_desc local_mem[1];
+};
+
+/*
+ * Tx WQE, composed of tx meta descriptors followed by either tx buffer
+ * descriptors or inline data
+ */
+struct efa_io_tx_wqe {
+ /* TX meta */
+ struct efa_io_tx_meta_desc meta;
+
+ union {
+ /* Send buffer descriptors */
+ struct efa_io_tx_buf_desc sgl[2];
+
+ u8 inline_data[32];
+
+ /* RDMA local and remote memory addresses */
+ struct efa_io_rdma_req rdma_req;
+ } data;
+};
+
+/*
+ * Rx buffer descriptor; RX WQE is composed of one or more RX buffer
+ * descriptors.
+ */
+struct efa_io_rx_desc {
+ /* Buffer address bits[31:0] */
+ u32 buf_addr_lo;
+
+ /* Buffer Pointer[63:32] */
+ u32 buf_addr_hi;
+
+ /* Verbs-generated request id. */
+ u16 req_id;
+
+ /* Length in bytes. */
+ u16 length;
+
+ /*
+ * LKey and control flags
+ * 23:0 : lkey
+ * 29:24 : reserved - MBZ
+ * 30 : first - Indicates first descriptor in WQE
+ * 31 : last - Indicates last descriptor in WQE
+ */
+ u32 lkey_ctrl;
+};
+
+/* Common IO completion descriptor */
+struct efa_io_cdesc_common {
+ /*
+ * verbs-generated request ID, as provided in the completed tx or rx
+ * descriptor.
+ */
+ u16 req_id;
+
+ u8 status;
+
+ /*
+ * flags
+ * 0 : phase - Phase bit
+ * 2:1 : q_type - enum efa_io_queue_type: send/recv
+ * 3 : has_imm - indicates that immediate data is
+ * present - for RX completions only
+ * 7:4 : reserved28 - MBZ
+ */
+ u8 flags;
+
+ /* local QP number */
+ u16 qp_num;
+
+ /* Transferred length */
+ u16 length;
+};
+
+/* Tx completion descriptor */
+struct efa_io_tx_cdesc {
+ /* Common completion info */
+ struct efa_io_cdesc_common common;
+};
+
+/* Rx Completion Descriptor */
+struct efa_io_rx_cdesc {
+ /* Common completion info */
+ struct efa_io_cdesc_common common;
+
+ /* Remote Address Handle FW index, 0xFFFF indicates invalid ah */
+ u16 ah;
+
+ u16 src_qp_num;
+
+ /* Immediate data */
+ u32 imm;
+};
+
+/* Extended Rx Completion Descriptor */
+struct efa_io_rx_cdesc_ex {
+ /* Base RX completion info */
+ struct efa_io_rx_cdesc rx_cdesc_base;
+
+ /*
+ * Valid only in case of unknown AH (0xFFFF) and CQ set_src_addr is
+ * enabled.
+ */
+ u8 src_addr[16];
+};
+
+/* tx_meta_desc */
+#define EFA_IO_TX_META_DESC_OP_TYPE_MASK GENMASK(3, 0)
+#define EFA_IO_TX_META_DESC_HAS_IMM_MASK BIT(4)
+#define EFA_IO_TX_META_DESC_INLINE_MSG_MASK BIT(5)
+#define EFA_IO_TX_META_DESC_META_EXTENSION_MASK BIT(6)
+#define EFA_IO_TX_META_DESC_META_DESC_MASK BIT(7)
+#define EFA_IO_TX_META_DESC_PHASE_MASK BIT(0)
+#define EFA_IO_TX_META_DESC_FIRST_MASK BIT(2)
+#define EFA_IO_TX_META_DESC_LAST_MASK BIT(3)
+#define EFA_IO_TX_META_DESC_COMP_REQ_MASK BIT(4)
+
+/* tx_buf_desc */
+#define EFA_IO_TX_BUF_DESC_LKEY_MASK GENMASK(23, 0)
+
+/* rx_desc */
+#define EFA_IO_RX_DESC_LKEY_MASK GENMASK(23, 0)
+#define EFA_IO_RX_DESC_FIRST_MASK BIT(30)
+#define EFA_IO_RX_DESC_LAST_MASK BIT(31)
+
+/* cdesc_common */
+#define EFA_IO_CDESC_COMMON_PHASE_MASK BIT(0)
+#define EFA_IO_CDESC_COMMON_Q_TYPE_MASK GENMASK(2, 1)
+#define EFA_IO_CDESC_COMMON_HAS_IMM_MASK BIT(3)
+
+#endif /* _EFA_IO_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index ecfe70eb5efb..31454643f8c5 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/dma-buf.h>
@@ -15,6 +15,7 @@
#include <rdma/uverbs_ioctl.h>
#include "efa.h"
+#include "efa_io_defs.h"
enum {
EFA_MMAP_DMA_PAGE = 0,
@@ -242,6 +243,7 @@ int efa_query_device(struct ib_device *ibdev,
resp.max_rq_wr = dev_attr->max_rq_depth;
resp.max_rdma_size = dev_attr->max_rdma_size;
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_WITH_SGID;
if (EFA_DEV_CAP(dev, RDMA_READ))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RDMA_READ;
@@ -1064,6 +1066,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct efa_ibv_create_cq cmd = {};
struct efa_cq *cq = to_ecq(ibcq);
int entries = attr->cqe;
+ bool set_src_addr;
int err;
ibdev_dbg(ibdev, "create_cq entries %d\n", entries);
@@ -1109,7 +1112,10 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_out;
}
- if (!cmd.cq_entry_size) {
+ set_src_addr = !!(cmd.flags & EFA_CREATE_CQ_WITH_SGID);
+ if ((cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc_ex)) &&
+ (set_src_addr ||
+ cmd.cq_entry_size != sizeof(struct efa_io_rx_cdesc))) {
ibdev_dbg(ibdev,
"Invalid entry size [%u]\n", cmd.cq_entry_size);
err = -EINVAL;
@@ -1138,6 +1144,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
params.dma_addr = cq->dma_addr;
params.entry_size_in_bytes = cmd.cq_entry_size;
params.num_sub_cqs = cmd.num_sub_cqs;
+ params.set_src_addr = set_src_addr;
if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
cq->eq = efa_vec2eq(dev, attr->comp_vector);
params.eqn = cq->eq->eeq.eqn;
diff --git a/drivers/infiniband/hw/erdma/erdma.h b/drivers/infiniband/hw/erdma/erdma.h
index 2aae635c1c8d..730783fbc894 100644
--- a/drivers/infiniband/hw/erdma/erdma.h
+++ b/drivers/infiniband/hw/erdma/erdma.h
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/netdevice.h>
+#include <linux/pci.h>
#include <linux/xarray.h>
#include <rdma/ib_verbs.h>
@@ -196,6 +197,7 @@ struct erdma_dev {
struct erdma_devattr attrs;
/* physical port state (only one port per device) */
enum ib_port_state state;
+ u32 mtu;
/* cmdq and aeq use the same msix vector */
struct erdma_irq comm_irq;
@@ -269,7 +271,7 @@ void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
-int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
u64 *resp0, u64 *resp1);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index f13f16479eca..74f6348f240a 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -10,15 +10,7 @@
/* Copyright (c) 2008-2019, IBM Corporation */
/* Copyright (c) 2017, Open Grid Computing, Inc. */
-#include <linux/errno.h>
-#include <linux/inetdevice.h>
-#include <linux/net.h>
-#include <linux/types.h>
#include <linux/workqueue.h>
-#include <net/addrconf.h>
-
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
#include "erdma.h"
#include "erdma_cm.h"
diff --git a/drivers/infiniband/hw/erdma/erdma_cmdq.c b/drivers/infiniband/hw/erdma/erdma_cmdq.c
index 57da0c670472..6ebfa6989b11 100644
--- a/drivers/infiniband/hw/erdma/erdma_cmdq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cmdq.c
@@ -4,13 +4,7 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
#include "erdma.h"
-#include "erdma_hw.h"
-#include "erdma_verbs.h"
static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
{
@@ -441,7 +435,7 @@ void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
}
-int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
+int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, void *req, u32 req_size,
u64 *resp0, u64 *resp1)
{
struct erdma_comp_wait *comp_wait;
diff --git a/drivers/infiniband/hw/erdma/erdma_cq.c b/drivers/infiniband/hw/erdma/erdma_cq.c
index 751c7f9f0de7..58e0dc5c75d1 100644
--- a/drivers/infiniband/hw/erdma/erdma_cq.c
+++ b/drivers/infiniband/hw/erdma/erdma_cq.c
@@ -4,9 +4,6 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <rdma/ib_verbs.h>
-
-#include "erdma_hw.h"
#include "erdma_verbs.h"
static void *get_next_valid_cqe(struct erdma_cq *cq)
@@ -62,7 +59,6 @@ static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
[ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
[ERDMA_OP_RECV_INV] = IB_WC_RECV,
[ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
- [ERDMA_OP_INVALIDATE] = IB_WC_LOCAL_INV,
[ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
[ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
[ERDMA_OP_REG_MR] = IB_WC_REG_MR,
diff --git a/drivers/infiniband/hw/erdma/erdma_eq.c b/drivers/infiniband/hw/erdma/erdma_eq.c
index 8f2d094e0227..ed54130d924b 100644
--- a/drivers/infiniband/hw/erdma/erdma_eq.c
+++ b/drivers/infiniband/hw/erdma/erdma_eq.c
@@ -4,12 +4,6 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-
-#include "erdma.h"
-#include "erdma_hw.h"
#include "erdma_verbs.h"
#define MAX_POLL_CHUNK_SIZE 16
@@ -229,9 +223,7 @@ static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
- sizeof(struct erdma_cmdq_create_eq_req),
- NULL, NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
@@ -281,8 +273,7 @@ static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
req.qtype = ERDMA_EQ_TYPE_CEQ;
req.vector_idx = ceqn + 1;
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (err)
return;
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index b210c49c669f..e788887732e1 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -153,6 +153,7 @@ enum CMDQ_COMMON_OPCODE {
CMDQ_OPCODE_CREATE_EQ = 0,
CMDQ_OPCODE_DESTROY_EQ = 1,
CMDQ_OPCODE_QUERY_FW_INFO = 2,
+ CMDQ_OPCODE_CONF_MTU = 3,
};
/* cmdq-SQE HDR */
@@ -190,6 +191,11 @@ struct erdma_cmdq_destroy_eq_req {
u8 qtype;
};
+struct erdma_cmdq_config_mtu_req {
+ u64 hdr;
+ u32 mtu;
+};
+
/* create_cq cfg0 */
#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
@@ -450,13 +456,13 @@ enum erdma_opcode {
ERDMA_OP_RECV_IMM = 5,
ERDMA_OP_RECV_INV = 6,
- ERDMA_OP_REQ_ERR = 7,
- ERDMA_OP_READ_RESPONSE = 8,
+ ERDMA_OP_RSVD0 = 7,
+ ERDMA_OP_RSVD1 = 8,
ERDMA_OP_WRITE_WITH_IMM = 9,
- ERDMA_OP_RECV_ERR = 10,
+ ERDMA_OP_RSVD2 = 10,
+ ERDMA_OP_RSVD3 = 11,
- ERDMA_OP_INVALIDATE = 11,
ERDMA_OP_RSP_SEND_IMM = 12,
ERDMA_OP_SEND_WITH_INV = 13,
diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 07e743d24847..49778bb294ae 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -4,21 +4,12 @@
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/pci.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
#include "erdma.h"
#include "erdma_cm.h"
-#include "erdma_hw.h"
#include "erdma_verbs.h"
MODULE_AUTHOR("Cheng Xu <chengyou@linux.alibaba.com>");
@@ -43,10 +34,15 @@ static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
dev->state = IB_PORT_DOWN;
erdma_port_event(dev, IB_EVENT_PORT_ERR);
break;
+ case NETDEV_CHANGEMTU:
+ if (dev->mtu != netdev->mtu) {
+ erdma_set_mtu(dev, netdev->mtu);
+ dev->mtu = netdev->mtu;
+ }
+ break;
case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
case NETDEV_CHANGEADDR:
- case NETDEV_CHANGEMTU:
case NETDEV_GOING_DOWN:
case NETDEV_CHANGE:
default:
@@ -104,6 +100,7 @@ static int erdma_device_register(struct erdma_dev *dev)
if (ret)
return ret;
+ dev->mtu = dev->netdev->mtu;
addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);
ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index bc3ec22a62c5..5fe1a339a435 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -6,15 +6,6 @@
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/scatterlist.h>
-#include <linux/types.h>
-
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
-
-#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"
@@ -105,8 +96,7 @@ static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
req.recv_nxt = tp->rcv_nxt;
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
@@ -124,8 +114,7 @@ static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 699bd3f59cd3..62be98e2b941 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -9,21 +9,14 @@
/* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. */
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/ib_verbs.h>
#include <rdma/uverbs_ioctl.h>
#include "erdma.h"
#include "erdma_cm.h"
-#include "erdma_hw.h"
#include "erdma_verbs.h"
static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
@@ -102,7 +95,7 @@ static int create_qp_cmd(struct erdma_dev *dev, struct erdma_qp *qp)
req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
}
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), &resp0,
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), &resp0,
&resp1);
if (!err)
qp->attrs.cookie =
@@ -151,8 +144,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
}
post_cmd:
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
@@ -202,8 +194,7 @@ static int create_cq_cmd(struct erdma_dev *dev, struct erdma_cq *cq)
req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
}
- return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ return erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
}
static int erdma_alloc_idx(struct erdma_resource_cb *res_cb)
@@ -976,8 +967,7 @@ int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
req.cfg = FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, ibmr->lkey >> 8) |
FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, ibmr->lkey & 0xFF);
- ret = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ ret = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (ret)
return ret;
@@ -1002,8 +992,7 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_CQ);
req.cqn = cq->cqn;
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (err)
return err;
@@ -1040,8 +1029,7 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
CMDQ_OPCODE_DESTROY_QP);
req.qpn = QP_ID(qp);
- err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
- NULL);
+ err = erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
if (err)
return err;
@@ -1448,6 +1436,17 @@ err_out_xa:
return ret;
}
+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu)
+{
+ struct erdma_cmdq_config_mtu_req req;
+
+ erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
+ CMDQ_OPCODE_CONF_MTU);
+ req.mtu = mtu;
+
+ erdma_post_cmd_wait(&dev->cmdq, &req, sizeof(req), NULL, NULL);
+}
+
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason)
{
struct ib_event event;
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index c7baddb1f292..ab6380635e9e 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -7,15 +7,7 @@
#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__
-#include <linux/errno.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-#include <rdma/iw_cm.h>
-
#include "erdma.h"
-#include "erdma_cm.h"
-#include "erdma_hw.h"
/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
@@ -338,5 +330,6 @@ struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
+void erdma_set_mtu(struct erdma_dev *dev, u32 mtu);
#endif
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index f1245c94ae26..ebe970f76232 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -8753,7 +8753,7 @@ static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
/*
* When writing a LCB CSR, out_data contains the full value to
- * to be written, while in_data contains the relative LCB
+ * be written, while in_data contains the relative LCB
* address in 7:0. Do the work here, rather than the caller,
* of distrubting the write data to where it needs to go:
*
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 629beff053ad..f5f9269fdc16 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -965,7 +965,7 @@ static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
uctxt->userversion = uinfo->userversion;
uctxt->flags = hfi1_cap_mask; /* save current flag state */
init_waitqueue_head(&uctxt->wait);
- strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
+ strscpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
uctxt->jkey = generate_jkey(current_uid());
hfi1_stats.sps_ctxts++;
diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index aa15a5cc7cf3..1d77514ebbee 100644
--- a/drivers/infiniband/hw/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
@@ -1114,7 +1114,7 @@ static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
* Reset all of the fabric serdes for this HFI in preparation to take the
* link to Polling.
*
- * To do a reset, we need to write to to the serdes registers. Unfortunately,
+ * To do a reset, we need to write to the serdes registers. Unfortunately,
* the fabric serdes download to the other HFI on the ASIC will have turned
* off the firmware validation on this HFI. This means we can't write to the
* registers to reset the serdes. Work around this by performing a complete
diff --git a/drivers/infiniband/hw/hfi1/ipoib_rx.c b/drivers/infiniband/hw/hfi1/ipoib_rx.c
index 3afa7545242c..629691a572ef 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_rx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_rx.c
@@ -11,13 +11,10 @@
static void copy_ipoib_buf(struct sk_buff *skb, void *data, int size)
{
- void *dst_data;
-
skb_checksum_none_assert(skb);
skb->protocol = *((__be16 *)data);
- dst_data = skb_put(skb, size);
- memcpy(dst_data, data, size);
+ skb_put_data(skb, data, size);
skb->mac_header = HFI1_IPOIB_PSEUDO_LEN;
skb_pull(skb, HFI1_IPOIB_ENCAP_LEN);
}
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 6988f6f21bde..e6e17984553c 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1447,12 +1447,10 @@ static int shut_down_port(struct rvt_dev_info *rdi, u32 port_num)
struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
struct hfi1_pportdata *ppd = &dd->pport[port_num - 1];
- int ret;
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_UNKNOWN, 0,
OPA_LINKDOWN_REASON_UNKNOWN);
- ret = set_link_state(ppd, HLS_DN_DOWNDEF);
- return ret;
+ return set_link_state(ppd, HLS_DN_DOWNDEF);
}
static int hfi1_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
@@ -1801,7 +1799,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
ib_set_device_ops(ibdev, &hfi1_dev_ops);
- strlcpy(ibdev->node_desc, init_utsname()->nodename,
+ strscpy(ibdev->node_desc, init_utsname()->nodename,
sizeof(ibdev->node_desc));
/*
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 38565532d654..7f30f32b34dc 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -391,9 +391,6 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
bool *call_send);
-extern const u32 rc_only_opcode;
-extern const u32 uc_only_opcode;
-
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet);
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index 9f04f25d9631..a7d259238305 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -10,6 +10,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
ifdef CONFIG_INFINIBAND_HNS_HIP08
-hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
+hns-roce-hw-v2-objs := hns_roce_hw_v2.o $(hns-roce-objs)
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 8acd599ffac1..736dc2f993b4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -454,7 +454,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
hr_cq = xa_load(&hr_dev->cq_table.array,
cqn & (hr_dev->caps.num_cqs - 1));
if (!hr_cq) {
- dev_warn(hr_dev->dev, "Completion event for bogus CQ 0x%06x\n",
+ dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n",
cqn);
return;
}
@@ -475,14 +475,14 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
hr_cq = xa_load(&hr_dev->cq_table.array,
cqn & (hr_dev->caps.num_cqs - 1));
if (!hr_cq) {
- dev_warn(dev, "Async event for bogus CQ 0x%06x\n", cqn);
+ dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
return;
}
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
- dev_err(dev, "Unexpected event type 0x%x on CQ 0x%06x\n",
+ dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n",
event_type, cqn);
return;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index d24996526c4d..723e55a7de8d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -240,7 +240,6 @@ struct hns_roce_hem_table {
/* Single obj size */
unsigned long obj_size;
unsigned long table_chunk_size;
- int lowmem;
struct mutex mutex;
struct hns_roce_hem **hem;
u64 **bt_l1;
@@ -599,7 +598,6 @@ struct hns_roce_qp {
struct hns_roce_db rdb;
struct hns_roce_db sdb;
unsigned long en_flags;
- u32 doorbell_qpn;
enum ib_sig_type sq_signal_bits;
struct hns_roce_wq sq;
@@ -726,7 +724,7 @@ struct hns_roce_caps {
u32 max_sq_sg;
u32 max_sq_inline;
u32 max_rq_sg;
- u32 max_extend_sg;
+ u32 rsv0;
u32 num_qps;
u32 num_pi_qps;
u32 reserved_qps;
@@ -736,7 +734,7 @@ struct hns_roce_caps {
u32 max_srq_sges;
u32 max_sq_desc_sz;
u32 max_rq_desc_sz;
- u32 max_srq_desc_sz;
+ u32 rsv2;
int max_qp_init_rdma;
int max_qp_dest_rdma;
u32 num_cqs;
@@ -749,7 +747,7 @@ struct hns_roce_caps {
int num_comp_vectors;
int num_other_vectors;
u32 num_mtpts;
- u32 num_mtt_segs;
+ u32 rsv1;
u32 num_srqwqe_segs;
u32 num_idx_segs;
int reserved_mrws;
@@ -848,11 +846,6 @@ struct hns_roce_caps {
enum cong_type cong_type;
};
-struct hns_roce_dfx_hw {
- int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
- int *buffer);
-};
-
enum hns_roce_device_state {
HNS_ROCE_DEVICE_STATE_INITED,
HNS_ROCE_DEVICE_STATE_RST_DOWN,
@@ -898,6 +891,9 @@ struct hns_roce_hw {
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
+ int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
+ int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
+ int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
@@ -959,7 +955,6 @@ struct hns_roce_dev {
void *priv;
struct workqueue_struct *irq_workq;
struct work_struct ecc_work;
- const struct hns_roce_dfx_hw *dfx;
u32 func_num;
u32 is_vf;
u32 cong_algo_tmpl_id;
@@ -1227,8 +1222,12 @@ u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
-int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
- struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp);
+int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr);
+int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr);
struct hns_user_mmap_entry *
hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index ce1a0d2792a3..aa8a08d1c014 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -455,7 +455,7 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
* alloc bt space chunk for MTT/CQE.
*/
size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
- flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
+ flag = GFP_KERNEL | __GFP_NOWARN;
table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
size, flag);
if (!table->hem[index->buf]) {
@@ -588,8 +588,7 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
table->hem[i] = hns_roce_alloc_hem(hr_dev,
table->table_chunk_size >> PAGE_SHIFT,
table->table_chunk_size,
- (table->lowmem ? GFP_KERNEL :
- GFP_HIGHUSER) | __GFP_NOWARN);
+ GFP_KERNEL | __GFP_NOWARN);
if (!table->hem[i]) {
ret = -ENOMEM;
goto out;
@@ -725,9 +724,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
int length;
int i, j;
- if (!table->lowmem)
- return NULL;
-
mutex_lock(&table->mutex);
if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
@@ -783,8 +779,7 @@ out:
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
- unsigned long obj_size, unsigned long nobj,
- int use_lowmem)
+ unsigned long obj_size, unsigned long nobj)
{
unsigned long obj_per_chunk;
unsigned long num_hem;
@@ -861,7 +856,6 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
table->type = type;
table->num_hem = num_hem;
table->obj_size = obj_size;
- table->lowmem = use_lowmem;
mutex_init(&table->mutex);
return 0;
@@ -932,7 +926,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
if (table->hem[i]) {
if (hr_dev->hw->clear_hem(hr_dev, table,
i * table->table_chunk_size / table->obj_size, 0))
- dev_err(dev, "Clear HEM base address failed.\n");
+ dev_err(dev, "clear HEM base address failed.\n");
hns_roce_free_hem(hr_dev, table->hem[i]);
}
@@ -986,7 +980,7 @@ struct hns_roce_hem_head {
static struct hns_roce_hem_item *
hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
- bool exist_bt, int bt_level)
+ bool exist_bt)
{
struct hns_roce_hem_item *hem;
@@ -1195,7 +1189,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
start_aligned = (distance / step) * step + r->offset;
end = min_t(int, start_aligned + step - 1, max_ofs);
cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
- true, level);
+ true);
if (!cur) {
ret = -ENOMEM;
goto err_exit;
@@ -1247,7 +1241,7 @@ alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
/* indicate to last region */
r = &regions[region_cnt - 1];
hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
- ba_num, true, 0);
+ ba_num, true);
if (!hem)
return ERR_PTR(-ENOMEM);
@@ -1264,7 +1258,7 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
struct hns_roce_hem_item *hem;
hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
- r->count, false, 0);
+ r->count, false);
if (!hem)
return -ENOMEM;
@@ -1421,7 +1415,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
&hem_list->btm_bt);
if (ret) {
dev_err(hr_dev->dev,
- "alloc hem trunk fail ret=%d!\n", ret);
+ "alloc hem trunk fail ret = %d!\n", ret);
goto err_alloc;
}
}
@@ -1430,7 +1424,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
region_cnt);
if (ret)
- dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret);
+ dev_err(hr_dev->dev, "alloc hem root fail ret = %d!\n", ret);
else
return 0;
@@ -1468,19 +1462,17 @@ void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
- int offset, int *mtt_cnt, u64 *phy_addr)
+ int offset, int *mtt_cnt)
{
struct list_head *head = &hem_list->btm_bt;
struct hns_roce_hem_item *hem, *temp_hem;
void *cpu_base = NULL;
- u64 phy_base = 0;
int nr = 0;
list_for_each_entry_safe(hem, temp_hem, head, sibling) {
if (hem_list_page_is_in_range(hem, offset)) {
nr = offset - hem->start;
cpu_base = hem->addr + nr * BA_BYTE_LEN;
- phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
nr = hem->end + 1 - offset;
break;
}
@@ -1489,8 +1481,5 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
if (mtt_cnt)
*mtt_cnt = nr;
- if (phy_addr)
- *phy_addr = phy_base;
-
return cpu_base;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 2d84a6b3f05d..7d23d3c51da4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -111,8 +111,7 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
dma_addr_t *dma_handle);
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, u32 type,
- unsigned long obj_size, unsigned long nobj,
- int use_lowmem);
+ unsigned long obj_size, unsigned long nobj);
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
@@ -132,7 +131,7 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list);
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
- int offset, int *mtt_cnt, u64 *phy_addr);
+ int offset, int *mtt_cnt);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c780646bd60a..1ead35fb031b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -193,8 +193,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
unsigned int *sge_idx, u32 msg_len)
{
struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev;
- unsigned int dseg_len = sizeof(struct hns_roce_v2_wqe_data_seg);
- unsigned int ext_sge_sz = qp->sq.max_gs * dseg_len;
+ unsigned int ext_sge_sz = qp->sq.max_gs * HNS_ROCE_SGE_SIZE;
unsigned int left_len_in_pg;
unsigned int idx = *sge_idx;
unsigned int i = 0;
@@ -222,7 +221,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
if (len <= left_len_in_pg) {
memcpy(dseg, addr, len);
- idx += len / dseg_len;
+ idx += len / HNS_ROCE_SGE_SIZE;
i++;
if (i >= wr->num_sge)
@@ -237,7 +236,7 @@ static int fill_ext_sge_inl_data(struct hns_roce_qp *qp,
len -= left_len_in_pg;
addr += left_len_in_pg;
- idx += left_len_in_pg / dseg_len;
+ idx += left_len_in_pg / HNS_ROCE_SGE_SIZE;
dseg = hns_roce_get_extend_sge(qp,
idx & (qp->sge.sge_cnt - 1));
left_len_in_pg = 1 << HNS_HW_PAGE_SHIFT;
@@ -381,7 +380,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
if (unlikely(ibqp->qp_type != IB_QPT_RC &&
ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_UD)) {
- ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
+ ibdev_err(ibdev, "not supported QP(0x%x)type!\n",
ibqp->qp_type);
return -EOPNOTSUPP;
} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
@@ -637,7 +636,7 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
} else {
struct hns_roce_v2_db sq_db = {};
- hr_reg_write(&sq_db, DB_TAG, qp->doorbell_qpn);
+ hr_reg_write(&sq_db, DB_TAG, qp->qpn);
hr_reg_write(&sq_db, DB_CMD, HNS_ROCE_V2_SQ_DB);
hr_reg_write(&sq_db, DB_PI, qp->sq.head);
hr_reg_write(&sq_db, DB_SL, qp->sl);
@@ -1406,20 +1405,20 @@ static void func_clr_hw_resetting_state(struct hns_roce_dev *hr_dev,
hr_dev->dis_db = true;
dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
+ "func clear is pending, device in resetting state.\n");
end = HNS_ROCE_V2_HW_RST_TIMEOUT;
while (end) {
if (!ops->get_hw_reset_stat(handle)) {
hr_dev->is_reset = true;
dev_info(hr_dev->dev,
- "Func clear success after reset.\n");
+ "func clear success after reset.\n");
return;
}
msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
}
- dev_warn(hr_dev->dev, "Func clear failed.\n");
+ dev_warn(hr_dev->dev, "func clear failed.\n");
}
static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
@@ -1431,21 +1430,21 @@ static void func_clr_sw_resetting_state(struct hns_roce_dev *hr_dev,
hr_dev->dis_db = true;
dev_warn(hr_dev->dev,
- "Func clear is pending, device in resetting state.\n");
+ "func clear is pending, device in resetting state.\n");
end = HNS_ROCE_V2_HW_RST_TIMEOUT;
while (end) {
if (ops->ae_dev_reset_cnt(handle) !=
hr_dev->reset_cnt) {
hr_dev->is_reset = true;
dev_info(hr_dev->dev,
- "Func clear success after sw reset\n");
+ "func clear success after sw reset\n");
return;
}
msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
}
- dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
+ dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n");
}
static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
@@ -1458,7 +1457,7 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) {
hr_dev->dis_db = true;
hr_dev->is_reset = true;
- dev_info(hr_dev->dev, "Func clear success after reset.\n");
+ dev_info(hr_dev->dev, "func clear success after reset.\n");
return;
}
@@ -1475,9 +1474,9 @@ static void hns_roce_func_clr_rst_proc(struct hns_roce_dev *hr_dev, int retval,
if (retval && !flag)
dev_warn(hr_dev->dev,
- "Func clear read failed, ret = %d.\n", retval);
+ "func clear read failed, ret = %d.\n", retval);
- dev_warn(hr_dev->dev, "Func clear failed.\n");
+ dev_warn(hr_dev->dev, "func clear failed.\n");
}
static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
@@ -1498,7 +1497,7 @@ static void __hns_roce_function_clear(struct hns_roce_dev *hr_dev, int vf_id)
ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) {
fclr_write_fail_flag = true;
- dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
+ dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n",
ret);
goto out;
}
@@ -1966,7 +1965,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
- caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
@@ -1984,7 +1982,6 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
- caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
@@ -2185,13 +2182,14 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
- caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
if (!caps->num_comp_vectors)
- caps->num_comp_vectors = min_t(u32, caps->eqc_bt_num - 1,
- (u32)priv->handle->rinfo.num_vectors - 2);
+ caps->num_comp_vectors =
+ min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM,
+ (u32)priv->handle->rinfo.num_vectors -
+ (HNS_ROCE_V2_AEQE_VEC_NUM + HNS_ROCE_V2_ABNORMAL_VEC_NUM));
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
@@ -2272,14 +2270,12 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
- caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
caps->num_other_vectors = resp_a->num_other_vectors;
caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
- caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
caps->cqe_sz = resp_a->cqe_sz;
caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
@@ -4299,7 +4295,6 @@ static inline int get_pdn(struct ib_pd *ib_pd)
static void modify_qp_reset_to_init(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
- int attr_mask,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4363,7 +4358,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
}
static void modify_qp_init_to_init(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
+ const struct ib_qp_attr *attr,
struct hns_roce_v2_qp_context *context,
struct hns_roce_v2_qp_context *qpc_mask)
{
@@ -4612,7 +4607,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_clear(qpc_mask, QPC_DQPN);
}
- memcpy(&(context->dmac), dmac, sizeof(u32));
+ memcpy(&context->dmac, dmac, sizeof(u32));
hr_reg_write(context, QPC_DMAC_H, *((u16 *)(&dmac[4])));
qpc_mask->dmac = 0;
hr_reg_clear(qpc_mask, QPC_DMAC_H);
@@ -5014,11 +5009,9 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
- modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
- qpc_mask);
+ modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- modify_qp_init_to_init(ibqp, attr, attr_mask, context,
- qpc_mask);
+ modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -5039,14 +5032,14 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) {
ibdev_warn(&hr_dev->ib_dev,
- "Local ACK timeout shall be 0 to 20.\n");
+ "local ACK timeout shall be 0 to 20.\n");
return false;
}
*timeout += QP_ACK_TIMEOUT_OFFSET;
} else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
if (*timeout > QP_ACK_TIMEOUT_MAX) {
ibdev_warn(&hr_dev->ib_dev,
- "Local ACK timeout shall be 0 to 31.\n");
+ "local ACK timeout shall be 0 to 31.\n");
return false;
}
}
@@ -5306,9 +5299,8 @@ static int to_ib_qp_st(enum hns_roce_v2_qp_state state)
return (state < ARRAY_SIZE(map)) ? map[state] : -1;
}
-static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_v2_qp_context *hr_context)
+static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, u32 qpn,
+ void *buffer)
{
struct hns_roce_cmd_mailbox *mailbox;
int ret;
@@ -5318,11 +5310,11 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
return PTR_ERR(mailbox);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC,
- hr_qp->qpn);
+ qpn);
if (ret)
goto out;
- memcpy(hr_context, mailbox->buf, hr_dev->caps.qpc_sz);
+ memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz);
out:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);
@@ -5352,7 +5344,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
goto done;
}
- ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
+ ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context);
if (ret) {
ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
ret = -EINVAL;
@@ -5550,7 +5542,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
msleep(20);
}
- ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
+ ibdev_err(ibdev, "query SCC clr done flag overtime.\n");
ret = -ETIMEDOUT;
out:
@@ -5773,6 +5765,64 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
return ret;
}
+static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn,
+ void *buffer)
+{
+ struct hns_roce_v2_cq_context *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma,
+ HNS_ROCE_CMD_QUERY_CQC, cqn);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when querying CQ, ret = %d.\n",
+ ret);
+ goto err_mailbox;
+ }
+
+ memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
+static int hns_roce_v2_query_mpt(struct hns_roce_dev *hr_dev, u32 key,
+ void *buffer)
+{
+ struct hns_roce_v2_mpt_entry *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ context = mailbox->buf;
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
+ key_to_hw_index(key));
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to process cmd when querying MPT, ret = %d.\n",
+ ret);
+ goto err_mailbox;
+ }
+
+ memcpy(buffer, context, sizeof(*context));
+
+err_mailbox:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return ret;
+}
+
static void hns_roce_irq_work_handle(struct work_struct *work)
{
struct hns_roce_work *irq_work =
@@ -5781,26 +5831,26 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
switch (irq_work->event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- ibdev_info(ibdev, "Path migrated succeeded.\n");
+ ibdev_info(ibdev, "path migrated succeeded.\n");
break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- ibdev_warn(ibdev, "Path migration failed.\n");
+ ibdev_warn(ibdev, "path migration failed.\n");
break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
break;
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- ibdev_warn(ibdev, "Send queue drained.\n");
+ ibdev_warn(ibdev, "send queue drained.\n");
break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
+ ibdev_err(ibdev, "local work queue 0x%x catast error, sub_event type is: %d\n",
irq_work->queue_num, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
+ ibdev_err(ibdev, "invalid request local work queue 0x%x error.\n",
irq_work->queue_num);
break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
+ ibdev_err(ibdev, "local access violation work queue 0x%x error, sub_event type is: %d\n",
irq_work->queue_num, irq_work->sub_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
@@ -5822,7 +5872,7 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
ibdev_warn(ibdev, "DB overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_FLR:
- ibdev_warn(ibdev, "Function level reset.\n");
+ ibdev_warn(ibdev, "function level reset.\n");
break;
case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION:
ibdev_err(ibdev, "xrc domain violation error.\n");
@@ -5846,12 +5896,12 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
if (!irq_work)
return;
- INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
+ INIT_WORK(&irq_work->work, hns_roce_irq_work_handle);
irq_work->hr_dev = hr_dev;
irq_work->event_type = eq->event_type;
irq_work->sub_type = eq->sub_type;
irq_work->queue_num = queue_num;
- queue_work(hr_dev->irq_workq, &(irq_work->work));
+ queue_work(hr_dev->irq_workq, &irq_work->work);
}
static void update_eq_db(struct hns_roce_eq *eq)
@@ -5941,7 +5991,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
case HNS_ROCE_EVENT_TYPE_FLR:
break;
default:
- dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n",
event_type, eq->eqn, eq->cons_index);
break;
}
@@ -6011,7 +6061,7 @@ static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
/* Completion event interrupt */
int_work = hns_roce_v2_ceq_int(hr_dev, eq);
else
- /* Asychronous event interrupt */
+ /* Asynchronous event interrupt */
int_work = hns_roce_v2_aeq_int(hr_dev, eq);
return IRQ_RETVAL(int_work);
@@ -6332,7 +6382,7 @@ static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL,
0);
if (err)
- dev_err(hr_dev->dev, "Failed to alloc EQE mtr, err %d\n", err);
+ dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err);
return err;
}
@@ -6421,7 +6471,7 @@ static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
0, hr_dev->irq_names[j - comp_num],
&eq_table->eq[j - other_num]);
if (ret) {
- dev_err(hr_dev->dev, "Request irq error!\n");
+ dev_err(hr_dev->dev, "request irq error!\n");
goto err_request_failed;
}
}
@@ -6574,10 +6624,6 @@ static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
kfree(eq_table->eq);
}
-static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
- .query_cqc_info = hns_roce_v2_query_cqc_info,
-};
-
static const struct ib_device_ops hns_roce_v2_dev_ops = {
.destroy_qp = hns_roce_v2_destroy_qp,
.modify_cq = hns_roce_v2_modify_cq,
@@ -6618,6 +6664,9 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.init_eq = hns_roce_v2_init_eq_table,
.cleanup_eq = hns_roce_v2_cleanup_eq_table,
.write_srqc = hns_roce_v2_write_srqc,
+ .query_cqc = hns_roce_v2_query_cqc,
+ .query_qpc = hns_roce_v2_query_qpc,
+ .query_mpt = hns_roce_v2_query_mpt,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
@@ -6649,7 +6698,6 @@ static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
hr_dev->is_vf = id->driver_data;
hr_dev->dev = &handle->pdev->dev;
hr_dev->hw = &hns_roce_hw_v2;
- hr_dev->dfx = &hns_roce_dfx_hw_v2;
hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
hr_dev->odb_offset = hr_dev->sdb_offset;
@@ -6845,7 +6893,7 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
} else {
handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
- dev_info(dev, "Reset done, RoCE client reinit finished.\n");
+ dev_info(dev, "reset done, RoCE client reinit finished.\n");
}
return ret;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 64797109bab6..b11579027e82 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -46,7 +46,6 @@
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
-#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V3_MAX_SQ_INLINE 0x400
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
@@ -55,7 +54,6 @@
#define HNS_ROCE_V2_AEQE_VEC_NUM 1
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
-#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
@@ -65,7 +63,6 @@
#define HNS_ROCE_V2_MAX_QP_DEST_RDMA 128
#define HNS_ROCE_V2_MAX_SQ_DESC_SZ 64
#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
-#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
@@ -406,6 +403,7 @@ enum hns_roce_v2_qp_state {
struct hns_roce_v2_qp_context_ex {
__le32 data[64];
};
+
struct hns_roce_v2_qp_context {
__le32 byte_4_sqpn_tst;
__le32 wqe_sge_ba;
@@ -758,7 +756,8 @@ struct hns_roce_v2_mpt_entry {
#define MPT_INNER_PA_VLD MPT_FIELD_LOC(71, 71)
#define MPT_MW_BIND_QPN MPT_FIELD_LOC(95, 72)
#define MPT_BOUND_LKEY MPT_FIELD_LOC(127, 96)
-#define MPT_LEN MPT_FIELD_LOC(191, 128)
+#define MPT_LEN_L MPT_FIELD_LOC(159, 128)
+#define MPT_LEN_H MPT_FIELD_LOC(191, 160)
#define MPT_LKEY MPT_FIELD_LOC(223, 192)
#define MPT_VA MPT_FIELD_LOC(287, 224)
#define MPT_PBL_SIZE MPT_FIELD_LOC(319, 288)
@@ -1173,7 +1172,7 @@ struct hns_roce_query_pf_caps_a {
__le16 max_sq_sg;
__le16 max_sq_inline;
__le16 max_rq_sg;
- __le32 max_extend_sg;
+ __le32 rsv0;
__le16 num_qpc_timer;
__le16 num_cqc_timer;
__le16 max_srq_sges;
@@ -1181,7 +1180,7 @@ struct hns_roce_query_pf_caps_a {
u8 num_other_vectors;
u8 max_sq_desc_sz;
u8 max_rq_desc_sz;
- u8 max_srq_desc_sz;
+ u8 rsv1;
u8 cqe_sz;
};
@@ -1462,9 +1461,6 @@ struct hns_roce_sccc_clr_done {
__le32 rsv[5];
};
-int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
- int *buffer);
-
static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
void __iomem *dest)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
deleted file mode 100644
index f7a75a7cda74..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2_dfx.c
+++ /dev/null
@@ -1,34 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-// Copyright (c) 2019 Hisilicon Limited.
-
-#include "hnae3.h"
-#include "hns_roce_device.h"
-#include "hns_roce_cmd.h"
-#include "hns_roce_hw_v2.h"
-
-int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,
- int *buffer)
-{
- struct hns_roce_v2_cq_context *cq_context;
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- cq_context = mailbox->buf;
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_CQC,
- cqn);
- if (ret) {
- dev_err(hr_dev->dev, "QUERY cqc cmd process error\n");
- goto err_mailbox;
- }
-
- memcpy(buffer, cq_context, sizeof(*cq_context));
-
-err_mailbox:
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return ret;
-}
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 4ccb217b2841..dcf89689a4c6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -97,7 +97,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
- dev_err(dev, "Can't find netdev on port(%u)!\n", port);
+ dev_err(dev, "can't find netdev on port(%u)!\n", port);
return -ENODEV;
}
@@ -239,7 +239,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
net_dev = hr_dev->iboe.netdevs[port];
if (!net_dev) {
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_err(dev, "Find netdev %u failed!\n", port);
+ dev_err(dev, "find netdev %u failed!\n", port);
return -EINVAL;
}
@@ -515,7 +515,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.destroy_ah = hns_roce_destroy_ah,
.destroy_cq = hns_roce_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
- .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
.get_dma_mr = hns_roce_get_dma_mr,
.get_link_layer = hns_roce_get_link_layer,
.get_port_immutable = hns_roce_port_immutable,
@@ -566,6 +565,15 @@ static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
INIT_RDMA_OBJ_SIZE(ib_xrcd, hns_roce_xrcd, ibxrcd),
};
+static const struct ib_device_ops hns_roce_dev_restrack_ops = {
+ .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+ .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
+ .fill_res_qp_entry = hns_roce_fill_res_qp_entry,
+ .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw,
+ .fill_res_mr_entry = hns_roce_fill_res_mr_entry,
+ .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw,
+};
+
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
int ret;
@@ -605,6 +613,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+ ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
for (i = 0; i < hr_dev->caps.num_ports; i++) {
if (!hr_dev->iboe.netdevs[i])
continue;
@@ -650,17 +659,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
- hr_dev->caps.num_mtpts, 1);
+ hr_dev->caps.num_mtpts);
if (ret) {
- dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
+ dev_err(dev, "failed to init MTPT context memory, aborting.\n");
return ret;
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
- dev_err(dev, "Failed to init QP context memory, aborting.\n");
+ dev_err(dev, "failed to init QP context memory, aborting.\n");
goto err_unmap_dmpt;
}
@@ -668,9 +677,9 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_IRRL,
hr_dev->caps.irrl_entry_sz *
hr_dev->caps.max_qp_init_rdma,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
- dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
+ dev_err(dev, "failed to init irrl_table memory, aborting.\n");
goto err_unmap_qp;
}
@@ -680,19 +689,19 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
HEM_TYPE_TRRL,
hr_dev->caps.trrl_entry_sz *
hr_dev->caps.max_qp_dest_rdma,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
dev_err(dev,
- "Failed to init trrl_table memory, aborting.\n");
+ "failed to init trrl_table memory, aborting.\n");
goto err_unmap_irrl;
}
}
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
- hr_dev->caps.num_cqs, 1);
+ hr_dev->caps.num_cqs);
if (ret) {
- dev_err(dev, "Failed to init CQ context memory, aborting.\n");
+ dev_err(dev, "failed to init CQ context memory, aborting.\n");
goto err_unmap_trrl;
}
@@ -700,10 +709,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
HEM_TYPE_SRQC,
hr_dev->caps.srqc_entry_sz,
- hr_dev->caps.num_srqs, 1);
+ hr_dev->caps.num_srqs);
if (ret) {
dev_err(dev,
- "Failed to init SRQ context memory, aborting.\n");
+ "failed to init SRQ context memory, aborting.\n");
goto err_unmap_cq;
}
}
@@ -713,10 +722,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
&hr_dev->qp_table.sccc_table,
HEM_TYPE_SCCC,
hr_dev->caps.sccc_sz,
- hr_dev->caps.num_qps, 1);
+ hr_dev->caps.num_qps);
if (ret) {
dev_err(dev,
- "Failed to init SCC context memory, aborting.\n");
+ "failed to init SCC context memory, aborting.\n");
goto err_unmap_srq;
}
}
@@ -725,10 +734,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
- hr_dev->caps.qpc_timer_bt_num, 1);
+ hr_dev->caps.qpc_timer_bt_num);
if (ret) {
dev_err(dev,
- "Failed to init QPC timer memory, aborting.\n");
+ "failed to init QPC timer memory, aborting.\n");
goto err_unmap_ctx;
}
}
@@ -737,10 +746,10 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
HEM_TYPE_CQC_TIMER,
hr_dev->caps.cqc_timer_entry_sz,
- hr_dev->caps.cqc_timer_bt_num, 1);
+ hr_dev->caps.cqc_timer_bt_num);
if (ret) {
dev_err(dev,
- "Failed to init CQC timer memory, aborting.\n");
+ "failed to init CQC timer memory, aborting.\n");
goto err_unmap_qpc_timer;
}
}
@@ -749,7 +758,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
HEM_TYPE_GMV,
hr_dev->caps.gmv_entry_sz,
- hr_dev->caps.gmv_entry_num, 1);
+ hr_dev->caps.gmv_entry_num);
if (ret) {
dev_err(dev,
"failed to init gmv table memory, ret = %d\n",
@@ -818,13 +827,13 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
if (ret) {
- dev_err(dev, "Failed to allocate priv_uar.\n");
+ dev_err(dev, "failed to allocate priv_uar.\n");
goto err_uar_table_free;
}
ret = hns_roce_init_qp_table(hr_dev);
if (ret) {
- dev_err(dev, "Failed to init qp_table.\n");
+ dev_err(dev, "failed to init qp_table.\n");
goto err_uar_table_free;
}
@@ -837,9 +846,8 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
hns_roce_init_cq_table(hr_dev);
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
hns_roce_init_srq_table(hr_dev);
- }
return 0;
@@ -902,14 +910,14 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) {
- dev_err(dev, "Init RoCE Command Queue failed!\n");
+ dev_err(dev, "init RoCE Command Queue failed!\n");
return ret;
}
}
ret = hr_dev->hw->hw_profile(hr_dev);
if (ret) {
- dev_err(dev, "Get RoCE engine profile failed!\n");
+ dev_err(dev, "get RoCE engine profile failed!\n");
goto error_failed_cmd_init;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 867972c2a894..845ac7d3831f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -190,7 +190,7 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
int ret;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (mr == NULL)
+ if (!mr)
return ERR_PTR(-ENOMEM);
mr->type = MR_TYPE_DMA;
@@ -249,7 +249,6 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_alloc_pbl;
mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
- mr->ibmr.length = length;
return &mr->ibmr;
@@ -586,7 +585,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
while (offset < end && npage < max_count) {
count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
- offset, &count, NULL);
+ offset, &count);
if (!mtts)
return -ENOBUFS;
@@ -835,7 +834,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtt_count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
start_index + total,
- &mtt_count, NULL);
+ &mtt_count);
if (!mtts || !mtt_count)
goto done;
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 7bee7f6c5e70..f0bd82a18069 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -56,7 +56,7 @@ static void flush_work_handle(struct work_struct *work)
if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
if (ret)
- dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
+ dev_err(dev, "modify QP to error state failed(%d) during CQE flush\n",
ret);
}
@@ -105,7 +105,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
xa_unlock(&hr_dev->qp_table_xa);
if (!qp) {
- dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ dev_warn(dev, "async event for bogus QP %08x\n", qpn);
return;
}
@@ -218,7 +218,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
num = 1;
- hr_qp->doorbell_qpn = 1;
} else {
mutex_lock(&qp_table->bank_mutex);
bankid = get_least_load_bankid_for_qp(qp_table->bank);
@@ -234,8 +233,6 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
qp_table->bank[bankid].inuse++;
mutex_unlock(&qp_table->bank_mutex);
-
- hr_qp->doorbell_qpn = (u32)num;
}
hr_qp->qpn = num;
@@ -278,7 +275,7 @@ static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
if (ret)
- dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
+ dev_err(hr_dev->dev, "failed to xa store for QPC\n");
else
/* add QP to device's QP list for softwc */
add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
@@ -299,14 +296,14 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get QPC table\n");
+ dev_err(dev, "failed to get QPC table\n");
goto err_out;
}
/* Alloc memory for IRRL */
ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get IRRL table\n");
+ dev_err(dev, "failed to get IRRL table\n");
goto err_put_qp;
}
@@ -315,7 +312,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get TRRL table\n");
+ dev_err(dev, "failed to get TRRL table\n");
goto err_put_irrl;
}
}
@@ -325,7 +322,7 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
hr_qp->qpn);
if (ret) {
- dev_err(dev, "Failed to get SCC CTX table\n");
+ dev_err(dev, "failed to get SCC CTX table\n");
goto err_put_trrl;
}
}
@@ -1206,7 +1203,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
if (ret)
- ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
+ ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
init_attr->qp_type, ret);
return ret;
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 24a154d64630..989a2af2e938 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -9,91 +9,223 @@
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
-static int hns_roce_fill_cq(struct sk_buff *msg,
- struct hns_roce_v2_cq_context *context)
+#define MAX_ENTRY_NUM 256
+
+int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
- if (rdma_nl_put_driver_u32(msg, "state",
- hr_reg_read(context, CQC_ARM_ST)))
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+ if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
goto err;
- if (rdma_nl_put_driver_u32(msg, "ceqn",
- hr_reg_read(context, CQC_CEQN)))
+ if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
goto err;
- if (rdma_nl_put_driver_u32(msg, "cqn",
- hr_reg_read(context, CQC_CQN)))
+ if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
goto err;
- if (rdma_nl_put_driver_u32(msg, "hopnum",
- hr_reg_read(context, CQC_CQE_HOP_NUM)))
+ if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
goto err;
- if (rdma_nl_put_driver_u32(msg, "pi",
- hr_reg_read(context, CQC_CQ_PRODUCER_IDX)))
+ nla_nest_end(msg, table_attr);
+
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+ struct hns_roce_v2_cq_context context;
+ u32 data[MAX_ENTRY_NUM] = {};
+ int offset = 0;
+ int ret;
+
+ if (!hr_dev->hw->query_cqc)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
+ if (ret)
+ return -EINVAL;
+
+ data[offset++] = hr_reg_read(&context, CQC_CQ_ST);
+ data[offset++] = hr_reg_read(&context, CQC_SHIFT);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_CNT);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX);
+ data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN);
+ data[offset++] = hr_reg_read(&context, CQC_ARM_ST);
+ data[offset++] = hr_reg_read(&context, CQC_CMD_SN);
+ data[offset++] = hr_reg_read(&context, CQC_CEQN);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT);
+ data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ);
+ data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ);
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
+ return ret;
+}
+
+int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
+{
+ struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+ struct nlattr *table_attr;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
goto err;
- if (rdma_nl_put_driver_u32(msg, "ci",
- hr_reg_read(context, CQC_CQ_CONSUMER_IDX)))
+ if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
goto err;
- if (rdma_nl_put_driver_u32(msg, "coalesce",
- hr_reg_read(context, CQC_CQ_MAX_CNT)))
+ if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
goto err;
- if (rdma_nl_put_driver_u32(msg, "period",
- hr_reg_read(context, CQC_CQ_PERIOD)))
+ if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
goto err;
- if (rdma_nl_put_driver_u32(msg, "cnt",
- hr_reg_read(context, CQC_CQE_CNT)))
+ if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
goto err;
+ nla_nest_end(msg, table_attr);
+
return 0;
err:
+ nla_nest_cancel(msg, table_attr);
+
return -EMSGSIZE;
}
-int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
- struct ib_cq *ib_cq)
+int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
- struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- struct hns_roce_v2_cq_context *context;
- struct nlattr *table_attr;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
+ struct hns_roce_v2_qp_context context;
+ u32 data[MAX_ENTRY_NUM] = {};
+ int offset = 0;
int ret;
- if (!hr_dev->dfx->query_cqc_info)
+ if (!hr_dev->hw->query_qpc)
return -EINVAL;
- context = kzalloc(sizeof(struct hns_roce_v2_cq_context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);
+ ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
if (ret)
- goto err;
+ return -EINVAL;
+
+ data[offset++] = hr_reg_read(&context, QPC_QP_ST);
+ data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE);
+ data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG);
+ data[offset++] = hr_reg_read(&context, QPC_SRQ_EN);
+ data[offset++] = hr_reg_read(&context, QPC_SRQN);
+ data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD);
+ data[offset++] = hr_reg_read(&context, QPC_TX_CQN);
+ data[offset++] = hr_reg_read(&context, QPC_RX_CQN);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT);
+ data[offset++] = hr_reg_read(&context, QPC_RQWS);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT);
+ data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ);
+ data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ);
+ data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
+ data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX);
+ data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR);
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
+ return ret;
+}
+
+int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
+ struct nlattr *table_attr;
table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
- if (!table_attr) {
- ret = -EMSGSIZE;
+ if (!table_attr)
+ return -EMSGSIZE;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
goto err;
- }
- if (hns_roce_fill_cq(msg, context)) {
- ret = -EMSGSIZE;
- goto err_cancel_table;
- }
+ if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
+ hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
+ goto err;
+
+ if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
+ hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
+ goto err;
nla_nest_end(msg, table_attr);
- kfree(context);
return 0;
-err_cancel_table:
- nla_nest_cancel(msg, table_attr);
err:
- kfree(context);
+ nla_nest_cancel(msg, table_attr);
+
+ return -EMSGSIZE;
+}
+
+int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
+ struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
+ struct hns_roce_v2_mpt_entry context;
+ u32 data[MAX_ENTRY_NUM] = {};
+ int offset = 0;
+ int ret;
+
+ if (!hr_dev->hw->query_mpt)
+ return -EINVAL;
+
+ ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
+ if (ret)
+ return -EINVAL;
+
+ data[offset++] = hr_reg_read(&context, MPT_ST);
+ data[offset++] = hr_reg_read(&context, MPT_PD);
+ data[offset++] = hr_reg_read(&context, MPT_LKEY);
+ data[offset++] = hr_reg_read(&context, MPT_LEN_L);
+ data[offset++] = hr_reg_read(&context, MPT_LEN_H);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_SIZE);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_HOP_NUM);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_BA_PG_SZ);
+ data[offset++] = hr_reg_read(&context, MPT_PBL_BUF_PG_SZ);
+
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
return ret;
}
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index e03e03082a5f..c1906cab5c8a 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -314,6 +314,7 @@ enum irdma_cqp_op_type {
#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_INVALID_REQUEST 0x0223
#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 4f132c6fb653..ab246447520b 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -138,59 +138,68 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
switch (info->ae_id) {
- case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BOUNDS_VIOLATION:
case IRDMA_AE_AMP_INVALID_STAG:
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- fallthrough;
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
case IRDMA_AE_AMP_BAD_PD:
- case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
qp->flush_code = FLUSH_PROT_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
- case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
qp->flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->flush_code = FLUSH_LOC_LEN_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
- case IRDMA_AE_AMP_BAD_STAG_KEY:
- case IRDMA_AE_AMP_BAD_STAG_INDEX:
- case IRDMA_AE_AMP_TO_WRAP:
- case IRDMA_AE_AMP_RIGHTS_VIOLATION:
case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case IRDMA_AE_PRIV_OPERATION_DENIED:
- case IRDMA_AE_IB_INVALID_REQUEST:
case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
qp->flush_code = FLUSH_REM_ACCESS_ERR;
qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
- case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
- qp->flush_code = FLUSH_LOC_LEN_ERR;
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp->flush_code = FLUSH_REM_OP_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_LCE_QP_CATASTROPHIC:
qp->flush_code = FLUSH_FATAL_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
- case IRDMA_AE_DDP_UBE_INVALID_MO:
case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
- case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
qp->flush_code = FLUSH_GENERAL_ERR;
break;
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
qp->flush_code = FLUSH_RETRY_EXC_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
qp->flush_code = FLUSH_MW_BIND_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
break;
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
- qp->flush_code = FLUSH_REM_OP_ERR;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp->flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
break;
default:
- qp->flush_code = FLUSH_FATAL_ERR;
+ qp->flush_code = FLUSH_GENERAL_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
}
}
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 9e7b8ecb137a..517d41a1c289 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -98,6 +98,7 @@ enum irdma_term_mpa_errors {
enum irdma_qp_event_type {
IRDMA_QP_EVENT_CATASTROPHIC,
IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_hw_stats_index_32b {
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index ddd0ebbdd7d5..2ef61923c926 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -103,6 +103,7 @@ enum irdma_flush_opcode {
FLUSH_FATAL_ERR,
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
+ FLUSH_REM_INV_REQ_ERR,
};
enum irdma_cmpl_status {
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 075defaabee5..8dfc9e154d73 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2479,6 +2479,9 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
case IRDMA_QP_EVENT_ACCESS_ERR:
ibevent.event = IB_EVENT_QP_ACCESS_ERR;
break;
+ case IRDMA_QP_EVENT_REQ_ERR:
+ ibevent.event = IB_EVENT_QP_REQ_ERR;
+ break;
}
ibevent.device = iwqp->ibqp.device;
ibevent.element.qp = &iwqp->ibqp;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9b207f5084eb..a22afbb25bc5 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -299,13 +299,19 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp)
static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
+#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
+#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
struct ib_device *ibdev = uctx->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
- struct irdma_alloc_ucontext_req req;
+ struct irdma_alloc_ucontext_req req = {};
struct irdma_alloc_ucontext_resp uresp = {};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
struct irdma_uk_attrs *uk_attrs;
+ if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+ return -EINVAL;
+
if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
return -EINVAL;
@@ -317,7 +323,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
/* GEN_1 legacy support with libi40iw */
- if (udata->outlen < sizeof(uresp)) {
+ if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
return -EOPNOTSUPP;
@@ -389,6 +395,7 @@ static void irdma_dealloc_ucontext(struct ib_ucontext *context)
*/
static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
+#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -398,6 +405,9 @@ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
u32 pd_id = 0;
int err;
+ if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
+ return -EINVAL;
+
err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
&rf->next_pd);
if (err)
@@ -814,12 +824,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
struct ib_pd *ibpd = ibqp->pd;
struct irdma_pd *iwpd = to_iwpd(ibpd);
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp = to_iwqp(ibqp);
- struct irdma_create_qp_req req;
+ struct irdma_create_qp_req req = {};
struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0;
int err_code;
@@ -836,6 +848,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
if (err_code)
return err_code;
+ if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+ return -EINVAL;
+
sq_size = init_attr->cap.max_send_wr;
rq_size = init_attr->cap.max_recv_wr;
@@ -1120,6 +1136,8 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
@@ -1138,6 +1156,13 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info = &iwqp->roce_info;
udp_info = &iwqp->udp_info;
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -1374,7 +1399,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (udata) {
+ if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
@@ -1426,7 +1451,7 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
} else {
iwqp->ibqp_state = attr->qp_state;
}
- if (udata && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
ucontext = rdma_udata_to_drv_context(udata,
@@ -1466,6 +1491,8 @@ exit:
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata)
{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
@@ -1480,6 +1507,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
int err;
unsigned long flags;
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
return -EOPNOTSUPP;
@@ -1565,7 +1599,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
case IB_QPS_RESET:
if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
spin_unlock_irqrestore(&iwqp->lock, flags);
- if (udata) {
+ if (udata && udata->inlen) {
if (ib_copy_from_udata(&ureq, udata,
min(sizeof(ureq), udata->inlen)))
return -EINVAL;
@@ -1662,7 +1696,7 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
}
}
}
- if (attr_mask & IB_QP_STATE && udata &&
+ if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
@@ -1797,6 +1831,7 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
struct ib_udata *udata)
{
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
struct irdma_cq *iwcq = to_iwcq(ibcq);
struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
struct irdma_cqp_request *cqp_request;
@@ -1819,6 +1854,9 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
IRDMA_FEATURE_CQ_RESIZE))
return -EOPNOTSUPP;
+ if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+ return -EINVAL;
+
if (entries > rf->max_cqe)
return -EINVAL;
@@ -1951,6 +1989,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
struct ib_device *ibdev = ibcq->device;
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
@@ -1969,6 +2009,11 @@ static int irdma_create_cq(struct ib_cq *ibcq,
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
return err_code;
+
+ if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+ return -EINVAL;
+
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
&rf->next_cq);
if (err_code)
@@ -2746,6 +2791,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
u64 virt, int access,
struct ib_udata *udata)
{
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_ucontext *ucontext;
struct irdma_pble_alloc *palloc;
@@ -2763,6 +2809,9 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
+ if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+ return ERR_PTR(-EINVAL);
+
region = ib_umem_get(pd->device, start, len, access);
if (IS_ERR(region)) {
@@ -3315,6 +3364,8 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_RETRY_EXC_ERR;
case FLUSH_MW_BIND_ERR:
return IB_WC_MW_BIND_ERR;
+ case FLUSH_REM_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
@@ -4296,12 +4347,16 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
struct rdma_ah_init_attr *attr,
struct ib_udata *udata)
{
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
struct irdma_create_ah_resp uresp;
struct irdma_ah *parent_ah;
int err;
+ if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+ return -EINVAL;
+
err = irdma_setup_ah(ibah, attr);
if (err)
return err;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 04a67b481608..a40bf58bcdd3 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -439,7 +439,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_mr;
mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
- mr->ibmr.length = length;
mr->ibmr.page_size = 1U << shift;
return &mr->ibmr;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 2a2a9e9afc9d..2211a0be16f3 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -907,6 +907,7 @@ static bool devx_is_whitelist_cmd(void *in)
case MLX5_CMD_OP_QUERY_HCA_CAP:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return true;
default:
return false;
@@ -962,6 +963,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
case MLX5_CMD_OP_QUERY_CONG_PARAMS:
case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
return true;
default:
return false;
@@ -2158,32 +2160,39 @@ err:
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
struct uverbs_attr_bundle *attrs,
- struct devx_umem *obj)
+ struct devx_umem *obj, u32 access_flags)
{
u64 addr;
size_t size;
- u32 access;
int err;
if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
return -EFAULT;
- err = uverbs_get_flags32(&access, attrs,
- MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
+ err = ib_check_mr_access(&dev->ib_dev, access_flags);
if (err)
return err;
- err = ib_check_mr_access(&dev->ib_dev, access);
- if (err)
- return err;
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int dmabuf_fd;
+
+ err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
+ if (err)
+ return -EFAULT;
- obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
- if (IS_ERR(obj->umem))
- return PTR_ERR(obj->umem);
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(
+ &dev->ib_dev, addr, size, dmabuf_fd, access_flags);
+ if (IS_ERR(umem_dmabuf))
+ return PTR_ERR(umem_dmabuf);
+ obj->umem = &umem_dmabuf->umem;
+ } else {
+ obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
+ if (IS_ERR(obj->umem))
+ return PTR_ERR(obj->umem);
+ }
return 0;
}
@@ -2222,7 +2231,8 @@ static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
struct uverbs_attr_bundle *attrs,
struct devx_umem *obj,
- struct devx_umem_reg_cmd *cmd)
+ struct devx_umem_reg_cmd *cmd,
+ int access)
{
unsigned long pgsz_bitmap;
unsigned int page_size;
@@ -2271,6 +2281,9 @@ static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
MLX5_SET(umem, umem, page_offset,
ib_umem_dma_offset(obj->umem, page_size));
+ if (mlx5_umem_needs_ats(dev, obj->umem, access))
+ MLX5_SET(umem, umem, ats, 1);
+
mlx5_ib_populate_pas(obj->umem, page_size, mtt,
(obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
MLX5_IB_MTT_READ);
@@ -2288,20 +2301,30 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ int access_flags;
int err;
if (!c->devx_uid)
return -EINVAL;
+ err = uverbs_get_flags32(&access_flags, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_RELAXED_ORDERING);
+ if (err)
+ return err;
+
obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
if (!obj)
return -ENOMEM;
- err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
+ err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
if (err)
goto err_obj_free;
- err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
+ err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
if (err)
goto err_umem_release;
@@ -2833,6 +2856,8 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
UVERBS_ATTR_TYPE(u64),
UA_MANDATORY),
+ UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
+ UA_OPTIONAL),
UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
enum ib_access_flags),
UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 883d7c60143e..c669ef6e47e7 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -26,7 +26,7 @@
#include <linux/mlx5/eswitch.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
+#include <rdma/ib_umem_odp.h>
#include <rdma/lag.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
@@ -46,7 +46,6 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
-#include <rdma/ib_umem_odp.h>
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -1826,6 +1825,9 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
+
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 6191aa833ac2..96ffbbaf0a73 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>
@@ -152,6 +151,7 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
for (i = 0; i < 8; i++)
mlx5_write64(&mmio_wqe[i * 2],
bf->bfreg->map + bf->offset + i * 8);
+ io_stop_wc();
bf->offset ^= bf->buf_size;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 62338f44a30e..4a7f7064bd0e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1563,4 +1563,40 @@ static inline bool rt_supported(int ts_cap)
return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}
+
+/*
+ * PCI Peer to Peer is a trainwreck. If no switch is present then things
+ * sometimes work, depending on the pci_distance_p2p logic for excluding broken
+ * root complexes. However if a switch is present in the path, then things get
+ * really ugly depending on how the switch is set up. This table assumes that the
+ * root complex is strict and is validating that all req/reps are matched
+ * perfectly - so any scenario where it sees only half the transaction is a
+ * failure.
+ *
+ * CR/RR/DT ATS RO P2P
+ * 00X X X OK
+ * 010 X X fails (request is routed to root but root never sees comp)
+ * 011 0 X fails (request is routed to root but root never sees comp)
+ * 011 1 X OK
+ * 10X X 1 OK
+ * 101 X 0 fails (completion is routed to root but root didn't see req)
+ * 110 X 0 SLOW
+ * 111 0 0 SLOW
+ * 111 1 0 fails (completion is routed to root but root didn't see req)
+ * 111 1 1 OK
+ *
+ * Unfortunately we cannot reliably know if a switch is present or what the
+ * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
+ * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
+ *
+ * For now assume if the umem is a dma_buf then it is P2P.
+ */
+static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
+ struct ib_umem *umem, int access_flags)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
+ return false;
+ return access_flags & IB_ACCESS_RELAXED_ORDERING;
+}
+
#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 129d531bd01b..410cc5fd2523 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -39,9 +39,7 @@
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
-#include <rdma/ib_verbs.h>
#include "dm.h"
#include "mlx5_ib.h"
#include "umr.h"
@@ -937,7 +935,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
* cache then synchronously create an uncached one.
*/
if (!ent || ent->limit == 0 ||
- !mlx5r_umr_can_reconfig(dev, 0, access_flags)) {
+ !mlx5r_umr_can_reconfig(dev, 0, access_flags) ||
+ mlx5_umem_needs_ats(dev, umem, access_flags)) {
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(pd, umem, iova, access_flags, page_size, false);
mutex_unlock(&dev->slow_path_mutex);
@@ -1018,6 +1017,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
MLX5_SET(mkc, mkc, translations_octword_size,
get_octo_len(iova, umem->length, mr->page_shift));
MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
+ if (mlx5_umem_needs_ats(dev, umem, access_flags))
+ MLX5_SET(mkc, mkc, ma_translation_mode, 1);
if (populate) {
MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
get_octo_len(iova, umem->length, mr->page_shift));
@@ -1402,7 +1403,6 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
}
- mr->ibmr.length = new_umem->length;
mr->ibmr.iova = iova;
mr->ibmr.length = new_umem->length;
mr->page_shift = order_base_2(page_size);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e305bf1dc6c2..bc97958818bb 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
#include <linux/dma-buf.h>
@@ -795,7 +794,8 @@ static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{
if (!mmkey)
return false;
- if (mmkey->type == MLX5_MKEY_MW)
+ if (mmkey->type == MLX5_MKEY_MW ||
+ mmkey->type == MLX5_MKEY_INDIRECT_DEVX)
return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
return mmkey->key == key;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index bdf5ed38de22..f330ce895d88 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1252,7 +1252,7 @@ static void get_board_id(void *vsd, char *board_id)
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
- strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
+ strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MTHCA_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 265a581133dc..56f06c68f31a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1363,7 +1363,7 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
OCRDMA_HBA_ATTRB_PTNUM_MASK)
>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
- strlcpy(dev->model_number,
+ strscpy(dev->model_number,
hba_attribs->controller_model_number,
sizeof(dev->model_number));
}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 6861c6384f18..9d2dd135b784 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2124,7 +2124,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
isfatal = 1;
- strlcpy(msg,
+ strscpy(msg,
"[Memory BIST test failed, InfiniPath hardware unusable]",
msgl);
/* ignore from now on, so disable until driver reloaded */
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index cb2a02d671e2..692b64efad97 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -295,7 +295,7 @@ void qib_free_irq(struct qib_devdata *dd)
* Setup pcie interrupt stuff again after a reset. I'd like to just call
* pci_enable_msi() again for msi, but when I do that,
* the MSI enable bit doesn't get set in the command word, and
- * we switch to to a different interrupt vector, which is confusing,
+ * we switch to a different interrupt vector, which is confusing,
* so I instead just do it all inline. Perhaps somehow can tie this
* into the PCIe hotplug support at some point
*/
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 67a1b4562dc2..67923ced6e2d 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -95,7 +95,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int ret;
int off;
int i;
- int flags;
dma_addr_t pa;
unsigned int gup_flags;
struct mm_struct *mm;
@@ -132,8 +131,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
goto out;
}
- flags = IOMMU_READ | IOMMU_CACHE;
- flags |= (writable) ? IOMMU_WRITE : 0;
gup_flags = FOLL_WRITE;
gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;