authorCaleb Sander <csander@purestorage.com>2024-01-31 09:43:14 -0700
committerKeith Busch <kbusch@kernel.org>2024-01-31 17:00:45 -0800
commit7d23e836b00eec96610141549f59e5902dc55fe8 (patch)
tree14e53192e3d2d02a83ae6ba2b9b45916efcbab7f /drivers/nvme
parentf9e9115d0c014dec3278d68823eaff159f98f4d6 (diff)
nvme: split out fabrics version of nvme_opcode_str()
nvme_opcode_str() currently supports admin, IO, and fabrics commands. However, fabrics commands aren't allowed for the pci transport. Currently the pci caller passes 0 as the fctype, which means any fabrics command would be displayed as "Property Set". Move fabrics command support into a function nvme_fabrics_opcode_str() and remove the fctype argument to nvme_opcode_str(). This way, a fabrics command will display as "Unknown" for pci. Convert the rdma and tcp transports to use nvme_fabrics_opcode_str().

Signed-off-by: Caleb Sander <csander@purestorage.com>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
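To make the before/after behavior concrete, here is a minimal standalone sketch of the dispatch logic the commit message describes. The numeric values are the real NVMe ones (fabrics commands share opcode 0x7f, Property Set is fctype 0x00, Connect is 0x01), but the helper names (old_opcode_str, new_opcode_str, new_fabrics_opcode_str) and the trimmed lookup tables are illustrative stand-ins, not the kernel's code:

#include <stdio.h>

/* Illustrative stand-ins for the kernel definitions; the values are the
 * real NVMe ones, the names and tables are simplified. */
#define NVME_FABRICS_OPCODE        0x7f  /* all fabrics commands share this opcode */
#define NVME_FCTYPE_PROPERTY_SET   0x00

struct cmd { unsigned char opcode; unsigned char fctype; };

static const char *fabrics_opcode_str(unsigned char fctype)
{
        switch (fctype) {
        case NVME_FCTYPE_PROPERTY_SET: return "Property Set";
        case 0x01: return "Connect";
        default: return "Unknown";
        }
}

static const char *io_opcode_str(unsigned char opcode)
{
        switch (opcode) {
        case 0x01: return "Write";
        case 0x02: return "Read";
        default: return "Unknown";
        }
}

static const char *admin_opcode_str(unsigned char opcode)
{
        switch (opcode) {
        case 0x06: return "Identify";
        default: return "Unknown";
        }
}

/* Old shape: every caller supplies an fctype, even pci, which has none. */
static const char *old_opcode_str(int qid, unsigned char opcode, unsigned char fctype)
{
        if (opcode == NVME_FABRICS_OPCODE)
                return fabrics_opcode_str(fctype);
        return qid ? io_opcode_str(opcode) : admin_opcode_str(opcode);
}

/* New shape: pci uses the plain helper... */
static const char *new_opcode_str(int qid, unsigned char opcode)
{
        return qid ? io_opcode_str(opcode) : admin_opcode_str(opcode);
}

/* ...and the fabrics transports pass the whole command. */
static const char *new_fabrics_opcode_str(int qid, const struct cmd *c)
{
        if (c->opcode == NVME_FABRICS_OPCODE)
                return fabrics_opcode_str(c->fctype);
        return new_opcode_str(qid, c->opcode);
}

int main(void)
{
        struct cmd connect = { .opcode = NVME_FABRICS_OPCODE, .fctype = 0x01 };

        /* A stray fabrics opcode seen by the pci timeout handler: */
        printf("old pci: %s\n", old_opcode_str(1, 0x7f, 0)); /* "Property Set" */
        printf("new pci: %s\n", new_opcode_str(1, 0x7f));    /* "Unknown" */

        /* An rdma/tcp transport reporting a real fabrics command: */
        printf("fabrics: %s\n", new_fabrics_opcode_str(1, &connect)); /* "Connect" */
        return 0;
}

With the old signature, pci's hard-coded fctype of 0 made any stray 0x7f opcode read as "Property Set"; after the split, pci never consults the fabrics table and such an opcode falls through to "Unknown", while rdma and tcp hand the whole struct nvme_command to nvme_fabrics_opcode_str() and keep the fabrics names.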
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/nvme.h  13
-rw-r--r--  drivers/nvme/host/pci.c    2
-rw-r--r--  drivers/nvme/host/rdma.c   7
-rw-r--r--  drivers/nvme/host/tcp.c    6
4 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index acdf0e5e7e9a..b70b333a0874 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1164,11 +1164,18 @@ static inline const char *nvme_get_fabrics_opcode_str(u8 opcode)
 }
 #endif /* CONFIG_NVME_VERBOSE_ERRORS */
 
-static inline const char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+static inline const char *nvme_opcode_str(int qid, u8 opcode)
 {
-        if (opcode == nvme_fabrics_command)
-                return nvme_get_fabrics_opcode_str(fctype);
         return qid ? nvme_get_opcode_str(opcode) :
                 nvme_get_admin_opcode_str(opcode);
 }
+
+static inline const char *nvme_fabrics_opcode_str(
+                int qid, const struct nvme_command *cmd)
+{
+        if (nvme_is_fabrics(cmd))
+                return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype);
+
+        return nvme_opcode_str(qid, cmd->common.opcode);
+}
 #endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 25eb72779541..e6267a6aa380 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1349,7 +1349,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
                 dev_warn(dev->ctrl.device,
                          "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n",
                          req->tag, nvme_cid(req), opcode,
-                         nvme_opcode_str(nvmeq->qid, opcode, 0), nvmeq->qid);
+                         nvme_opcode_str(nvmeq->qid, opcode), nvmeq->qid);
                 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
                 goto disable;
         }
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3f393ee20281..6adf2c19a712 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1951,14 +1951,13 @@ static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
         struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
         struct nvme_rdma_queue *queue = req->queue;
         struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-        u8 opcode = req->req.cmd->common.opcode;
-        u8 fctype = req->req.cmd->fabrics.fctype;
+        struct nvme_command *cmd = req->req.cmd;
         int qid = nvme_rdma_queue_idx(queue);
 
         dev_warn(ctrl->ctrl.device,
                  "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout\n",
-                 rq->tag, nvme_cid(rq), opcode,
-                 nvme_opcode_str(qid, opcode, fctype), qid);
+                 rq->tag, nvme_cid(rq), cmd->common.opcode,
+                 nvme_fabrics_opcode_str(qid, cmd), qid);
 
         if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
                 /*
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4393cf244025..9f8dea2e2c7d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2428,13 +2428,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
         struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
         struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
         struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
-        u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+        struct nvme_command *cmd = &pdu->cmd;
         int qid = nvme_tcp_queue_id(req->queue);
 
         dev_warn(ctrl->device,
                  "I/O tag %d (%04x) type %d opcode %#x (%s) QID %d timeout\n",
-                 rq->tag, nvme_cid(rq), pdu->hdr.type, opc,
-                 nvme_opcode_str(qid, opc, fctype), qid);
+                 rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
+                 nvme_fabrics_opcode_str(qid, cmd), qid);
 
         if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
                 /*