Diffstat (limited to 'drivers/nvme')
 drivers/nvme/host/core.c       | 65
 drivers/nvme/host/fabrics.c    | 13
 drivers/nvme/host/fabrics.h    |  2
 drivers/nvme/host/fc.c         |  2
 drivers/nvme/host/ioctl.c      |  6
 drivers/nvme/host/nvme.h       |  4
 drivers/nvme/host/rdma.c       |  3
 drivers/nvme/host/tcp.c        |  2
 drivers/nvme/target/loop.c     |  2
 drivers/nvme/target/passthru.c |  8
 drivers/nvme/target/tcp.c      |  1
 11 files changed, 47 insertions(+), 61 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 80c656dcbbac..11779be42186 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -609,6 +609,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU);
static inline void nvme_clear_nvme_request(struct request *req)
{
+ nvme_req(req)->status = 0;
nvme_req(req)->retries = 0;
nvme_req(req)->flags = 0;
req->rq_flags |= RQF_DONTPREP;
@@ -631,6 +632,8 @@ static inline void nvme_init_request(struct request *req,
cmd->common.flags &= ~NVME_CMD_SGL_ALL;
req->cmd_flags |= REQ_FAILFAST_DRIVER;
+ if (req->mq_hctx->type == HCTX_TYPE_POLL)
+ req->cmd_flags |= REQ_HIPRI;
nvme_clear_nvme_request(req);
memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
@@ -1029,29 +1032,23 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
-static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
+/*
+ * Return values:
+ * 0: success
+ * >0: nvme controller's cqe status response
+ * <0: kernel error in lieu of controller response
+ */
+static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
+ bool at_head)
{
- struct completion *waiting = rq->end_io_data;
+ blk_status_t status;
- rq->end_io_data = NULL;
- complete(waiting);
-}
-
-static void nvme_execute_rq_polled(struct request_queue *q,
- struct gendisk *bd_disk, struct request *rq, int at_head)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
-
- WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
-
- rq->cmd_flags |= REQ_HIPRI;
- rq->end_io_data = &wait;
- blk_execute_rq_nowait(bd_disk, rq, at_head, nvme_end_sync_rq);
-
- while (!completion_done(&wait)) {
- blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
- cond_resched();
- }
+ status = blk_execute_rq(disk, rq, at_head);
+ if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+ return -EINTR;
+ if (nvme_req(rq)->status)
+ return nvme_req(rq)->status;
+ return blk_status_to_errno(status);
}
/*
@@ -1061,7 +1058,7 @@ static void nvme_execute_rq_polled(struct request_queue *q,
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags, bool poll)
+ blk_mq_req_flags_t flags)
{
struct request *req;
int ret;
@@ -1082,16 +1079,9 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- if (poll)
- nvme_execute_rq_polled(req->q, NULL, req, at_head);
- else
- blk_execute_rq(NULL, req, at_head);
- if (result)
+ ret = nvme_execute_rq(NULL, req, at_head);
+ if (result && ret >= 0)
*result = nvme_req(req)->result;
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = nvme_req(req)->status;
out:
blk_mq_free_request(req);
return ret;
@@ -1102,7 +1092,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
- NVME_QID_ANY, 0, 0, false);
+ NVME_QID_ANY, 0, 0);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -1179,18 +1169,21 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
}
}
-void nvme_execute_passthru_rq(struct request *rq)
+int nvme_execute_passthru_rq(struct request *rq)
{
struct nvme_command *cmd = nvme_req(rq)->cmd;
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
struct nvme_ns *ns = rq->q->queuedata;
struct gendisk *disk = ns ? ns->disk : NULL;
u32 effects;
+ int ret;
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- blk_execute_rq(disk, rq, 0);
+ ret = nvme_execute_rq(disk, rq, false);
if (effects) /* nothing to be done for zero cmd effects */
nvme_passthru_end(ctrl, effects);
+
+ return ret;
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
@@ -1465,7 +1458,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
+ buffer, buflen, 0, NVME_QID_ANY, 0, 0);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -2047,7 +2040,7 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, 0,
- NVME_QID_ANY, 1, 0, false);
+ NVME_QID_ANY, 1, 0);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
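
[Illustration, not part of the patch] The core.c hunks above fold the old polled and
non-polled sync paths into a single nvme_execute_rq() with a tri-state return:
< 0 is a negative kernel errno (e.g. -EINTR for a cancelled request), 0 is success,
and > 0 is the controller's CQE status. The standalone sketch below models that
convention and the "copy the result out only when ret >= 0" pattern used by
__nvme_submit_sync_cmd(); the fake_execute()/fake_submit_sync() names and the
status value are invented for this example and are not kernel APIs.

/* tri-state return convention sketch: <0 errno, 0 success, >0 NVMe status */
#include <errno.h>
#include <stdio.h>

#define FAKE_NVME_SC_INVALID_FIELD 0x2	/* stand-in for an NVMe status code */

/* Pretend to issue a command; 'outcome' selects which case to model. */
static int fake_execute(int outcome, unsigned int *result)
{
	switch (outcome) {
	case 0:				/* command completed successfully */
		*result = 0x1234;	/* CQE DW0, like nvme_req(req)->result */
		return 0;
	case 1:				/* controller returned an error status */
		return FAKE_NVME_SC_INVALID_FIELD;
	default:			/* request cancelled inside the kernel */
		return -EINTR;
	}
}

/* Mirrors __nvme_submit_sync_cmd(): copy the result out only when ret >= 0. */
static int fake_submit_sync(int outcome, unsigned int *result)
{
	unsigned int res = 0;
	int ret = fake_execute(outcome, &res);

	if (result && ret >= 0)
		*result = res;
	return ret;
}

int main(void)
{
	for (int outcome = 0; outcome < 3; outcome++) {
		unsigned int result = 0;
		int ret = fake_submit_sync(outcome, &result);

		if (ret < 0)
			printf("outcome %d: kernel error %d, no CQE\n", outcome, ret);
		else if (ret > 0)
			printf("outcome %d: NVMe status 0x%x\n", outcome, ret);
		else
			printf("outcome %d: success, result 0x%x\n", outcome, result);
	}
	return 0;
}
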
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 1e6a7cc056ca..a5469fd9d4c3 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -154,7 +154,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
- NVME_QID_ANY, 0, 0, false);
+ NVME_QID_ANY, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -200,7 +200,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0, 0,
- NVME_QID_ANY, 0, 0, false);
+ NVME_QID_ANY, 0, 0);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -245,7 +245,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0, 0,
- NVME_QID_ANY, 0, 0, false);
+ NVME_QID_ANY, 0, 0);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -391,7 +391,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
data, sizeof(*data), 0, NVME_QID_ANY, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -415,7 +415,6 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
* @qid: NVMe I/O queue number for the new I/O connection between
* host and target (note qid == 0 is illegal as this is
* the Admin queue, per NVMe standard).
- * @poll: Whether or not to poll for the completion of the connect cmd.
*
* This function issues a fabrics-protocol connection
* of a NVMe I/O queue (via NVMe Fabrics "Connect" command)
@@ -427,7 +426,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
* > 0: NVMe error status code
* < 0: Linux errno error code
*/
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
{
struct nvme_command cmd = { };
struct nvmf_connect_data *data;
@@ -453,7 +452,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
data, sizeof(*data), 0, qid, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index c31dad69a773..a146cb903869 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -182,7 +182,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 95aad3fed571..b08a61ca283f 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2346,7 +2346,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
(qsize / 5));
if (ret)
break;
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
break;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index d93928d1e5bd..305ddd415e45 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -93,11 +93,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
}
}
- nvme_execute_passthru_rq(req);
- if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
- ret = -EINTR;
- else
- ret = nvme_req(req)->status;
+ ret = nvme_execute_passthru_rq(req);
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (meta && !ret && !write) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 75420ceacc10..18ef8dd03a90 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -658,7 +658,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags, bool poll);
+ blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen,
u32 *result);
@@ -876,7 +876,7 @@ static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
-void nvme_execute_passthru_rq(struct request *rq);
+int nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a9e70cefd7ed..7f6b3a991501 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -680,11 +680,10 @@ static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
{
struct nvme_rdma_queue *queue = &ctrl->queues[idx];
- bool poll = nvme_rdma_poll_queue(queue);
int ret;
if (idx)
- ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
else
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c7bd37103cf4..12acfe05cd68 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1574,7 +1574,7 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
int ret;
if (idx)
- ret = nvmf_connect_io_queue(nctrl, idx, false);
+ ret = nvmf_connect_io_queue(nctrl, idx);
else
ret = nvmf_connect_admin_queue(nctrl);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index a5c4a1865026..3a17a7e26bbf 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -337,7 +337,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
return ret;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index fced52de33ce..225cd1ffbe45 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -153,11 +153,10 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
struct request *rq = req->p.rq;
- u16 status;
+ int status;
- nvme_execute_passthru_rq(rq);
+ status = nvme_execute_passthru_rq(rq);
- status = nvme_req(rq)->status;
if (status == NVME_SC_SUCCESS &&
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
@@ -168,7 +167,8 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
nvmet_passthru_override_id_ns(req);
break;
}
- }
+ } else if (status < 0)
+ status = NVME_SC_INTERNAL;
req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, status);
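
[Illustration, not part of the patch] The passthru.c hunk above consumes the same
tri-state return on the target side: a positive value is already an NVMe status code
and can be completed as-is, while a negative kernel errno has no wire representation
and is reported as a generic internal error. A minimal sketch of that mapping follows;
the helper name is invented, and the status values are taken from include/linux/nvme.h.

#include <stdio.h>

#define NVME_SC_SUCCESS  0x0
#define NVME_SC_INTERNAL 0x6

/* Map nvme_execute_passthru_rq()'s return value to a CQE status field. */
static unsigned short passthru_status_to_cqe(int status)
{
	if (status < 0)			/* kernel error, e.g. -EINTR */
		return NVME_SC_INTERNAL;
	return (unsigned short)status;	/* 0 or controller status, as-is */
}

int main(void)
{
	printf("0    -> 0x%x\n", passthru_status_to_cqe(0));
	printf("0x2  -> 0x%x\n", passthru_status_to_cqe(0x2));
	printf("-4   -> 0x%x\n", passthru_status_to_cqe(-4));
	return 0;
}
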
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index d8aceef83284..07ee347ea3f3 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1497,7 +1497,6 @@ static void nvmet_tcp_state_change(struct sock *sk)
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
/* FALLTHRU */
- sk->sk_user_data = NULL;
nvmet_tcp_schedule_release_queue(queue);
break;
default: