author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2019-08-07 17:51:20 +1000 |
---|---|---|
committer | Sagi Grimberg <sagi@grimberg.me> | 2019-08-29 12:55:02 -0700 |
commit | c1e0cc7e1d319936271dfdd0a9405275c8091381 | |
tree | 56ffaf9c071de640780d7f4e703262b9cfddeee0 /drivers/nvme | |
parent | 8a1d09a668e7e245b8e4131cc9017c63fee02ee5 | |
nvme-pci: Add support for variable IO SQ element size
By spec, the size of a submission queue element is always 64 bytes
(a log2 element size, SQES, of 6).
However, some controllers, such as Apple's, do not implement the standard
properly and require a different element size.

This provides the groundwork for the subsequent quirks for those
controllers.
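
As a quick illustration of that sizing rule, here is a standalone user-space sketch (not part of the patch; the variable names and the 128-byte alternative are illustrative assumptions). It does the same arithmetic the reworked SQ_SIZE() macro does: shift the queue depth by a per-queue log2 element-size exponent.

```c
/*
 * Standalone sketch (user space, not driver code): how a log2 SQ element
 * size exponent ("sqes") maps to entry and ring sizes.  The spec value 6
 * gives the usual 64-byte entries; 7 is shown only as an example of a
 * non-standard size a quirky controller might need.
 */
#include <stdio.h>

int main(void)
{
	unsigned int q_depth = 1024;	/* example queue depth */
	unsigned int spec_sqes = 6;	/* 2^6 = 64-byte entries, per spec */
	unsigned int quirk_sqes = 7;	/* 2^7 = 128-byte entries (illustrative) */

	printf("spec entry size:  %u bytes\n", 1u << spec_sqes);
	printf("quirk entry size: %u bytes\n", 1u << quirk_sqes);

	/* Whole-ring allocation size, mirroring SQ_SIZE(q) after the patch */
	printf("spec SQ ring:  %u bytes\n", q_depth << spec_sqes);
	printf("quirk SQ ring: %u bytes\n", q_depth << quirk_sqes);
	return 0;
}
```

The old `q_depth * sizeof(struct nvme_command)` formula could only express the first case; encoding the element size as a shift keeps both the allocation and the tail-offset arithmetic cheap.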
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Minwoo Im <minwoo.im.dev@gmail.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Diffstat (limited to 'drivers/nvme')
-rw-r--r-- drivers/nvme/host/pci.c | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a09e6c4e3434..eee93e138c2c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,7 +28,7 @@
 #include "trace.h"
 #include "nvme.h"
 
-#define SQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_command))
+#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
 #define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
 
 #define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
@@ -100,6 +100,7 @@ struct nvme_dev {
 	unsigned io_queues[HCTX_MAX_TYPES];
 	unsigned int num_vecs;
 	int q_depth;
+	int io_sqes;
 	u32 db_stride;
 	void __iomem *bar;
 	unsigned long bar_mapped_size;
@@ -162,7 +163,7 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
 struct nvme_queue {
 	struct nvme_dev *dev;
 	spinlock_t sq_lock;
-	struct nvme_command *sq_cmds;
+	void *sq_cmds;
 	 /* only used for poll queues: */
 	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
 	volatile struct nvme_completion *cqes;
@@ -178,6 +179,7 @@ struct nvme_queue {
 	u16 last_cq_head;
 	u16 qid;
 	u8 cq_phase;
+	u8 sqes;
 	unsigned long flags;
 #define NVMEQ_ENABLED		0
 #define NVMEQ_SQ_CMB		1
@@ -488,7 +490,8 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
 			    bool write_sq)
 {
 	spin_lock(&nvmeq->sq_lock);
-	memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
+	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
+	       cmd, sizeof(*cmd));
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
 		nvmeq->sq_tail = 0;
 	nvme_write_sq_db(nvmeq, write_sq);
@@ -1465,6 +1468,7 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	if (dev->ctrl.queue_count > qid)
 		return 0;
 
+	nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
 	nvmeq->q_depth = depth;
 	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
 					 &nvmeq->cq_dma_addr, GFP_KERNEL);
@@ -2317,6 +2321,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 	dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
 	dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
 	dev->dbs = dev->bar + 4096;
+	dev->io_sqes = NVME_NVM_IOSQES;
 
 	/*
 	 * Temporary fix for the Apple controller found in the MacBook8,1 and
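
To see why sq_cmds becomes a plain void * and how the new `sq_tail << sqes` offset behaves, here is a minimal user-space mock of the submission path (an illustration only; the mock_* names, types, and values are assumptions, not driver code):

```c
/*
 * Sketch of the new submission-slot addressing (user-space mock, not the
 * driver): with sq_cmds treated as a byte buffer, slot N starts at
 * N << sqes, and only sizeof(*cmd) bytes of each (possibly larger) slot
 * are written.  Names loosely mirror the patch; the types are mocks.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct mock_nvme_command { uint8_t bytes[64]; };	/* 64-byte command, per spec */

struct mock_queue {
	void *sq_cmds;		/* raw SQ ring, element size is 1 << sqes */
	uint16_t sq_tail;
	uint16_t q_depth;
	uint8_t sqes;		/* log2 of the SQ element size */
};

static void mock_submit_cmd(struct mock_queue *q, struct mock_nvme_command *cmd)
{
	/* Same arithmetic as the patched nvme_submit_cmd() */
	memcpy((char *)q->sq_cmds + ((size_t)q->sq_tail << q->sqes),
	       cmd, sizeof(*cmd));
	if (++q->sq_tail == q->q_depth)
		q->sq_tail = 0;
}

int main(void)
{
	struct mock_queue q = { .q_depth = 4, .sqes = 7 };	/* 128-byte slots */
	struct mock_nvme_command cmd = { .bytes = { 0x01 } };

	q.sq_cmds = calloc(q.q_depth, (size_t)1 << q.sqes);	/* SQ_SIZE(q) */
	if (!q.sq_cmds)
		return 1;
	mock_submit_cmd(&q, &cmd);	/* lands at offset 0 */
	mock_submit_cmd(&q, &cmd);	/* lands at offset 128, not 64 */
	free(q.sq_cmds);
	return 0;
}
```

Each slot is `1 << sqes` bytes, so with an exponent of 7 consecutive commands land 128 bytes apart even though only the first 64 bytes of each slot are written, which is what the patched SQ_SIZE() and nvme_submit_cmd() arrange for.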