author | Matthew Wilcox <matthew.r.wilcox@intel.com> | 2012-01-06 13:52:56 -0700 |
---|---|---|
committer | Matthew Wilcox <matthew.r.wilcox@intel.com> | 2012-01-10 14:54:22 -0500 |
commit | 1c2ad9faaf662b4a525348775deca3ac8e6c35a0 (patch) | |
tree | 3b378e4edca145b682cd4952c105ff9e31b3d465 /drivers/block | |
parent | fe304c43c6d63e29ed4fc46a874d7a74313788c5 (diff) | |
download | linux-1c2ad9faaf662b4a525348775deca3ac8e6c35a0.tar.gz linux-1c2ad9faaf662b4a525348775deca3ac8e6c35a0.tar.bz2 linux-1c2ad9faaf662b4a525348775deca3ac8e6c35a0.zip |
NVMe: Simplify nvme_unmap_user_pages
By using the iod->nents field (the same way other I/O paths do), we can
avoid recalculating the number of sg entries at unmap time, and make
nvme_unmap_user_pages() easier to call.
Also, use the 'write' parameter instead of assuming DMA_FROM_DEVICE.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
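Pieced together from the hunks below, the unmap helper after this change reads roughly as follows. This is a reconstruction for readability only (kernel-internal code, not buildable on its own); the comment is editorial, the rest comes straight from the diff:

```c
static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod)
{
	int i;

	/* Reuse the SG entry count that nvme_map_user_pages() recorded in
	 * iod->nents instead of recomputing it from addr/length, and let
	 * the caller's 'write' flag pick the DMA direction. */
	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	for (i = 0; i < iod->nents; i++)
		put_page(sg_page(&iod->sg[i]));
}
```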
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/nvme.c | 19 |
1 file changed, 9 insertions, 10 deletions
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 71fc9030b4df..3cf82c27a544 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -1046,6 +1046,7 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 		offset = 0;
 	}
 	sg_mark_end(&sg[i - 1]);
+	iod->nents = count;
 
 	err = -ENOMEM;
 	nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
@@ -1066,16 +1067,15 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 }
 
 static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
-			unsigned long addr, int length, struct nvme_iod *iod)
+			struct nvme_iod *iod)
 {
-	struct scatterlist *sg = iod->sg;
-	int i, count;
+	int i;
 
-	count = DIV_ROUND_UP(offset_in_page(addr) + length, PAGE_SIZE);
-	dma_unmap_sg(&dev->pci_dev->dev, sg, count, DMA_FROM_DEVICE);
+	dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+				write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
-	for (i = 0; i < count; i++)
-		put_page(sg_page(&sg[i]));
+	for (i = 0; i < iod->nents; i++)
+		put_page(sg_page(&iod->sg[i]));
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1132,7 +1132,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	else
 		status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
 
-	nvme_unmap_user_pages(dev, io.opcode & 1, io.addr, length, iod);
+	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
 	nvme_free_iod(dev, iod);
 	return status;
 }
@@ -1180,8 +1180,7 @@ static int nvme_user_admin_cmd(struct nvme_ns *ns,
 	status = nvme_submit_admin_cmd(dev, &c, NULL);
 
 	if (cmd.data_len) {
-		nvme_unmap_user_pages(dev, cmd.opcode & 1, cmd.addr,
-					cmd.data_len, iod);
+		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
 		nvme_free_iod(dev, iod);
 	}
 	return status;
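With the narrower signature, callers no longer have to carry the user address and length to the teardown path. A condensed view of the tail of nvme_submit_io() after the patch, reconstructed from the hunk above with the surrounding setup elided; the comment is editorial:

```c
	/* ... command 'c' built from the user's struct nvme_user_io ... */
	status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);

	/* The callers pass (opcode & 1) as the 'write' flag, which the
	 * unmap path turns into DMA_TO_DEVICE or DMA_FROM_DEVICE. */
	nvme_unmap_user_pages(dev, io.opcode & 1, iod);
	nvme_free_iod(dev, iod);
	return status;
```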