author     Alexey Kardashevskiy <aik@ozlabs.ru>   2015-06-05 16:35:03 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>  2015-06-11 15:14:56 +1000
commit     649354b75dca1ebcb55d8db41e1d6b59ef69ac77 (patch)
tree       972d11c3b8a4ab16c3e8f82febb459e4e8af8025 /drivers/vfio
parent     3c56e822f8fb0105949d04bda0e549e9d08713cd (diff)
download   linux-649354b75dca1ebcb55d8db41e1d6b59ef69ac77.tar.gz
           linux-649354b75dca1ebcb55d8db41e1d6b59ef69ac77.tar.bz2
           linux-649354b75dca1ebcb55d8db41e1d6b59ef69ac77.zip
vfio: powerpc/spapr: Moving pinning/unpinning to helpers
This is a pretty mechanical patch to make the next patches simpler.

The new tce_iommu_unuse_page() helper does put_page() now, but it may
skip that once the memory registering patch is applied.

While we are here, this removes unnecessary checks on the value
returned by pfn_to_page(), as it cannot possibly return NULL.

This moves tce_iommu_disable() later so that tce_iommu_clear() knows
whether the container has been enabled; if it has not been, then
put_page() must not be called on TCEs from the TCE table. This
situation is not yet possible, but it will be once the KVM acceleration
patchset is applied.

This changes the code to work with physical addresses rather than
linear mapping addresses, for better readability. Following patches
will add an xchg() callback for an IOMMU table which will accept/return
physical addresses (unlike the current tce_build()), eliminating
redundant conversions.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[aw: for the vfio related changes]
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
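For orientation before reading the diff: the patch pairs a pinning
helper (tce_iommu_use_page()) with an unpinning helper
(tce_iommu_unuse_page()), converting between physical and linear
addresses with __pa()/__va() at the helper boundaries. Below is a
minimal sketch of that pairing; pin_one_page() and unpin_one_page() are
illustrative names rather than kernel functions, and error handling is
reduced to the essentials.

    #include <linux/mm.h>           /* get_user_pages_fast(), put_page() */
    #include <linux/page-flags.h>   /* SetPageDirty() */

    /*
     * Sketch of the tce_iommu_use_page() idea: pin the userspace page
     * backing 'uaddr' and hand back its physical address, as the diff
     * does with __pa(page_address(page)).
     */
    static int pin_one_page(unsigned long uaddr, int writable,
            unsigned long *hpa)
    {
            struct page *page = NULL;

            if (get_user_pages_fast(uaddr & PAGE_MASK, 1, writable,
                            &page) != 1)
                    return -EFAULT;

            *hpa = __pa((unsigned long) page_address(page));

            return 0;
    }

    /*
     * Sketch of the tce_iommu_unuse_page() idea: translate the physical
     * address back to a struct page, mark it dirty if the device may
     * have written to it, and drop the reference taken at pin time.
     */
    static void unpin_one_page(unsigned long hpa, int was_written)
    {
            struct page *page = pfn_to_page(hpa >> PAGE_SHIFT);

            if (was_written)
                    SetPageDirty(page);

            put_page(page);
    }

The invariant the patch makes explicit is that every successful pin
must eventually be balanced by an unpin, either on the error path of
the mapping loop in tce_iommu_build() or when tce_iommu_clear() tears
the table down, which is why both sides are funneled through helpers.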
Diffstat (limited to 'drivers/vfio')
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c  61
1 file changed, 41 insertions(+), 20 deletions(-)
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 5bbdf378fd59..cf5d4a159ce9 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -191,69 +191,90 @@ static void tce_iommu_release(void *iommu_data)
struct tce_container *container = iommu_data;
WARN_ON(container->tbl && !container->tbl->it_group);
- tce_iommu_disable(container);
if (container->tbl && container->tbl->it_group)
tce_iommu_detach_group(iommu_data, container->tbl->it_group);
+ tce_iommu_disable(container);
mutex_destroy(&container->lock);
kfree(container);
}
+static void tce_iommu_unuse_page(struct tce_container *container,
+ unsigned long oldtce)
+{
+ struct page *page;
+
+ if (!(oldtce & (TCE_PCI_READ | TCE_PCI_WRITE)))
+ return;
+
+ page = pfn_to_page(oldtce >> PAGE_SHIFT);
+
+ if (oldtce & TCE_PCI_WRITE)
+ SetPageDirty(page);
+
+ put_page(page);
+}
+
static int tce_iommu_clear(struct tce_container *container,
struct iommu_table *tbl,
unsigned long entry, unsigned long pages)
{
unsigned long oldtce;
- struct page *page;
for ( ; pages; --pages, ++entry) {
oldtce = iommu_clear_tce(tbl, entry);
if (!oldtce)
continue;
- page = pfn_to_page(oldtce >> PAGE_SHIFT);
- WARN_ON(!page);
- if (page) {
- if (oldtce & TCE_PCI_WRITE)
- SetPageDirty(page);
- put_page(page);
- }
+ tce_iommu_unuse_page(container, oldtce);
}
return 0;
}
+static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
+{
+ struct page *page = NULL;
+ enum dma_data_direction direction = iommu_tce_direction(tce);
+
+ if (get_user_pages_fast(tce & PAGE_MASK, 1,
+ direction != DMA_TO_DEVICE, &page) != 1)
+ return -EFAULT;
+
+ *hpa = __pa((unsigned long) page_address(page));
+
+ return 0;
+}
+
static long tce_iommu_build(struct tce_container *container,
struct iommu_table *tbl,
unsigned long entry, unsigned long tce, unsigned long pages)
{
long i, ret = 0;
- struct page *page = NULL;
- unsigned long hva;
+ struct page *page;
+ unsigned long hpa;
enum dma_data_direction direction = iommu_tce_direction(tce);
for (i = 0; i < pages; ++i) {
unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
- ret = get_user_pages_fast(tce & PAGE_MASK, 1,
- direction != DMA_TO_DEVICE, &page);
- if (unlikely(ret != 1)) {
- ret = -EFAULT;
+ ret = tce_iommu_use_page(tce, &hpa);
+ if (ret)
break;
- }
+ page = pfn_to_page(hpa >> PAGE_SHIFT);
if (!tce_page_is_contained(page, tbl->it_page_shift)) {
ret = -EPERM;
break;
}
- hva = (unsigned long) page_address(page) + offset;
-
- ret = iommu_tce_build(tbl, entry + i, hva, direction);
+ hpa |= offset;
+ ret = iommu_tce_build(tbl, entry + i, (unsigned long) __va(hpa),
+ direction);
if (ret) {
- put_page(page);
+ tce_iommu_unuse_page(container, hpa);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
__func__, entry << tbl->it_page_shift,
tce, ret);