author		Ben Skeggs <bskeggs@redhat.com>	2017-11-01 03:56:19 +1000
committer	Ben Skeggs <bskeggs@redhat.com>	2017-11-02 13:32:30 +1000
commit		9202d732e6bc3b46566db3ed25f7a5a3eeaee3c1 (patch)
tree		34489a4c28d1f8dc0ff63491ff111a349e639022 /drivers/gpu/drm
parent		6f4dc18c166cd36d8e9dfd130874060065bedd1c (diff)
drm/nouveau/imem/nv50-: use new interfaces for vmm operations
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
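
The change migrates both instmem backends from the legacy nvkm_vm_*() entry points onto the new nvkm_vmm_*() interfaces, and from vma structures embedded in the caller to vma pointers allocated by the vmm. A minimal sketch of the calling-convention change, assuming only what the diff below shows (the two wrapper functions and their names are illustrative, not part of the patch):

/* Sketch only: the calling-convention change in one place.  The
 * nvkm_vm_*()/nvkm_vmm_*() calls, their argument order, and the
 * bar.offset -> bar->addr change are taken from the diff below;
 * everything else here is illustrative scaffolding.
 */

static int bar2_window_old(struct nvkm_vmm *vmm, u64 size)
{
	struct nvkm_vma bar = {};	/* vma embedded in the caller */
	int ret = nvkm_vm_get(vmm, size, 12, NV_MEM_ACCESS_RW, &bar);
	if (ret)
		return ret;
	/* ... window lives at bar.offset ... */
	nvkm_vm_put(&bar);		/* vmm implied by the vma itself */
	return 0;
}

static int bar2_window_new(struct nvkm_vmm *vmm, u64 size)
{
	struct nvkm_vma *bar = NULL;	/* vma allocated by the vmm */
	int ret = nvkm_vmm_get(vmm, 12, size, &bar);	/* 12 = page shift */
	if (ret)
		return ret;
	/* ... window lives at bar->addr ... */
	nvkm_vmm_put(vmm, &bar);	/* vmm now passed explicitly */
	return 0;
}

Because nvkm_vmm_put() needs the vmm named explicitly, nv50_instobj_dtor() below has to look the BAR2 vmm up via nvkm_bar_bar2_vmm() and tolerate it being NULL during teardown.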
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c	| 45
-rw-r--r--	drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c	| 28
2 files changed, 41 insertions, 32 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 39f6e8e42339..2f60f0d18aeb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -52,7 +52,7 @@
 
 struct gk20a_instobj {
 	struct nvkm_memory memory;
-	struct nvkm_mem mem;
+	struct nvkm_mm_node *mn;
 	struct gk20a_instmem *imem;
 
 	/* CPU mapping */
@@ -129,13 +129,13 @@ gk20a_instobj_page(struct nvkm_memory *memory)
 static u64
 gk20a_instobj_addr(struct nvkm_memory *memory)
 {
-	return gk20a_instobj(memory)->mem.offset;
+	return (u64)gk20a_instobj(memory)->mn->offset << 12;
 }
 
 static u64
 gk20a_instobj_size(struct nvkm_memory *memory)
 {
-	return (u64)gk20a_instobj(memory)->mem.size << 12;
+	return (u64)gk20a_instobj(memory)->mn->length << 12;
 }
 
 /*
@@ -284,8 +284,22 @@ gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
 		  struct nvkm_vma *vma, void *argv, u32 argc)
 {
 	struct gk20a_instobj *node = gk20a_instobj(memory);
-	nvkm_vm_map_at(vma, 0, &node->mem);
-	return 0;
+	struct nvkm_vmm_map map = {
+		.memory = &node->memory,
+		.offset = offset,
+		.mem = node->mn,
+	};
+
+	if (vma->vm) {
+		struct nvkm_mem mem = {
+			.mem = node->mn,
+			.memory = &node->memory,
+		};
+		nvkm_vm_map_at(vma, 0, &mem);
+		return 0;
+	}
+
+	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
 }
 
 static void *
@@ -298,8 +312,8 @@ gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
 	if (unlikely(!node->base.vaddr))
 		goto out;
 
-	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
-		       node->handle, imem->attrs);
+	dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
+		       node->base.vaddr, node->handle, imem->attrs);
 
 out:
 	return node;
@@ -311,7 +325,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
 	struct gk20a_instmem *imem = node->base.imem;
 	struct device *dev = imem->base.subdev.device->dev;
-	struct nvkm_mm_node *r = node->base.mem.mem;
+	struct nvkm_mm_node *r = node->base.mn;
 	int i;
 
 	if (unlikely(!r))
@@ -329,7 +343,7 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
 	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
 
 	/* Unmap pages from GPU address space and free them */
-	for (i = 0; i < node->base.mem.size; i++) {
+	for (i = 0; i < node->base.mn->length; i++) {
 		iommu_unmap(imem->domain,
 			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
 		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
@@ -410,8 +424,7 @@ gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
 	node->r.offset = node->handle >> 12;
 	node->r.length = (npages << PAGE_SHIFT) >> 12;
 
-	node->base.mem.offset = node->handle;
-	node->base.mem.mem = &node->r;
+	node->base.mn = &node->r;
 	return 0;
 }
 
@@ -488,8 +501,7 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
 	/* IOMMU bit tells that an address is to be resolved through the IOMMU */
 	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);
 
-	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
-	node->base.mem.mem = r;
+	node->base.mn = r;
 	return 0;
 
 release_area:
@@ -537,13 +549,8 @@ gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
 
 	node->imem = imem;
 
-	/* present memory for being mapped using small pages */
-	node->mem.size = size >> 12;
-	node->mem.memtype = 0;
-	node->mem.memory = &node->memory;
-
 	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
-		   size, align, node->mem.offset);
+		   size, align, (u64)node->mn->offset << 12);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index 64e2b6e0e8b1..1ba7289684aa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -46,7 +46,7 @@ struct nv50_instobj {
 	struct nvkm_instobj base;
 	struct nv50_instmem *imem;
 	struct nvkm_memory *ram;
-	struct nvkm_vma bar;
+	struct nvkm_vma *bar;
 	refcount_t maps;
 	void *map;
 	struct list_head lru;
@@ -124,7 +124,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	struct nvkm_memory *memory = &iobj->base.memory;
 	struct nvkm_subdev *subdev = &imem->base.subdev;
 	struct nvkm_device *device = subdev->device;
-	struct nvkm_vma bar = {}, ebar;
+	struct nvkm_vma *bar = NULL, *ebar;
 	u64 size = nvkm_memory_size(memory);
 	void *emap;
 	int ret;
@@ -134,7 +134,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 	 * to the possibility of recursion for page table allocation.
 	 */
 	mutex_unlock(&subdev->mutex);
-	while ((ret = nvkm_vm_get(vmm, size, 12, NV_MEM_ACCESS_RW, &bar))) {
+	while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
 		/* Evict unused mappings, and keep retrying until we either
 		 * succeed,or there's no more objects left on the LRU.
 		 */
@@ -144,10 +144,10 @@
 			nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
 				   nvkm_memory_addr(&eobj->base.memory),
 				   nvkm_memory_size(&eobj->base.memory),
-				   eobj->bar.offset);
+				   eobj->bar->addr);
 			list_del_init(&eobj->lru);
 			ebar = eobj->bar;
-			eobj->bar.node = NULL;
+			eobj->bar = NULL;
 			emap = eobj->map;
 			eobj->map = NULL;
 		}
@@ -155,16 +155,16 @@
 		if (!eobj)
 			break;
 		iounmap(emap);
-		nvkm_vm_put(&ebar);
+		nvkm_vmm_put(vmm, &ebar);
 	}
 
 	if (ret == 0)
-		ret = nvkm_memory_map(memory, 0, vmm, &bar, NULL, 0);
+		ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
 	mutex_lock(&subdev->mutex);
-	if (ret || iobj->bar.node) {
+	if (ret || iobj->bar) {
 		/* We either failed, or another thread beat us. */
 		mutex_unlock(&subdev->mutex);
-		nvkm_vm_put(&bar);
+		nvkm_vmm_put(vmm, &bar);
 		mutex_lock(&subdev->mutex);
 		return;
 	}
@@ -172,10 +172,10 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 
 	/* Make the mapping visible to the host. */
 	iobj->bar = bar;
 	iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
-			       (u32)iobj->bar.offset, size);
+			       (u32)iobj->bar->addr, size);
 	if (!iobj->map) {
 		nvkm_warn(subdev, "PRAMIN ioremap failed\n");
-		nvkm_vm_put(&iobj->bar);
+		nvkm_vmm_put(vmm, &iobj->bar);
 	}
 }
 
@@ -299,7 +299,7 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
 {
 	struct nv50_instobj *iobj = nv50_instobj(memory);
 	struct nvkm_instmem *imem = &iobj->imem->base;
-	struct nvkm_vma bar;
+	struct nvkm_vma *bar;
 	void *map = map;
 
 	mutex_lock(&imem->subdev.mutex);
@@ -310,8 +310,10 @@ nv50_instobj_dtor(struct nvkm_memory *memory)
 	mutex_unlock(&imem->subdev.mutex);
 
 	if (map) {
+		struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
 		iounmap(map);
-		nvkm_vm_put(&bar);
+		if (likely(vmm)) /* Can be NULL during BAR destructor. */
+			nvkm_vmm_put(vmm, &bar);
 	}
 
 	nvkm_memory_unref(&iobj->ram);
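
A note on units for the gk20a side: the struct nvkm_mm_node that replaces struct nvkm_mem keeps offset and length in 4KiB GPU pages rather than bytes, which is why gk20a_instobj_addr(), gk20a_instobj_size() and the constructors above shift by 12 at every byte boundary. A hedged sketch of that bookkeeping (the helper names and the GPU_PAGE_SHIFT macro are illustrative, not from the patch; the conversions mirror the diff):

/* Sketch: page-unit bookkeeping for the nvkm_mm_node replacing
 * struct nvkm_mem.  offset/length are in 4KiB GPU pages; shift
 * values match the conversions in the diff above.
 */
#define GPU_PAGE_SHIFT 12	/* 4KiB GPU pages, as assumed by the patch */

static u64 mm_node_byte_addr(const struct nvkm_mm_node *mn)
{
	return (u64)mn->offset << GPU_PAGE_SHIFT;	/* pages -> bytes */
}

static u64 mm_node_byte_size(const struct nvkm_mm_node *mn)
{
	return (u64)mn->length << GPU_PAGE_SHIFT;	/* pages -> bytes */
}

static void mm_node_from_dma(struct nvkm_mm_node *mn, dma_addr_t handle,
			     u32 npages)
{
	/* Mirrors gk20a_instobj_ctor_dma(): byte values -> page units. */
	mn->offset = handle >> GPU_PAGE_SHIFT;
	mn->length = ((u64)npages << PAGE_SHIFT) >> GPU_PAGE_SHIFT;
}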