author     Ben Skeggs <bskeggs@redhat.com>   2017-11-01 03:56:19 +1000
committer  Ben Skeggs <bskeggs@redhat.com>   2017-11-02 13:32:27 +1000
commit     dd12d158eb91442674111a423d88abee4180b5a9 (patch)
tree       3bfc219921409378ff882b0e159010f93f40585e /drivers/gpu/drm/nouveau/nvkm
parent     eb813999f20097d24310836dfa07a97e2eb0c936 (diff)
drm/nouveau/mmu/nv04: implement new vmm backend
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
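
For context, the new backend added below (nv04_vmm_pgt_pte()) writes each page-table entry as a 32-bit word of the form addr | 0x00000003 (present + writable), placed at byte offset 8 + ptei * 4 in the page-table object and stepped by 0x1000 per 4 KiB page. The stand-alone C sketch below mirrors that encoding for illustration only; the helper names, constants, and example address are assumptions made here, not kernel identifiers.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NV04_PTE_BASE  8u          /* illustrative: PTEs start 8 bytes into the pgt object */
#define NV04_PTE_VALID 0x00000003u /* illustrative: present + writable bits, as in the patch */

/* Encode one nv04-style PTE for a 4 KiB page at 'addr'. */
static uint32_t nv04_pte_encode(uint32_t addr)
{
        return addr | NV04_PTE_VALID;
}

/* Byte offset of PTE index 'ptei' inside the page-table object. */
static uint32_t nv04_pte_offset(uint32_t ptei)
{
        return NV04_PTE_BASE + ptei * 4;
}

int main(void)
{
        uint32_t addr = 0x00100000; /* hypothetical DMA address for the example */

        /* Map three consecutive 4 KiB pages, as the new map loop would. */
        for (uint32_t ptei = 0; ptei < 3; ptei++, addr += 0x1000)
                printf("PTE[%" PRIu32 "] @ +0x%03" PRIx32 " = 0x%08" PRIx32 "\n",
                       ptei, nv04_pte_offset(ptei), nv04_pte_encode(addr));
        return 0;
}

Running the sketch prints entries 0x00100003, 0x00101003, 0x00102003 at offsets 0x008, 0x00c, 0x010, matching the pattern the removed nv04_vm_map_sg() and the new nv04_vmm_pgt_pte() both produce.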
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c    | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h     |  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c | 58
3 files changed, 58 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
index 3e6036431754..13438dbc6063 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv04.c
@@ -26,71 +26,14 @@
#include <nvif/class.h>
#define NV04_PDMA_SIZE (128 * 1024 * 1024)
-#define NV04_PDMA_PAGE ( 4 * 1024)
-
-/*******************************************************************************
- * VM map/unmap callbacks
- ******************************************************************************/
-
-static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
- struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- pte = 0x00008 + (pte * 4);
- nvkm_kmap(pgt);
- while (cnt) {
- u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
- u32 phys = (u32)*list++;
- while (cnt && page--) {
- nvkm_wo32(pgt, pte, phys | 3);
- phys += NV04_PDMA_PAGE;
- pte += 4;
- cnt -= 1;
- }
- }
- nvkm_done(pgt);
-}
-
-static void
-nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
-{
- pte = 0x00008 + (pte * 4);
- nvkm_kmap(pgt);
- while (cnt--) {
- nvkm_wo32(pgt, pte, 0x00000000);
- pte += 4;
- }
- nvkm_done(pgt);
-}
-
-static void
-nv04_vm_flush(struct nvkm_vm *vm)
-{
-}
-
-/*******************************************************************************
- * MMU subdev
- ******************************************************************************/
-
-static int
-nv04_mmu_oneinit(struct nvkm_mmu *mmu)
-{
- mmu->vmm->pgt[0].mem[0] = mmu->vmm->pd->pt[0]->memory;
- mmu->vmm->pgt[0].refcount[0] = 1;
- return 0;
-}
const struct nvkm_mmu_func
nv04_mmu = {
- .oneinit = nv04_mmu_oneinit,
.limit = NV04_PDMA_SIZE,
.dma_bits = 32,
.pgt_bits = 32 - 12,
.spg_shift = 12,
.lpg_shift = 12,
- .map_sg = nv04_vm_map_sg,
- .unmap = nv04_vm_unmap,
- .flush = nv04_vm_flush,
.vmm = {{ -1, -1, NVIF_CLASS_VMM_NV04}, nv04_vmm_new, true },
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 042d84c5e950..cbb8d47fbd11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -157,6 +157,7 @@ void nvkm_vmm_ptes_unmap(struct nvkm_vmm *, const struct nvkm_vmm_page *,
int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
u64, u64, void *, u32, struct lock_class_key *,
const char *, struct nvkm_vmm **);
+int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
struct nvkm_mmu *, u64, u64, void *, u32,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
index 1f874b351a39..0cab1ffc9f64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c
@@ -24,8 +24,50 @@
#include <nvif/if000d.h>
#include <nvif/unpack.h>
+static inline void
+nv04_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
+{
+ u32 data = addr | 0x00000003; /* PRESENT, RW. */
+ while (ptes--) {
+ VMM_WO032(pt, vmm, 8 + ptei++ * 4, data);
+ data += 0x00001000;
+ }
+}
+
+static void
+nv04_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+ VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+}
+
+static void
+nv04_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+ u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+#if PAGE_SHIFT == 12
+ nvkm_kmap(pt->memory);
+ while (ptes--)
+ VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003);
+ nvkm_done(pt->memory);
+#else
+ VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte);
+#endif
+}
+
+static void
+nv04_vmm_pgt_unmap(struct nvkm_vmm *vmm,
+ struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+ VMM_FO032(pt, vmm, 8 + (ptei * 4), 0, ptes);
+}
+
static const struct nvkm_vmm_desc_func
nv04_vmm_desc_pgt = {
+ .unmap = nv04_vmm_pgt_unmap,
+ .dma = nv04_vmm_pgt_dma,
+ .sgl = nv04_vmm_pgt_sgl,
};
static const struct nvkm_vmm_desc
@@ -34,8 +76,22 @@ nv04_vmm_desc_12[] = {
{}
};
+int
+nv04_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
+ struct nvkm_vmm_map *map)
+{
+ union {
+ struct nv04_vmm_map_vn vn;
+ } *args = argv;
+ int ret = -ENOSYS;
+ if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
+ VMM_DEBUG(vmm, "args");
+ return ret;
+}
+
static const struct nvkm_vmm_func
nv04_vmm = {
+ .valid = nv04_vmm_valid,
.page = {
{ 12, &nv04_vmm_desc_12[0], NVKM_VMM_PAGE_HOST },
{}
@@ -65,8 +121,8 @@ nv04_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc,
struct lock_class_key *key, const char *name,
struct nvkm_vmm **pvmm)
{
- struct nvkm_vmm *vmm;
struct nvkm_memory *mem;
+ struct nvkm_vmm *vmm;
int ret;
ret = nv04_vmm_new_(&nv04_vmm, mmu, 8, addr, size,